@inproceedings{2157,
abstract = {We show that the following algorithmic problem is decidable: given a 2-dimensional simplicial complex, can it be embedded (topologically, or equivalently, piecewise linearly) in ℝ3? By a known reduction, it suffices to decide the embeddability of a given triangulated 3-manifold X into the 3-sphere S3. The main step, which allows us to simplify X and recurse, is in proving that if X can be embedded in S3, then there is also an embedding in which X has a short meridian, i.e., an essential curve in the boundary of X bounding a disk in S3 ∖ X with length bounded by a computable function of the number of tetrahedra of X.},
author = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {78 -- 84},
publisher = {ACM},
title = {{Embeddability in the 3-sphere is decidable}},
doi = {10.1145/2582112.2582137},
year = {2014},
}
@article{2158,
  abstract = {Directional guidance of migrating cells is relatively well explored in the reductionist setting of cell culture experiments. Here spatial gradients of chemical cues as well as gradients of mechanical substrate characteristics prove sufficient to attract single cells as well as their collectives. How such gradients present and act in the context of an organism is far less clear. Here we review recent advances in understanding how guidance cues emerge and operate in the physiological context.},
  author = {Majumdar, Ritankar and Sixt, Michael K and Parent, Carole},
  doi = {10.1016/j.ceb.2014.05.010},
  journal = {Current Opinion in Cell Biology},
  number = {1},
  pages = {33 -- 40},
  publisher = {Elsevier},
  title = {{New paradigms in the establishment and maintenance of gradients during directed cell migration}},
  volume = {30},
  year = {2014},
}
@inproceedings{2160,
abstract = {Transfer learning has received a lot of attention in the machine learning community over the last years, and several effective algorithms have been developed. However, relatively little is known about their theoretical properties, especially in the setting of lifelong learning, where the goal is to transfer information to tasks for which no data have been observed so far. In this work we study lifelong learning from a theoretical perspective. Our main result is a PAC-Bayesian generalization bound that offers a unified view on existing paradigms for transfer learning, such as the transfer of parameters or the transfer of low-dimensional representations. We also use the bound to derive two principled lifelong learning algorithms, and we show that these yield results comparable with existing methods.},
author = {Pentina, Anastasia and Lampert, Christoph},
booktitle = {Proceedings of the 31st International Conference on Machine Learning},
editor = {Xing, Eric and Jebara, Tony},
location = {Beijing, China},
pages = {991 -- 999},
publisher = {Omnipress},
title = {{A PAC-Bayesian bound for Lifelong Learning}},
volume = {32},
year = {2014},
}
@inproceedings{2162,
abstract = {We study two-player (zero-sum) concurrent mean-payoff games played on a finite-state graph. We focus on the important sub-class of ergodic games where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: We establish (1) an optimal exponential bound on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) the approximation problem lies in FNP; (3) the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP ∩ coNP is the long-standing best known bound). We present a variant of the strategy-iteration algorithm by Hoffman and Karp; show that both our algorithm and the classical value-iteration algorithm can approximate the value in exponential time; and identify a subclass where the value-iteration algorithm is a FPTAS. We also show that the exact value can be expressed in the existential theory of the reals, and establish square-root sum hardness for a related class of games.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {122 -- 133},
publisher = {Springer},
title = {{The complexity of ergodic mean payoff games}},
doi = {10.1007/978-3-662-43951-7_11},
volume = {8573},
year = {2014},
}
@inproceedings{2163,
  abstract = {We consider multi-player graph games with partial-observation and parity objective. While the decision problem for three-player games with a coalition of the first and second players against the third player is undecidable in general, we present a decidability result for partial-observation games where the first and third player are in a coalition against the second player, thus where the second player is adversarial but weaker due to partial-observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that already in the case where player 1 has perfect observation, memory of size non-elementary is necessary in general for reachability objectives, and the problem is decidable for safety and reachability objectives. From our results we derive new complexity results for partial-observation stochastic games.},
  author = {Chatterjee, Krishnendu and Doyen, Laurent},
  booktitle = {Lecture Notes in Computer Science},
  doi = {10.1007/978-3-662-43951-7_10},
  location = {Copenhagen, Denmark},
  number = {Part 2},
  pages = {110 -- 121},
  publisher = {Springer},
  title = {{Games with a weak adversary}},
  volume = {8573},
  year = {2014},
}
@article{2165,
abstract = {In machine learning, the domain adaptation problem arrives when the test (target) and the train (source) data are generated from different distributions. A key applied issue is thus the design of algorithms able to generalize on a new distribution, for which we have no label information. We focus on learning classification models defined as a weighted majority vote over a set of real-valued functions. In this context, Germain et al. (2013) have shown that a measure of disagreement between these functions is crucial to control. The core of this measure is a theoretical bound—the C-bound (Lacasse et al., 2007)—which involves the disagreement and leads to a well performing majority vote learning algorithm in usual non-adaptative supervised setting: MinCq. In this work, we propose a framework to extend MinCq to a domain adaptation scenario. This procedure takes advantage of the recent perturbed variation divergence between distributions proposed by Harel and Mannor (2012). Justified by a theoretical bound on the target risk of the vote, we provide to MinCq a target sample labeled thanks to a perturbed variation-based self-labeling focused on the regions where the source and target marginals appear similar. We also study the influence of our self-labeling, from which we deduce an original process for tuning the hyperparameters. Finally, our framework called PV-MinCq shows very promising results on a rotation and translation synthetic problem.},
author = {Morvant, Emilie},
journal = {Pattern Recognition Letters},
pages = {37 -- 43},
publisher = {Elsevier},
title = {{Domain Adaptation of Weighted Majority Votes via Perturbed Variation-Based Self-Labeling}},
doi = {10.1016/j.patrec.2014.08.013},
volume = {51},
year = {2014},
}
@inproceedings{2167,
  abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
  author = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
  booktitle = {IEEE 7th International Conference on Software Testing, Verification and Validation},
  doi = {10.1109/ICST.2014.50},
  isbn = {978-1-4799-2255-0},
  issn = {2159-4848},
  location = {Cleveland, USA},
  publisher = {IEEE},
  title = {{Compositional specifications for IOCO testing}},
  year = {2014},
}
@article{2168,
  abstract = {Many species have an essentially continuous distribution in space, in which there are no natural divisions between randomly mating subpopulations. Yet, the standard approach to modelling these populations is to impose an arbitrary grid of demes, adjusting deme sizes and migration rates in an attempt to capture the important features of the population. Such indirect methods are required because of the failure of the classical models of isolation by distance, which have been shown to have major technical flaws. A recently introduced model of extinction and recolonisation in two dimensions solves these technical problems, and provides a rigorous technical foundation for the study of populations evolving in a spatial continuum. The coalescent process for this model is simply stated, but direct simulation is very inefficient for large neighbourhood sizes. We present efficient and exact algorithms to simulate this coalescent process for arbitrary sample sizes and numbers of loci, and analyse these algorithms in detail.},
  author = {Kelleher, Jerome and Etheridge, Alison and Barton, Nicholas H},
  doi = {10.1016/j.tpb.2014.05.001},
  journal = {Theoretical Population Biology},
  pages = {13 -- 23},
  publisher = {Academic Press},
  title = {{Coalescent simulation in continuous space: Algorithms for large neighbourhood size}},
  volume = {95},
  year = {2014},
}
@article{2169,
author = {Barton, Nicholas H and Novak, Sebastian and Paixao, Tiago},
journal = {Proceedings of the National Academy of Sciences},
number = {29},
pages = {10398 -- 10399},
publisher = {National Academy of Sciences},
title = {{Diverse forms of selection in evolution and computer science}},
doi = {10.1073/pnas.1410107111},
volume = {111},
year = {2014},
}
@article{2170,
abstract = {Short-read sequencing technologies have in principle made it feasible to draw detailed inferences about the recent history of any organism. In practice, however, this remains challenging due to the difficulty of genome assembly in most organisms and the lack of statistical methods powerful enough to discriminate between recent, nonequilibrium histories. We address both the assembly and inference challenges. We develop a bioinformatic pipeline for generating outgroup-rooted alignments of orthologous sequence blocks from de novo low-coverage short-read data for a small number of genomes, and show how such sequence blocks can be used to fit explicit models of population divergence and admixture in a likelihood framework. To illustrate our approach, we reconstruct the Pleistocene history of an oak-feeding insect (the oak gallwasp Biorhiza pallida), which, in common with many other taxa, was restricted during Pleistocene ice ages to a longitudinal series of southern refugia spanning the Western Palaearctic. Our analysis of sequence blocks sampled from a single genome from each of three major glacial refugia reveals support for an unexpected history dominated by recent admixture. Despite the fact that 80% of the genome is affected by admixture during the last glacial cycle, we are able to infer the deeper divergence history of these populations. These inferences are robust to variation in block length, mutation model and the sampling location of individual genomes within refugia. This combination of de novo assembly and numerical likelihood calculation provides a powerful framework for estimating recent population history that can be applied to any organism without the need for prior genetic resources.},
author = {Hearn, Jack and Stone, Graham and Bunnefeld, Lynsey and Nicholls, James and Barton, Nicholas H and Lohse, Konrad},
journal = {Molecular Ecology},
number = {1},
pages = {198 -- 211},
publisher = {Wiley-Blackwell},
title = {{Likelihood-based inference of population history from low-coverage de novo genome assemblies}},
doi = {10.1111/mec.12578},
volume = {23},
year = {2014},
}
@inproceedings{2171,
abstract = {We present LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves on par accuracy to other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation.},
author = {Kolesnikov, Alexander and Guillaumin, Matthieu and Ferrari, Vittorio and Lampert, Christoph},
booktitle = {Lecture Notes in Computer Science},
editor = {Fleet, David and Pajdla, Tomas and Schiele, Bernt and Tuytelaars, Tinne},
location = {Zurich, Switzerland},
number = {Part 3},
pages = {550 -- 565},
publisher = {Springer},
title = {{Closed-form approximate CRF training for scalable image segmentation}},
doi = {10.1007/978-3-319-10578-9_36},
volume = {8691},
year = {2014},
}
@inproceedings{2173,
abstract = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.},
author = {Khamis, Sameh and Lampert, Christoph},
booktitle = {Proceedings of the British Machine Vision Conference 2014},
location = {Nottingham, UK},
publisher = {BMVA Press},
title = {{CoConut: Co-classification with output space regularization}},
year = {2014},
}
@article{2174,
abstract = {When polygenic traits are under stabilizing selection, many different combinations of alleles allow close adaptation to the optimum. If alleles have equal effects, all combinations that result in the same deviation from the optimum are equivalent. Furthermore, the genetic variance that is maintained by mutation-selection balance is 2μ/S per locus, where μ is the mutation rate and S the strength of stabilizing selection. In reality, alleles vary in their effects, making the fitness landscape asymmetric and complicating analysis of the equilibria. We show that the resulting genetic variance depends on the fraction of alleles near fixation, which contribute by 2μ/S, and on the total mutational effects of alleles that are at intermediate frequency. The interplay between stabilizing selection and mutation leads to a sharp transition: alleles with effects smaller than a threshold value of 2 remain polymorphic, whereas those with larger effects are fixed. The genetic load in equilibrium is less than for traits of equal effects, and the fitness equilibria are more similar. We find that when the optimum is displaced, alleles with effects close to the threshold value sweep first, and their rate of increase is bounded. Long-term response leads in general to well-adapted traits, unlike the case of equal effects that often end up at a suboptimal fitness peak. However, the particular peaks to which the populations converge are extremely sensitive to the initial states and to the speed of the shift of the optimum trait value.},
author = {De Vladar, Harold and Barton, Nicholas H},
journal = {Genetics},
number = {2},
pages = {749 -- 767},
publisher = {Genetics Society of America},
title = {{Stability and response of polygenic traits to stabilizing selection and mutation}},
doi = {10.1534/genetics.113.159111},
volume = {197},
year = {2014},
}
@article{2175,
  abstract = {The cerebral cortex, the seat of our cognitive abilities, is composed of an intricate network of billions of excitatory projection and inhibitory interneurons. Postmitotic cortical neurons are generated by a diverse set of neural stem cell progenitors within dedicated zones and defined periods of neurogenesis during embryonic development. Disruptions in neurogenesis can lead to alterations in the neuronal cytoarchitecture, which is thought to represent a major underlying cause for several neurological disorders, including microcephaly, autism and epilepsy. Although a number of signaling pathways regulating neurogenesis have been described, the precise cellular and molecular mechanisms regulating the functional neural stem cell properties in cortical neurogenesis remain unclear. Here, we discuss the most up-to-date strategies to monitor the fundamental mechanistic parameters of neuronal progenitor proliferation, and recent advances deciphering the logic and dynamics of neurogenesis.},
  author = {Postiglione, Maria P and Hippenmeyer, Simon},
  doi = {10.2217/fnl.14.18},
  journal = {Future Neurology},
  number = {3},
  pages = {323 -- 340},
  publisher = {Future Medicine Ltd.},
  title = {{Monitoring neurogenesis in the cerebral cortex: an update}},
  volume = {9},
  year = {2014},
}
@article{2178,
abstract = {We consider the three-state toric homogeneous Markov chain model (THMC) without loops and initial parameters. At time T, the size of the design matrix is 6 × 3 · 2T-1 and the convex hull of its columns is the model polytope. We study the behavior of this polytope for T ≥ 3 and we show that it is defined by 24 facets for all T ≥ 5. Moreover, we give a complete description of these facets. From this, we deduce that the toric ideal associated with the design matrix is generated by binomials of degree at most 6. Our proof is based on a result due to Sturmfels, who gave a bound on the degree of the generators of a toric ideal, provided the normality of the corresponding toric variety. In our setting, we established the normality of the toric variety associated to the THMC model by studying the geometric properties of the model polytope.},
author = {Haws, David and Martin Del Campo Sanchez, Abraham and Takemura, Akimichi and Yoshida, Ruriko},
journal = {Beiträge zur Algebra und Geometrie},
number = {1},
pages = {161 -- 188},
publisher = {Springer},
title = {{Markov degree of the three-state toric homogeneous Markov chain model}},
doi = {10.1007/s13366-013-0178-y},
volume = {55},
year = {2014},
}
@article{2179,
  abstract = {We extend the proof of the local semicircle law for generalized Wigner matrices given in MR3068390 to the case when the matrix of variances has an eigenvalue -1. In particular, this result provides a short proof of the optimal local Marchenko-Pastur law at the hard edge (i.e. around zero) for sample covariance matrices X*X, where the variances of the entries of X may vary.},
  author = {Ajanki, Oskari H and Erdös, László and Krüger, Torben H},
  doi = {10.1214/ECP.v19-3121},
  journal = {Electronic Communications in Probability},
  publisher = {Institute of Mathematical Statistics},
  title = {{Local semicircle law with imprimitive variance matrix}},
  volume = {19},
  year = {2014},
}
@article{2180,
  abstract = {Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.},
  author = {Bellet, Aurélien and Habrard, Amaury and Morvant, Emilie and Sebban, Marc},
  doi = {10.1007/s10994-014-5462-z},
  journal = {Machine Learning},
  number = {1-2},
  pages = {129 -- 154},
  publisher = {Springer},
  title = {{Learning a priori constrained weighted majority votes}},
  volume = {97},
  year = {2014},
}
@article{2183,
abstract = {We describe a simple adaptive network of coupled chaotic maps. The network reaches a stationary state (frozen topology) for all values of the coupling parameter, although the dynamics of the maps at the nodes of the network can be nontrivial. The structure of the network shows interesting hierarchical properties and in certain parameter regions the dynamics is polysynchronous: Nodes can be divided in differently synchronized classes but, contrary to cluster synchronization, nodes in the same class need not be connected to each other. These complicated synchrony patterns have been conjectured to play roles in systems biology and circuits. The adaptive system we study describes ways whereby this behavior can evolve from undifferentiated nodes.},
author = {Botella Soler, Vicente and Glendinning, Paul},
journal = {Physical Review E: Statistical, Nonlinear, and Soft Matter Physics},
number = {6},
publisher = {American Physical Society},
title = {{Hierarchy and polysynchrony in an adaptive network}},
doi = {10.1103/PhysRevE.89.062809},
volume = {89},
year = {2014},
}
@article{2184,
abstract = {Given topological spaces X,Y, a fundamental problem of algebraic topology is understanding the structure of all continuous maps X→ Y. We consider a computational version, where X,Y are given as finite simplicial complexes, and the goal is to compute [X,Y], that is, all homotopy classes of such maps. We solve this problem in the stable range, where for some d ≥ 2, we have dim X ≤ 2d-2 and Y is (d-1)-connected; in particular, Y can be the d-dimensional sphere Sd. The algorithm combines classical tools and ideas from homotopy theory (obstruction theory, Postnikov systems, and simplicial sets) with algorithmic tools from effective algebraic topology (locally effective simplicial sets and objects with effective homology). In contrast, [X,Y] is known to be uncomputable for general X,Y, since for X = S1 it includes a well known undecidable problem: testing triviality of the fundamental group of Y. In follow-up papers, the algorithm is shown to run in polynomial time for d fixed, and extended to other problems, such as the extension problem, where we are given a subspace A ⊂ X and a map A→ Y and ask whether it extends to a map X → Y, or computing the Z2-index—everything in the stable range. Outside the stable range, the extension problem is undecidable.},
author = {Čadek, Martin and Krcál, Marek and Matoušek, Jiří and Sergeraert, Francis and Vokřínek, Lukáš and Wagner, Uli},
journal = {Journal of the ACM},
number = {3},
publisher = {ACM},
title = {{Computing all maps into a sphere}},
doi = {10.1145/2597629},
volume = {61},
year = {2014},
}
@inproceedings{2185,
abstract = {We revisit the classical problem of converting an imperfect source of randomness into a usable cryptographic key. Assume that we have some cryptographic application P that expects a uniformly random m-bit key R and ensures that the best attack (in some complexity class) against P(R) has success probability at most δ. Our goal is to design a key-derivation function (KDF) h that converts any random source X of min-entropy k into a sufficiently "good" key h(X), guaranteeing that P(h(X)) has comparable security δ′ which is 'close' to δ. Seeded randomness extractors provide a generic way to solve this problem for all applications P, with resulting security δ′ = O(δ), provided that we start with entropy k ≥ m + 2 log (1/δ) - O(1). By a result of Radhakrishnan and Ta-Shma, this bound on k (called the "RT-bound") is also known to be tight in general. Unfortunately, in many situations the loss of 2 log (1/δ) bits of entropy is unacceptable. This motivates the study KDFs with less entropy waste by placing some restrictions on the source X or the application P. In this work we obtain the following new positive and negative results in this regard: - Efficient samplability of the source X does not help beat the RT-bound for general applications. This resolves the SRT (samplable RT) conjecture of Dachman-Soled et al. [DGKM12] in the affirmative, and also shows that the existence of computationally-secure extractors beating the RT-bound implies the existence of one-way functions. - We continue in the line of work initiated by Barak et al. [BDK+11] and construct new information-theoretic KDFs which beat the RT-bound for large but restricted classes of applications. Specifically, we design efficient KDFs that work for all unpredictability applications P (e.g., signatures, MACs, one-way functions, etc.) and can either: (1) extract all of the entropy k = m with a very modest security loss δ′ = O(δ·log (1/δ)), or alternatively, (2) achieve essentially optimal security δ′ = O(δ) with a very modest entropy loss k ≥ m + loglog (1/δ). In comparison, the best prior results from [BDK+11] for this class of applications would only guarantee δ′ = O(√δ) when k = m, and would need k ≥ m + log (1/δ) to get δ′ = O(δ). - The weaker bounds of [BDK+11] hold for a larger class of so-called "square- friendly" applications (which includes all unpredictability, but also some important indistinguishability, applications). Unfortunately, we show that these weaker bounds are tight for the larger class of applications. - We abstract out a clean, information-theoretic notion of (k,δ,δ′)- unpredictability extractors, which guarantee "induced" security δ′ for any δ-secure unpredictability application P, and characterize the parameters achievable for such unpredictability extractors. Of independent interest, we also relate this notion to the previously-known notion of (min-entropy) condensers, and improve the state-of-the-art parameters for such condensers.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Wichs, Daniel},
booktitle = {Lecture Notes in Computer Science},
editor = {Nguyen, Phong and Oswald, Elisabeth},
location = {Copenhagen, Denmark},
pages = {93 -- 110},
publisher = {Springer},
title = {{Key derivation without entropy waste}},
doi = {10.1007/978-3-642-55220-5_6},
volume = {8441},
year = {2014},
}