@inproceedings{2033,
abstract = {The learning with privileged information setting has recently attracted a lot of attention within the machine learning community, as it allows the integration of additional knowledge into the training process of a classifier, even when this comes in the form of a data modality that is not available at test time. Here, we show that privileged information can naturally be treated as noise in the latent function of a Gaussian process classifier (GPC). That is, in contrast to the standard GPC setting, the latent function is not just a nuisance but a feature: it becomes a natural measure of confidence about the training data by modulating the slope of the GPC probit likelihood function. Extensive experiments on public datasets show that the proposed GPC method using privileged noise, called GPC+, improves over a standard GPC without privileged knowledge, and also over the current state-of-the-art SVM-based method, SVM+. Moreover, we show that advanced neural networks and deep learning methods can be compressed as privileged information.},
author = {Hernández Lobato, Daniel and Sharmanska, Viktoriia and Kersting, Kristian and Lampert, Christoph and Quadrianto, Novi},
booktitle = {Advances in Neural Information Processing Systems},
location = {Montreal, Canada},
pages = {837 -- 845},
publisher = {Neural Information Processing Systems},
title = {{Mind the nuisance: Gaussian process classification using privileged noise}},
volume = {1},
year = {2014},
}
@article{2036,
abstract = { In rapidly changing environments, selection history may impact the dynamics of adaptation. Mutations selected in one environment may result in pleiotropic fitness trade-offs in subsequent novel environments, slowing the rates of adaptation. Epistatic interactions between mutations selected in sequential stressful environments may slow or accelerate subsequent rates of adaptation, depending on the nature of that interaction. We explored the dynamics of adaptation during sequential exposure to herbicides with different modes of action in Chlamydomonas reinhardtii. Evolution of resistance to two of the herbicides was largely independent of selection history. For carbetamide, previous adaptation to other herbicide modes of action positively impacted the likelihood of adaptation to this herbicide. Furthermore, while adaptation to all individual herbicides was associated with pleiotropic fitness costs in stress-free environments, we observed that accumulation of resistance mechanisms was accompanied by a reduction in overall fitness costs. We suggest that antagonistic epistasis may be a driving mechanism that enables populations to more readily adapt in novel environments. These findings highlight the potential for sequences of xenobiotics to facilitate the rapid evolution of multiple-drug and -pesticide resistance, as well as the potential for epistatic interactions between adaptive mutations to facilitate evolutionary rescue in rapidly changing environments. },
author = {Lagator, Mato and Colegrave, Nick and Neve, Paul},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1794},
publisher = {The Royal Society},
title = {{Selection history and epistatic interactions impact dynamics of adaptation to novel environmental stresses}},
doi = {10.1098/rspb.2014.1679},
volume = {281},
year = {2014},
}
@article{2038,
abstract = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic},
number = {4},
publisher = {ACM},
title = {{Temporal specifications with accumulative values}},
doi = {10.1145/2629686},
volume = {15},
year = {2014},
}
@article{2039,
abstract = {A fundamental question in biology is the following: what is the time scale that is needed for evolutionary innovations? There are many results that characterize single steps in terms of the fixation time of new mutants arising in populations of certain size and structure. But here we ask a different question, which is concerned with the much longer time scale of evolutionary trajectories: how long does it take for a population exploring a fitness landscape to find target sequences that encode new biological functions? Our key variable is the length, L, of the genetic sequence that undergoes adaptation. In computer science there is a crucial distinction between problems that require algorithms which take polynomial or exponential time. The latter are considered to be intractable. Here we develop a theoretical approach that allows us to estimate the time of evolution as a function of L. We show that adaptation on many fitness landscapes takes time that is exponential in L, even if there are broad selection gradients and many targets uniformly distributed in sequence space. These negative results lead us to search for specific mechanisms that allow evolution to work on polynomial time scales. We study a regeneration process and show that it enables evolution to work in polynomial time.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Adlam, Ben and Nowak, Martin},
journal = {PLoS Computational Biology},
number = {9},
publisher = {Public Library of Science},
title = {{The time scale of evolutionary innovation}},
doi = {10.1371/journal.pcbi.1003818},
volume = {10},
year = {2014},
}
@article{2040,
abstract = {Development requires tissue growth as well as cell diversification. To address how these processes are coordinated, we analyzed the development of molecularly distinct domains of neural progenitors in the mouse and chick neural tube. We show that during development, these domains undergo changes in size that do not scale with changes in overall tissue size. Our data show that domain proportions are first established by opposing morphogen gradients and subsequently controlled by domain-specific regulation of differentiation rate but not differences in proliferation rate. Regulation of differentiation rate is key to maintaining domain proportions while accommodating both intra- and interspecies variations in size. Thus, the sequential control of progenitor specification and differentiation elaborates pattern without requiring that signaling gradients grow as tissues expand. },
author = {Kicheva, Anna and Bollenbach, Mark Tobias and Ribeiro, Ana and Pérez Valle, Helena and Lovell Badge, Robin and Episkopou, Vasso and Briscoe, James},
journal = {Science},
number = {6204},
publisher = {American Association for the Advancement of Science},
title = {{Coordination of progenitor specification and growth in mouse and chick spinal cord}},
doi = {10.1126/science.1254927},
volume = {345},
year = {2014},
}
@article{2041,
abstract = {The hippocampus mediates several higher brain functions, such as learning, memory, and spatial coding. The input region of the hippocampus, the dentate gyrus, plays a critical role in these processes. Several lines of evidence suggest that the dentate gyrus acts as a preprocessor of incoming information, preparing it for subsequent processing in CA3. For example, the dentate gyrus converts input from the entorhinal cortex, where cells have multiple spatial fields, into the spatially more specific place cell activity characteristic of the CA3 region. Furthermore, the dentate gyrus is involved in pattern separation, transforming relatively similar input patterns into substantially different output patterns. Finally, the dentate gyrus produces a very sparse coding scheme in which only a very small fraction of neurons are active at any one time.},
author = {Jonas, Peter M and Lisman, John},
journal = {Frontiers in Neural Circuits},
publisher = {Frontiers Research Foundation},
title = {{Structure, function and plasticity of hippocampal dentate gyrus microcircuits}},
doi = {10.3389/fncir.2014.00107},
volume = {8},
year = {2014},
}
@article{2042,
abstract = {Background: CRISPR is a microbial immune system likely to be involved in host-parasite coevolution. It functions using target sequences encoded by the bacterial genome, which interfere with invading nucleic acids using a homology-dependent system. The system also requires protospacer associated motifs (PAMs), short motifs close to the target sequence that are required for interference in CRISPR types I and II. Here, we investigate whether PAMs are depleted in phage genomes due to selection pressure to escape recognition. Results: To this end, we analyzed two data sets. Phages infecting all bacterial hosts were analyzed first, followed by a detailed analysis of phages infecting the genus Streptococcus, where PAMs are best understood. We use two different measures of motif underrepresentation that control for codon bias and the frequency of submotifs. We compare phages infecting species with a particular CRISPR type to those infecting species without that type. Since only known PAMs were investigated, the analysis is restricted to CRISPR types I-C and I-E and in Streptococcus to types I-C and II. We found evidence for PAM depletion in Streptococcus phages infecting hosts with CRISPR type I-C, in Vibrio phages infecting hosts with CRISPR type I-E and in Streptococcus thermophilus phages infecting hosts with type II-A, known as CRISPR3. Conclusions: The observed motif depletion in phages with hosts having CRISPR can be attributed to selection rather than to mutational bias, as mutational bias should affect the phages of all hosts. This observation implies that the CRISPR system has been efficient in the groups discussed here.},
author = {Kupczok, Anne and Bollback, Jonathan P},
journal = {BMC Genomics},
number = {1},
publisher = {BioMed Central},
title = {{Motif depletion in bacteriophages infecting hosts with CRISPR systems}},
doi = {10.1186/1471-2164-15-663},
volume = {15},
year = {2014},
}
@inproceedings{2043,
abstract = {Persistent homology is a popular and powerful tool for capturing topological features of data. Advances in algorithms for computing persistent homology have reduced the computation time drastically – as long as the algorithm does not exhaust the available memory. Following up on a recently presented parallel method for persistence computation on shared memory systems [1], we demonstrate that a simple adaptation of the standard reduction algorithm leads to a variant for distributed systems. Our algorithmic design ensures that the data is distributed over the nodes without redundancy; this permits the computation of much larger instances than on a single machine. Moreover, we observe that the parallelism at least compensates for the overhead caused by communication between nodes, and often even speeds up the computation compared to sequential and even parallel shared memory algorithms. In our experiments, we were able to compute the persistent homology of filtrations with more than a billion (10⁹) elements within seconds on a cluster with 32 nodes using less than 6GB of memory per node.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Proceedings of the Workshop on Algorithm Engineering and Experiments},
editor = { McGeoch, Catherine and Meyer, Ulrich},
location = {Portland, USA},
pages = {31 -- 38},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Distributed computation of persistent homology}},
doi = {10.1137/1.9781611973198.4},
year = {2014},
}
@inbook{2044,
abstract = {We present a parallel algorithm for computing the persistent homology of a filtered chain complex. Our approach differs from the commonly used reduction algorithm by first computing persistence pairs within local chunks, then simplifying the unpaired columns, and finally applying standard reduction on the simplified matrix. The approach generalizes a technique by Günther et al., which uses discrete Morse Theory to compute persistence; we derive the same worst-case complexity bound in a more general context. The algorithm employs several practical optimization techniques, which are of independent interest. Our sequential implementation of the algorithm is competitive with state-of-the-art methods, and we further improve the performance through parallel computation.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Topological Methods in Data Analysis and Visualization III},
editor = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald},
pages = {103 -- 117},
publisher = {Springer},
title = {{Clear and Compress: Computing Persistent Homology in Chunks}},
doi = {10.1007/978-3-319-04099-8_7},
year = {2014},
}
@inproceedings{2045,
abstract = {We introduce and study a new notion of enhanced chosen-ciphertext security (ECCA) for public-key encryption. Loosely speaking, in the ECCA security experiment, the decryption oracle provided to the adversary is augmented to return not only the output of the decryption algorithm on a queried ciphertext but also that of a randomness-recovery algorithm associated to the scheme. Our results mainly concern the case where the randomness-recovery algorithm is efficient. We provide constructions of ECCA-secure encryption from adaptive trapdoor functions as defined by Kiltz et al. (EUROCRYPT 2010), resulting in ECCA encryption from standard number-theoretic assumptions. We then give two applications of ECCA-secure encryption: (1) We use it as a unifying concept in showing equivalence of adaptive trapdoor functions and tag-based adaptive trapdoor functions, resolving an open question of Kiltz et al. (2) We show that ECCA-secure encryption can be used to securely realize an approach to public-key encryption with non-interactive opening (PKENO) originally suggested by Damgård and Thorbek (EUROCRYPT 2007), resulting in new and practical PKENO schemes quite different from those in prior work. Our results demonstrate that ECCA security is of both practical and theoretical interest.},
author = {Dachman Soled, Dana and Fuchsbauer, Georg and Mohassel, Payman and O’Neill, Adam},
booktitle = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {329 -- 344},
publisher = {Springer},
title = {{Enhanced chosen-ciphertext security and applications}},
doi = {10.1007/978-3-642-54631-0_19},
volume = {8383},
year = {2014},
}
@inproceedings{2046,
abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures do not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.},
author = {Bellare, Mihir and Fuchsbauer, Georg},
booktitle = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {520 -- 537},
publisher = {Springer},
title = {{Policy-based signatures}},
doi = {10.1007/978-3-642-54631-0_30},
volume = {8383},
year = {2014},
}
@inproceedings{2047,
abstract = {Following the publication of an attack on genome-wide association studies (GWAS) data proposed by Homer et al., considerable attention has been given to developing methods for releasing GWAS data in a privacy-preserving way. Here, we develop an end-to-end differentially private method for solving regression problems with convex penalty functions and selecting the penalty parameters by cross-validation. In particular, we focus on penalized logistic regression with elastic-net regularization, a method widely used in GWAS analyses to identify disease-causing genes. We show how a differentially private procedure for penalized logistic regression with elastic-net regularization can be applied to the analysis of GWAS data and evaluate our method’s performance.},
author = {Yu, Fei and Rybar, Michal and Uhler, Caroline and Fienberg, Stephen},
booktitle = {Lecture Notes in Computer Science},
editor = {Domingo Ferrer, Josep},
location = {Ibiza, Spain},
pages = {170 -- 184},
publisher = {Springer},
title = {{Differentially-private logistic regression for detecting multiple-SNP association in GWAS databases}},
doi = {10.1007/978-3-319-11257-2_14},
volume = {8744},
year = {2014},
}
@article{2050,
abstract = {The flow instability and further transition to turbulence in a toroidal pipe (torus) with curvature ratio (tube-to-coiling diameter) 0.049 is investigated experimentally. The flow inside the toroidal pipe is driven by a steel sphere fitted to the inner pipe diameter. The sphere is moved with constant azimuthal velocity from outside the torus by a moving magnet. The experiment is designed to investigate curved pipe flow by optical measurement techniques. Using stereoscopic particle image velocimetry, laser Doppler velocimetry and pressure drop measurements, the flow is measured for Reynolds numbers ranging from 1000 to 15 000. Time- and space-resolved velocity fields are obtained and analysed. The steady axisymmetric basic flow is strongly influenced by centrifugal effects. On an increase of the Reynolds number we find a sequence of bifurcations. For Re=4075±2% a supercritical bifurcation to an oscillatory flow is found in which waves travel in the streamwise direction with a phase velocity slightly faster than the mean flow. The oscillatory flow is superseded by a presumably quasi-periodic flow at a further increase of the Reynolds number before turbulence sets in. The results are found to be compatible, in general, with earlier experimental and numerical investigations on transition to turbulence in helical and curved pipes. However, important aspects of the bifurcation scenario differ considerably.},
author = {Kühnen, Jakob and Holzner, Markus and Hof, Björn and Kuhlmann, Hendrik},
journal = {Journal of Fluid Mechanics},
pages = {463 -- 491},
publisher = {Cambridge University Press},
title = {{Experimental investigation of transitional flow in a toroidal pipe}},
doi = {10.1017/jfm.2013.603},
volume = {738},
year = {2014},
}
@inproceedings{2052,
abstract = {A standard technique for solving the parameterized model checking problem is to reduce it to the classic model checking problem of finitely many finite-state systems. This work considers some of the theoretical power and limitations of this technique. We focus on concurrent systems in which processes communicate via pairwise rendezvous, as well as the special cases of disjunctive guards and token passing; specifications are expressed in indexed temporal logic without the next operator; and the underlying network topologies are generated by suitable Monadic Second Order Logic formulas and graph operations. First, we settle the exact computational complexity of the parameterized model checking problem for some of our concurrent systems, and establish new decidability results for others. Second, we consider cases in which model checking the parameterized system can be reduced to model checking some fixed number of processes; this number is known as a cutoff. We provide many cases for when such cutoffs can be computed, establish lower bounds on the size of such cutoffs, and identify cases where no cutoff exists. Third, we consider cases for which the parameterized system is equivalent to a single finite-state system (more precisely a Büchi word automaton), and establish tight bounds on the sizes of such automata.},
author = {Aminof, Benjamin and Kotek, Tomer and Rubin, Sasha and Spegni, Francesco and Veith, Helmut},
booktitle = {Lecture Notes in Computer Science},
editor = {Baldan, Paolo and Gorla, Daniele},
location = {Rome, Italy},
pages = {109 -- 124},
publisher = {Springer},
title = {{Parameterized model checking of rendezvous systems}},
doi = {10.1007/978-3-662-44584-6_9},
volume = {8704},
year = {2014},
}
@inproceedings{2053,
abstract = {In contrast to the usual understanding of probabilistic systems as stochastic processes, recently these systems have also been regarded as transformers of probabilities. In this paper, we give a natural definition of strong bisimulation for probabilistic systems corresponding to this view that treats probability distributions as first-class citizens. Our definition applies in the same way to discrete systems as well as to systems with uncountable state and action spaces. Several examples demonstrate that our definition refines the understanding of behavioural equivalences of probabilistic systems. In particular, it solves a longstanding open problem concerning the representation of memoryless continuous time by memoryfull continuous time. Finally, we give algorithms for computing this bisimulation not only for finite but also for classes of uncountably infinite systems.},
author = {Hermanns, Holger and Krčál, Jan and Křetínský, Jan},
booktitle = {Lecture Notes in Computer Science},
editor = {Baldan, Paolo and Gorla, Daniele},
location = {Rome, Italy},
pages = {249 -- 265},
publisher = {Springer},
title = {{Probabilistic bisimulation: Naturally on distributions}},
doi = {10.1007/978-3-662-44584-6_18},
volume = {8704},
year = {2014},
}
@inproceedings{2054,
abstract = {We study two-player concurrent games on finite-state graphs played for an infinite number of rounds, where in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. The objectives are ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). While the qualitative analysis problem for concurrent parity games with infinite-memory, infinite-precision randomized strategies was studied before, we study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision, or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory, or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^(2d+3)) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP. Our symbolic algorithms are based on a characterization of the winning sets as μ-calculus formulas, however, our μ-calculus formulas are crucially different from the ones for concurrent parity games (without bounded rationality); and our memoryless witness strategy constructions are significantly different from the infinite-memory witness strategy constructions for concurrent parity games.},
author = {Chatterjee, Krishnendu},
booktitle = {Lecture Notes in Computer Science},
editor = {Baldan, Paolo and Gorla, Daniele},
location = {Rome, Italy},
pages = {544 -- 559},
publisher = {Springer},
title = {{Qualitative concurrent parity games: Bounded rationality}},
doi = {10.1007/978-3-662-44584-6_37},
volume = {8704},
year = {2014},
}
@article{2056,
abstract = {We consider a continuous-time Markov chain (CTMC) whose state space is partitioned into aggregates, and each aggregate is assigned a probability measure. A sufficient condition for defining a CTMC over the aggregates is presented as a variant of weak lumpability, which also characterizes that the measure over the original process can be recovered from that of the aggregated one. We show how the applicability of de-aggregation depends on the initial distribution. The application section is devoted to illustrating how the developed theory aids in reducing CTMC models of biochemical systems particularly in connection to protein-protein interactions. We assume that the model is written by a biologist in the form of site-graph-rewrite rules. Site-graph-rewrite rules compactly express that, often, only a local context of a protein (instead of a full molecular species) needs to be in a certain configuration in order to trigger a reaction event. This observation leads to suitable aggregate Markov chains with smaller state spaces, thereby providing sufficient reduction in computational complexity. This is further exemplified in two case studies: simple unbounded polymerization and early EGFR/insulin crosstalk.},
author = {Ganguly, Arnab and Petrov, Tatjana and Koeppl, Heinz},
journal = {Journal of Mathematical Biology},
number = {3},
pages = {767 -- 797},
publisher = {Springer},
title = {{Markov chain aggregation and its applications to combinatorial reaction networks}},
doi = {10.1007/s00285-013-0738-7},
volume = {69},
year = {2014},
}
@inproceedings{2057,
abstract = {In the past few years, a lot of attention has been devoted to multimedia indexing by fusing multimodal information. Two kinds of fusion schemes are generally considered: early fusion and late fusion. We focus on late classifier fusion, where one combines the scores of each modality at the decision level. To tackle this problem, we investigate a recent and elegant well-founded quadratic program named MinCq coming from the machine learning PAC-Bayesian theory. MinCq looks for the weighted combination, over a set of real-valued functions seen as voters, leading to the lowest misclassification rate, while maximizing the voters’ diversity. We propose an extension of MinCq tailored to multimedia indexing. Our method is based on an order-preserving pairwise loss adapted to ranking that allows us to improve the Mean Average Precision measure while taking into account the diversity of the voters that we want to fuse. We provide evidence that this method is naturally adapted to late fusion procedures and confirm the good behavior of our approach on the challenging PASCAL VOC’07 benchmark.},
author = {Morvant, Emilie and Habrard, Amaury and Ayache, Stéphane},
booktitle = {Lecture Notes in Computer Science},
location = {Joensuu, Finland},
pages = {153 -- 162},
publisher = {Springer},
title = {{Majority vote of diverse classifiers for late fusion}},
doi = {10.1007/978-3-662-44415-3_16},
volume = {8621},
year = {2014},
}
@article{2059,
abstract = {Plant embryogenesis is regulated by differential distribution of the plant hormone auxin. However, the cells establishing these gradients during microspore embryogenesis remain to be identified. For the first time, we use the GFP- and GUS-based auxin biosensors of the DR5 and DR5rev reporter gene systems to monitor auxin during the initial stages of Brassica napus androgenesis at cellular resolution. Our study provides evidence that the distribution of auxin changes during embryo development and depends on the temperature-inducible in vitro culture conditions. For this, microspores (mcs) were induced to undergo embryogenesis by heat treatment and then subjected to genetic modification via Agrobacterium tumefaciens. The duration of high temperature treatment had a significant influence on auxin distribution in isolated and in vitro-cultured microspores and on microspore-derived embryo development. In the “mild” heat-treated (1 day at 32 °C) mcs, auxin localized in a polar way already at the uni-nucleate microspore stage, which was critical for the initiation of embryos with suspensor-like structure. Assuming a mean mcs radius of 20 μm, endogenous auxin content in a single cell corresponded to a concentration of 1.01 μM. In mcs subjected to prolonged heat (5 days at 32 °C), although auxin concentration increased a dozen times, auxin polarization was set up only in few-celled pro-embryos without a suspensor. Those embryos were enclosed in the outer wall called the exine. Exine rupture was accompanied by polarization of the auxin gradient. Relative quantitative estimation of auxin, using time-lapse imaging, revealed that primordia possess up to 1.3-fold higher amounts than those found in the root apices of transgenic MDEs in the presence of exogenous auxin. Our results show, for the first time, which concentration of endogenous auxin coincides with the first cell division, and how high temperature interplays with auxin to delay the early establishment of microspore polarity. Moreover, we show how local auxin accumulation marks the apical–basal axis formation of the androgenic embryo and directs the axiality of the adult haploid plant.},
author = {Dubas, Ewa and Moravčíková, Jana and Libantová, Jana and Matušíková, Ildikó and Benková, Eva and Zur, Iwona and Krzewska, Monika},
journal = {Protoplasma},
number = {5},
pages = {1077 -- 1087},
publisher = {Springer},
title = {{The influence of heat stress on auxin distribution in transgenic B. napus microspores and microspore-derived embryos}},
doi = {10.1007/s00709-014-0616-1},
volume = {251},
year = {2014},
}
@article{2061,
abstract = {Development of cambium and its activity is important for our knowledge of the mechanism of secondary growth. Arabidopsis thaliana emerges as a good model plant for this kind of study. Thus, this paper reports on cellular events taking place in the interfascicular regions of inflorescence stems of A. thaliana, leading to the development of interfascicular cambium from differentiated interfascicular parenchyma cells (IPC). These events are as follows: appearance of auxin accumulation, PIN1 gene expression, polar PIN1 protein localization in the basal plasma membrane and periclinal divisions. Auxin levels were observed to be higher in parenchyma cells differentiating into cambium than in cells within the pith and cortex. Expression of PIN1 in IPC was always preceded by auxin accumulation. Basal localization of PIN1 was already established in the cells prior to their periclinal division. These cellular events initiated within parenchyma cells adjacent to the vascular bundles and successively extended from that point towards the middle region of the interfascicular area, located between neighboring vascular bundles; the final consequence was the closure of the cambial ring within the stem. Changes in the chemical composition of IPC walls were also detected and included changes of pectic epitopes, xyloglucans (XG) and extensins rich in hydroxyproline (HRGPs). In summary, results presented in this paper describe interfascicular cambium ontogenesis in terms of successive cellular events in the interfascicular regions of inflorescence stems of Arabidopsis.},
author = {Mazur, Ewa and Kurczyńska, Ewa and Friml, Jiří},
journal = {Protoplasma},
number = {5},
pages = {1125 -- 1139},
publisher = {Springer},
title = {{Cellular events during interfascicular cambium ontogenesis in inflorescence stems of Arabidopsis}},
doi = {10.1007/s00709-014-0620-5},
volume = {251},
year = {2014},
}
@article{2062,
abstract = {The success story of fast-spiking, parvalbumin-positive (PV+) GABAergic interneurons (GABA, γ-aminobutyric acid) in the mammalian central nervous system is noteworthy. In 1995, the properties of these interneurons were completely unknown. Twenty years later, thanks to the massive use of subcellular patch-clamp techniques, simultaneous multiple-cell recording, optogenetics, in vivo measurements, and computational approaches, our knowledge about PV+ interneurons became more extensive than for several types of pyramidal neurons. These findings have implications beyond the “small world” of basic research on GABAergic cells. For example, the results provide a first proof of principle that neuroscientists might be able to close the gaps between the molecular, cellular, network, and behavioral levels, representing one of the main challenges at the present time. Furthermore, the results may form the basis for PV+ interneurons as therapeutic targets for brain disease in the future. However, much needs to be learned about the basic function of these interneurons before clinical neuroscientists will be able to use PV+ interneurons for therapeutic purposes.},
author = {Hu, Hua and Gan, Jian and Jonas, Peter M},
journal = {Science},
number = {6196},
publisher = {American Association for the Advancement of Science},
title = {{Fast-spiking, parvalbumin+ GABAergic interneurons: From cellular design to microcircuit function}},
doi = {10.1126/science.1255263},
volume = {345},
year = {2014},
}
@inproceedings{2063,
abstract = {We consider Markov decision processes (MDPs), which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph theoretic algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw},
booktitle = {Lecture Notes in Computer Science},
location = {Vienna, Austria},
pages = {473 -- 490},
publisher = {Springer},
title = {{CEGAR for qualitative analysis of probabilistic systems}},
doi = {10.1007/978-3-319-08867-9_31},
volume = {8559},
year = {2014},
}
@article{2064,
abstract = {We examined the synaptic structure, quantity, and distribution of α-amino-3-hydroxy-5-methylisoxazole-4-propionic acid (AMPA)- and N-methyl-D-aspartate (NMDA)-type glutamate receptors (AMPARs and NMDARs, respectively) in rat cochlear nuclei by a highly sensitive freeze-fracture replica labeling technique. Four excitatory synapses formed by two distinct inputs, auditory nerve (AN) and parallel fibers (PF), on different cell types were analyzed. These excitatory synapse types included AN synapses on bushy cells (AN-BC synapses) and fusiform cells (AN-FC synapses) and PF synapses on FC (PF-FC synapses) and cartwheel cell spines (PF-CwC synapses). Immunogold labeling revealed differences in synaptic structure as well as AMPAR and NMDAR number and/or density in both AN and PF synapses, indicating a target-dependent organization. The immunogold receptor labeling also identified differences in the synaptic organization of FCs based on AN or PF connections, indicating an input-dependent organization in FCs. Among the four excitatory synapse types, the AN-BC synapses were the smallest and had the most densely packed intramembrane particles (IMPs), whereas the PF-CwC synapses were the largest and had sparsely packed IMPs. All four synapse types showed positive correlations between the IMP-cluster area and the AMPAR number, indicating a common intrasynapse-type relationship for glutamatergic synapses. Immunogold particles for AMPARs were distributed over the entire area of individual AN synapses; PF synapses often showed synaptic areas devoid of labeling. The gold-labeling for NMDARs occurred in a mosaic fashion, with less positive correlations between the IMP-cluster area and the NMDAR number. Our observations reveal target- and input-dependent features in the structure, number, and organization of AMPARs and NMDARs in AN and PF synapses.},
author = {Rubio, María and Fukazawa, Yugo and Kamasawa, Naomi and Clarkson, Cheryl and Molnár, Elek and Shigemoto, Ryuichi},
journal = {Journal of Comparative Neurology},
number = {18},
pages = {4023 -- 4042},
publisher = {Wiley-Blackwell},
title = {{Target- and input-dependent organization of AMPA and NMDA receptors in synaptic connections of the cochlear nucleus}},
doi = {10.1002/cne.23654},
volume = {522},
year = {2014},
}
@inproceedings{2082,
abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC^f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting by the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq²/2^c bound for this step, improving over the trivial bound of ℓ²q²/2^c. The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
booktitle = {Lecture Notes in Computer Science},
editor = {Garay, Juan and Gennaro, Rosario},
location = {Santa Barbara, USA},
number = {1},
pages = {113 -- 130},
publisher = {Springer},
title = {{The exact PRF-security of NMAC and HMAC}},
doi = {10.1007/978-3-662-44371-2_7},
volume = {8616},
year = {2014},
}
@article{2083,
abstract = {Understanding the effects of sex and migration on adaptation to novel environments remains a key problem in evolutionary biology. Using the single-celled alga Chlamydomonas reinhardtii, we investigated how sex and migration affected rates of evolutionary rescue in a sink environment, and subsequent changes in fitness following evolutionary rescue. We show that sex and migration affect both the rate of evolutionary rescue and subsequent adaptation. However, their combined effects change as the populations adapt to a sink habitat. Both sex and migration independently increased rates of evolutionary rescue, but the effect of sex on subsequent fitness improvements, following initial rescue, changed with migration, as sex was beneficial in the absence of migration but constrained adaptation when combined with migration. These results suggest that sex and migration are beneficial during the initial stages of adaptation, but can become detrimental as the population adapts to its environment.},
author = {Lagator, Mato and Morgan, Andrew and Neve, Paul and Colegrave, Nick},
journal = {Evolution},
number = {8},
pages = {2296 -- 2305},
publisher = {Wiley},
title = {{Role of sex and migration in adaptation to sink environments}},
doi = {10.1111/evo.12440},
volume = {68},
year = {2014},
}
@article{2084,
abstract = {Receptor tyrosine kinases (RTKs) are a large family of cell surface receptors that sense growth factors and hormones and regulate a variety of cell behaviours in health and disease. Contactless activation of RTKs with spatial and temporal precision is currently not feasible. Here, we generated RTKs that are insensitive to endogenous ligands but can be selectively activated by low-intensity blue light. We screened light-oxygen-voltage (LOV)-sensing domains for their ability to activate RTKs by light-activated dimerization. Incorporation of LOV domains found in aureochrome photoreceptors of stramenopiles resulted in robust activation of the fibroblast growth factor receptor 1 (FGFR1), epidermal growth factor receptor (EGFR) and rearranged during transfection (RET). In human cancer and endothelial cells, light induced cellular signalling with spatial and temporal precision. Furthermore, light faithfully mimicked complex mitogenic and morphogenic cell behaviour induced by growth factors. RTKs under optical control (Opto-RTKs) provide a powerful optogenetic approach to actuate cellular signals and manipulate cell behaviour.},
author = {Grusch, Michael and Schelch, Karin and Riedler, Robert and Gschaider-Reichhart, Eva and Differ, Christopher and Berger, Walter and Inglés Prieto, Álvaro and Janovjak, Harald L},
journal = {EMBO Journal},
number = {15},
pages = {1713 -- 1726},
publisher = {Wiley-Blackwell},
title = {{Spatio-temporally precise activation of engineered receptor tyrosine kinases by light}},
doi = {10.15252/embj.201387695},
volume = {33},
year = {2014},
}
@article{2086,
abstract = {Pathogens may gain a fitness advantage through manipulation of the behaviour of their hosts. Likewise, host behavioural changes can be a defence mechanism, counteracting the impact of pathogens on host fitness. We apply harmonic radar technology to characterize the impact of an emerging pathogen - Nosema ceranae (Microsporidia) - on honeybee (Apis mellifera) flight and orientation performance in the field. Honeybees are the most important commercial pollinators. Emerging diseases have been proposed to play a prominent role in colony decline, partly through sub-lethal behavioural manipulation of their hosts. We found that homing success was significantly reduced in diseased (65.8%) versus healthy foragers (92.5%). Although lost bees had significantly reduced continuous flight times and prolonged resting times, other flight characteristics and navigational abilities showed no significant difference between infected and non-infected bees. Our results suggest that infected bees express normal flight characteristics but are constrained in their homing ability, potentially compromising the colony by reducing its resource inputs, but also counteracting the intra-colony spread of infection. We provide the first high-resolution analysis of sub-lethal effects of an emerging disease on insect flight behaviour. The potential causes and the implications for both host and parasite are discussed.},
author = {Wolf, Stephan and McMahon, Dino and Lim, Ka and Pull, Christopher and Clark, Suzanne and Paxton, Robert and Osborne, Juliet},
journal = {PLoS One},
number = {8},
publisher = {Public Library of Science},
title = {{So near and yet so far: Harmonic radar reveals reduced homing ability of Nosema infected honeybees}},
doi = {10.1371/journal.pone.0103989},
volume = {9},
year = {2014},
}
@article{2141,
abstract = {The computation of the winning set for Büchi objectives in alternating games on graphs is a central problem in computer-aided verification with a large number of applications. The long-standing best known upper bound for solving the problem is Õ(n ⋅ m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n ⋅ m) boundary by presenting a new technique that reduces the running time to O(n²). This bound also leads to O(n²)-time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n ⋅ m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n³)), and (3) in Markov decision processes (improving for m > n^(4/3) an earlier bound of O(m ⋅ √m)). We then show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. Our algorithms are the first dynamic algorithms for this problem. We then consider another core graph theoretic problem in verification of probabilistic systems, namely computing the maximal end-component decomposition of a graph. We present two improved static algorithms for the maximal end-component decomposition problem. Our first algorithm is an O(m ⋅ √m)-time algorithm, and our second algorithm is an O(n²)-time algorithm which is obtained using the same technique as for alternating Büchi games. Thus, we obtain an O(min{m ⋅ √m, n²})-time algorithm improving the long-standing O(n ⋅ m) time bound. Finally, we show how to maintain the maximal end-component decomposition of a graph under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per edge deletion, and O(m) worst-case time per edge insertion. Again, our algorithms are the first dynamic algorithms for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
journal = {Journal of the ACM},
number = {3},
publisher = {ACM},
title = {{Efficient and dynamic algorithms for alternating Büchi games and maximal end-component decomposition}},
doi = {10.1145/2597631},
volume = {61},
year = {2014},
}
@inproceedings{2153,
abstract = {We define a simple, explicit map sending a morphism f : M → N of pointwise finite dimensional persistence modules to a matching between the barcodes of M and N. Our main result is that, in a precise sense, the quality of this matching is tightly controlled by the lengths of the longest intervals in the barcodes of ker f and coker f. As an immediate corollary, we obtain a new proof of the algebraic stability theorem for persistence barcodes [5, 9], a fundamental result in the theory of persistent homology. In contrast to previous proofs, ours shows explicitly how a δ-interleaving morphism between two persistence modules induces a δ-matching between the barcodes of the two modules. Our main result also specializes to a structure theorem for submodules and quotients of persistence modules.},
author = {Bauer, Ulrich and Lesnick, Michael},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {355 -- 364},
publisher = {ACM},
title = {{Induced matchings of barcodes and the algebraic stability of persistence}},
doi = {10.1145/2582112.2582168},
year = {2014},
}
@article{2154,
abstract = {A result of Boros and Füredi (d = 2) and of Bárány (arbitrary d) asserts that for every d there exists c_d > 0 such that for every n-point set P ⊂ ℝ^d, some point of ℝ^d is covered by at least c_d · (n choose d+1) of the d-simplices spanned by the points of P. The largest possible value of c_d has been the subject of ongoing research. Recently Gromov improved the existing lower bounds considerably by introducing a new, topological proof method. We provide an exposition of the combinatorial component of Gromov's approach, in terms accessible to combinatorialists and discrete geometers, and we investigate the limits of his method. In particular, we give tighter bounds on the cofilling profiles for the (n - 1)-simplex. These bounds yield a minor improvement over Gromov's lower bounds on c_d for large d, but they also show that the room for further improvement through the cofilling profiles alone is quite small. We also prove a slightly better lower bound for c_3 by an approach using an additional structure besides the cofilling profiles. We formulate a combinatorial extremal problem whose solution might perhaps lead to a tight lower bound for c_d.},
author = {Matoušek, Jiří and Wagner, Uli},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 33},
publisher = {Springer},
title = {{On Gromov's method of selecting heavily covered points}},
doi = {10.1007/s00454-014-9584-7},
volume = {52},
year = {2014},
}
@inproceedings{2155,
abstract = {Given a finite set of points in ℝ^n and a positive radius, we study the Čech, Delaunay-Čech, alpha, and wrap complexes as instances of a generalized discrete Morse theory. We prove that the latter three complexes are simple-homotopy equivalent. Our results have applications in topological data analysis and in the reconstruction of shapes from sampled data.},
author = {Bauer, Ulrich and Edelsbrunner, Herbert},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {484 -- 490},
publisher = {ACM},
title = {{The Morse theory of Čech and Delaunay filtrations}},
doi = {10.1145/2582112.2582167},
year = {2014},
}
@inproceedings{2156,
abstract = {We propose a metric for Reeb graphs, called the functional distortion distance. Under this distance, the Reeb graph is stable against small changes of input functions. At the same time, it remains discriminative at differentiating input functions. In particular, the main result is that the functional distortion distance between two Reeb graphs is bounded from below by the bottleneck distance between both the ordinary and extended persistence diagrams for appropriate dimensions. As an application of our results, we analyze a natural simplification scheme for Reeb graphs, and show that persistent features in the Reeb graph remain persistent under simplification. Understanding the stability of important features of the Reeb graph under simplification is an interesting problem in its own right, and critical to the practical usage of Reeb graphs.},
author = {Bauer, Ulrich and Ge, Xiaoyin and Wang, Yusu},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {464 -- 473},
publisher = {ACM},
title = {{Measuring distance between Reeb graphs}},
doi = {10.1145/2582112.2582169},
year = {2014},
}
@inproceedings{2157,
abstract = {We show that the following algorithmic problem is decidable: given a 2-dimensional simplicial complex, can it be embedded (topologically, or equivalently, piecewise linearly) in ℝ³? By a known reduction, it suffices to decide the embeddability of a given triangulated 3-manifold X into the 3-sphere S³. The main step, which allows us to simplify X and recurse, is in proving that if X can be embedded in S³, then there is also an embedding in which X has a short meridian, i.e., an essential curve in the boundary of X bounding a disk in S³ ∖ X with length bounded by a computable function of the number of tetrahedra of X.},
author = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {78 -- 84},
publisher = {ACM},
title = {{Embeddability in the 3-sphere is decidable}},
doi = {10.1145/2582112.2582137},
year = {2014},
}
@article{2158,
abstract = {Directional guidance of migrating cells is relatively well explored in the reductionist setting of cell culture experiments. Here spatial gradients of chemical cues as well as gradients of mechanical substrate characteristics prove sufficient to attract single cells as well as their collectives. How such gradients present and act in the context of an organism is far less clear. Here we review recent advances in understanding how guidance cues emerge and operate in the physiological context.},
author = {Majumdar, Ritankar and Sixt, Michael K and Parent, Carole},
journal = {Current Opinion in Cell Biology},
number = {1},
pages = {33 -- 40},
publisher = {Elsevier},
title = {{New paradigms in the establishment and maintenance of gradients during directed cell migration}},
doi = {10.1016/j.ceb.2014.05.010},
volume = {30},
year = {2014},
}
@inproceedings{2160,
abstract = {Transfer learning has received a lot of attention in the machine learning community over the last years, and several effective algorithms have been developed. However, relatively little is known about their theoretical properties, especially in the setting of lifelong learning, where the goal is to transfer information to tasks for which no data have been observed so far. In this work we study lifelong learning from a theoretical perspective. Our main result is a PAC-Bayesian generalization bound that offers a unified view on existing paradigms for transfer learning, such as the transfer of parameters or the transfer of low-dimensional representations. We also use the bound to derive two principled lifelong learning algorithms, and we show that these yield results comparable with existing methods.},
author = {Pentina, Anastasia and Lampert, Christoph},
booktitle = {Proceedings of the 31st International Conference on Machine Learning},
editor = {Xing, Eric and Jebara, Tony},
location = {Beijing, China},
pages = {991 -- 999},
publisher = {Omnipress},
title = {{A PAC-Bayesian bound for Lifelong Learning}},
volume = {32},
year = {2014},
}
@article{2161,
abstract = {Repeated pathogen exposure is a common threat in colonies of social insects, posing selection pressures on colony members to respond with improved disease-defense performance. We here tested whether experience gained by repeated tending of low-level fungus-exposed (Metarhizium robertsii) larvae may alter the performance of sanitary brood care in the clonal ant, Platythyrea punctata. We trained ants individually over nine consecutive trials to either sham-treated or fungus-exposed larvae. We then compared the larval grooming behavior of naive and trained ants and measured how effectively they removed infectious fungal conidiospores from the fungus-exposed larvae. We found that the ants changed the duration of larval grooming in response to both larval treatment and their level of experience: (1) sham-treated larvae received longer grooming than the fungus-exposed larvae, and (2) trained ants performed less self-grooming but longer larval grooming than naive ants, which was true for ants trained to fungus-exposed as well as to sham-treated larvae. Ants that groomed the fungus-exposed larvae for longer periods removed a higher number of fungal conidiospores from the surface of the fungus-exposed larvae. As experienced ants performed longer larval grooming, they were more effective in fungal removal, thus making them better caretakers under pathogen attack of the colony. By studying this clonal ant, we can thus conclude that even in the absence of genetic variation between colony members, differences in experience levels of brood care may affect performance of sanitary brood care in social insects.},
author = {Westhus, Claudia and Ugelvig, Line V and Tourdot, Edouard and Heinze, Jürgen and Doums, Claudie and Cremer, Sylvia},
journal = {Behavioral Ecology and Sociobiology},
number = {10},
pages = {1701 -- 1710},
publisher = {Springer},
title = {{Increased grooming after repeated brood care provides sanitary benefits in a clonal ant}},
doi = {10.1007/s00265-014-1778-8},
volume = {68},
year = {2014},
}
@inproceedings{2162,
abstract = {We study two-player (zero-sum) concurrent mean-payoff games played on a finite-state graph. We focus on the important subclass of ergodic games, where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: we establish (1) an optimal exponential bound on the patience of stationary strategies (where the patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) that the approximation problem lies in FNP; and (3) that the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP ∩ coNP is the long-standing best known bound). We present a variant of the strategy-iteration algorithm of Hoffman and Karp; show that both our algorithm and the classical value-iteration algorithm can approximate the value in exponential time; and identify a subclass where the value-iteration algorithm is an FPTAS. We also show that the exact value can be expressed in the existential theory of the reals, and establish square-root-sum hardness for a related class of games.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {122 -- 133},
publisher = {Springer},
title = {{The complexity of ergodic mean payoff games}},
doi = {10.1007/978-3-662-43951-7_11},
volume = {8573},
year = {2014},
}
@inproceedings{2163,
abstract = {We consider multi-player graph games with partial observation and parity objectives. While the decision problem for three-player games in which the first and second players form a coalition against the third player is undecidable in general, we present a decidability result for partial-observation games where the first and third players form a coalition against the second player; the second player is thus adversarial but weaker due to partial observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that even when player 1 has perfect observation, non-elementary memory is in general necessary for reachability objectives; the problem is decidable for safety and reachability objectives. From our results we derive new complexity results for partial-observation stochastic games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {110 -- 121},
publisher = {Springer},
title = {{Games with a weak adversary}},
doi = {10.1007/978-3-662-43951-7_10},
volume = {8573},
year = {2014},
}
@article{2164,
abstract = {Neuronal ectopia, such as granule cell dispersion (GCD) in temporal lobe epilepsy (TLE), has been assumed to result from a migration defect during development. Indeed, recent studies reported that aberrant migration of neonatally generated dentate granule cells (GCs) increased the risk of developing epilepsy later in life. In contrast, in the present study we show that fully differentiated GCs become motile following the induction of epileptiform activity, resulting in GCD. Hippocampal slice cultures from transgenic mice expressing green fluorescent protein in differentiated, but not in newly generated, GCs were incubated with the glutamate receptor agonist kainate (KA), which induced GC burst activity and GCD. Using real-time microscopy, we observed that KA-exposed, differentiated GCs translocated their cell bodies and changed their dendritic organization. As found in human TLE, KA application was associated with decreased expression of the extracellular matrix protein Reelin, particularly in hilar interneurons. Together, these findings suggest that KA-induced motility of differentiated GCs contributes to the development of GCD, and establish slice cultures as a model to study neuronal changes induced by epileptiform activity.},
author = {Chai, Xuejun and Münzner, Gert and Zhao, Shanting and Tinnes, Stefanie and Kowalski, Janina and Häussler, Ute and Young, Christina and Haas, Carola and Frotscher, Michael},
journal = {Cerebral Cortex},
number = {8},
pages = {2130 -- 2140},
publisher = {Oxford University Press},
title = {{Epilepsy-induced motility of differentiated neurons}},
doi = {10.1093/cercor/bht067},
volume = {24},
year = {2014},
}
@inproceedings{2167,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing accounts for a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties of the system from tests of its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration-testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
booktitle = {IEEE 7th International Conference on Software Testing, Verification and Validation},
isbn = {978-1-4799-2255-0},
issn = {2159-4848},
location = {Cleveland, USA},
publisher = {IEEE},
title = {{Compositional specifications for IOCO testing}},
doi = {10.1109/ICST.2014.50},
year = {2014},
}
@article{2168,
abstract = {Many species have an essentially continuous distribution in space, in which there are no natural divisions between randomly mating subpopulations. Yet, the standard approach to modelling these populations is to impose an arbitrary grid of demes, adjusting deme sizes and migration rates in an attempt to capture the important features of the population. Such indirect methods are required because of the failure of the classical models of isolation by distance, which have been shown to have major technical flaws. A recently introduced model of extinction and recolonisation in two dimensions solves these technical problems, and provides a rigorous technical foundation for the study of populations evolving in a spatial continuum. The coalescent process for this model is simply stated, but direct simulation is very inefficient for large neighbourhood sizes. We present efficient and exact algorithms to simulate this coalescent process for arbitrary sample sizes and numbers of loci, and analyse these algorithms in detail.},
author = {Kelleher, Jerome and Etheridge, Alison and Barton, Nicholas H},
journal = {Theoretical Population Biology},
pages = {13 -- 23},
publisher = {Academic Press},
title = {{Coalescent simulation in continuous space: Algorithms for large neighbourhood size}},
doi = {10.1016/j.tpb.2014.05.001},
volume = {95},
year = {2014},
}
@article{2169,
author = {Barton, Nicholas H and Novak, Sebastian and Paixao, Tiago},
journal = {Proceedings of the National Academy of Sciences},
number = {29},
pages = {10398 -- 10399},
publisher = {National Academy of Sciences},
title = {{Diverse forms of selection in evolution and computer science}},
doi = {10.1073/pnas.1410107111},
volume = {111},
year = {2014},
}
@article{2170,
abstract = { Short-read sequencing technologies have in principle made it feasible to draw detailed inferences about the recent history of any organism. In practice, however, this remains challenging due to the difficulty of genome assembly in most organisms and the lack of statistical methods powerful enough to discriminate between recent, nonequilibrium histories. We address both the assembly and inference challenges. We develop a bioinformatic pipeline for generating outgroup-rooted alignments of orthologous sequence blocks from de novo low-coverage short-read data for a small number of genomes, and show how such sequence blocks can be used to fit explicit models of population divergence and admixture in a likelihood framework. To illustrate our approach, we reconstruct the Pleistocene history of an oak-feeding insect (the oak gallwasp Biorhiza pallida), which, in common with many other taxa, was restricted during Pleistocene ice ages to a longitudinal series of southern refugia spanning the Western Palaearctic. Our analysis of sequence blocks sampled from a single genome from each of three major glacial refugia reveals support for an unexpected history dominated by recent admixture. Despite the fact that 80% of the genome is affected by admixture during the last glacial cycle, we are able to infer the deeper divergence history of these populations. These inferences are robust to variation in block length, mutation model and the sampling location of individual genomes within refugia. This combination of de novo assembly and numerical likelihood calculation provides a powerful framework for estimating recent population history that can be applied to any organism without the need for prior genetic resources.},
author = {Hearn, Jack and Stone, Graham and Bunnefeld, Lynsey and Nicholls, James and Barton, Nicholas H and Lohse, Konrad},
journal = {Molecular Ecology},
number = {1},
pages = {198 -- 211},
publisher = {Wiley-Blackwell},
title = {{Likelihood-based inference of population history from low-coverage de novo genome assemblies}},
doi = {10.1111/mec.12578},
volume = {23},
year = {2014},
}
@inproceedings{2171,
abstract = {We present LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves accuracy on par with other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation.},
author = {Kolesnikov, Alexander and Guillaumin, Matthieu and Ferrari, Vittorio and Lampert, Christoph},
booktitle = {Lecture Notes in Computer Science},
editor = {Fleet, David and Pajdla, Tomas and Schiele, Bernt and Tuytelaars, Tinne},
location = {Zurich, Switzerland},
number = {PART 3},
pages = {550 -- 565},
publisher = {Springer},
title = {{Closed-form approximate CRF training for scalable image segmentation}},
doi = {10.1007/978-3-319-10578-9_36},
volume = {8691},
year = {2014},
}
@inproceedings{2172,
abstract = {Fisher Kernels and Deep Learning were two developments with significant impact on large-scale object categorization in recent years. Both approaches were shown to achieve state-of-the-art results on large-scale object categorization datasets, such as ImageNet. Conceptually, however, they are perceived as very different and it is not uncommon for heated debates to spring up when advocates of both paradigms meet at conferences or workshops. In this work, we emphasize the similarities between both architectures rather than their differences, and we argue that such a unified view allows us to transfer ideas from one domain to the other. As a concrete example we introduce a method for learning a support vector machine classifier with Fisher kernel at the same time as a task-specific data representation. We reinterpret the setting as a multi-layer feed-forward network. Its final layer is the classifier, parameterized by a weight vector, and the two previous layers compute Fisher vectors, parameterized by the coefficients of a Gaussian mixture model. We introduce a gradient-descent-based learning algorithm that, in contrast to other feature learning techniques, is not just derived from intuition or biological analogy, but has a theoretical justification in the framework of statistical learning theory. Our experiments show that the new training procedure leads to significant improvements in classification accuracy while preserving the modularity and geometric interpretability of a support vector machine setup.},
author = {Sydorov, Vladyslav and Sakurada, Mayu and Lampert, Christoph},
booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
location = {Columbus, USA},
pages = {1402 -- 1409},
publisher = {IEEE},
title = {{Deep Fisher Kernels – End-to-end learning of the Fisher Kernel GMM parameters}},
doi = {10.1109/CVPR.2014.182},
year = {2014},
}
@inproceedings{2173,
abstract = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.},
author = {Khamis, Sameh and Lampert, Christoph},
booktitle = {Proceedings of the British Machine Vision Conference 2014},
location = {Nottingham, UK},
publisher = {BMVA Press},
title = {{CoConut: Co-classification with output space regularization}},
year = {2014},
}
@article{2174,
abstract = {When polygenic traits are under stabilizing selection, many different combinations of alleles allow close adaptation to the optimum. If alleles have equal effects, all combinations that result in the same deviation from the optimum are equivalent. Furthermore, the genetic variance that is maintained by mutation-selection balance is 2μ/S per locus, where μ is the mutation rate and S the strength of stabilizing selection. In reality, alleles vary in their effects, making the fitness landscape asymmetric and complicating analysis of the equilibria. We show that the resulting genetic variance depends on the fraction of alleles near fixation, which contribute by 2μ/S, and on the total mutational effects of alleles that are at intermediate frequency. The interplay between stabilizing selection and mutation leads to a sharp transition: alleles with effects smaller than a threshold value of 2√(μ/S) remain polymorphic, whereas those with larger effects are fixed. The genetic load in equilibrium is less than for traits of equal effects, and the fitness equilibria are more similar. We find that if the optimum is displaced, alleles with effects close to the threshold value sweep first, and their rate of increase is bounded by √(μS). Long-term response leads in general to well-adapted traits, unlike the case of equal effects that often end up at a suboptimal fitness peak. However, the particular peaks to which the populations converge are extremely sensitive to the initial states and to the speed of the shift of the optimum trait value.},
author = {De Vladar, Harold and Barton, Nicholas H},
journal = {Genetics},
number = {2},
pages = {749 -- 767},
publisher = {Genetics Society of America},
title = {{Stability and response of polygenic traits to stabilizing selection and mutation}},
doi = {10.1534/genetics.113.159111},
volume = {197},
year = {2014},
}
@article{2175,
abstract = {The cerebral cortex, the seat of our cognitive abilities, is composed of an intricate network of billions of excitatory projection neurons and inhibitory interneurons. Postmitotic cortical neurons are generated by a diverse set of neural stem cell progenitors within dedicated zones and defined periods of neurogenesis during embryonic development. Disruptions in neurogenesis can lead to alterations in the neuronal cytoarchitecture, which is thought to represent a major underlying cause of several neurological disorders, including microcephaly, autism and epilepsy. Although a number of signaling pathways regulating neurogenesis have been described, the precise cellular and molecular mechanisms regulating the functional neural stem cell properties in cortical neurogenesis remain unclear. Here, we discuss the most up-to-date strategies to monitor the fundamental mechanistic parameters of neuronal progenitor proliferation, and recent advances in deciphering the logic and dynamics of neurogenesis.},
author = {Postiglione, Maria P and Hippenmeyer, Simon},
journal = {Future Neurology},
number = {3},
pages = {323 -- 340},
publisher = {Future Medicine Ltd.},
title = {{Monitoring neurogenesis in the cerebral cortex: an update}},
doi = {10.2217/fnl.14.18},
volume = {9},
year = {2014},
}
@article{2176,
abstract = {Electron microscopy (EM) allows for the simultaneous visualization of all tissue components at high resolution. However, the extent to which conventional aldehyde fixation and ethanol dehydration of the tissue alter the fine structure of cells and organelles, thereby preventing detection of subtle structural changes induced by an experiment, has remained an issue. Attempts have been made to rapidly freeze tissue to preserve native ultrastructure. Shock-freezing of living tissue under high pressure (high-pressure freezing, HPF) followed by cryosubstitution of the tissue water avoids aldehyde fixation and dehydration in ethanol; the tissue water is immobilized in ~50 ms, and a close-to-native fine structure of cells, organelles and molecules is preserved. Here we describe a protocol for HPF that is useful to monitor ultrastructural changes associated with functional changes at synapses in the brain but can be applied to many other tissues as well. The procedure requires a high-pressure freezer and takes a minimum of 7 d but can be paused at several points.},
author = {Studer, Daniel and Zhao, Shanting and Chai, Xuejun and Jonas, Peter M and Graber, Werner and Nestel, Sigrun and Frotscher, Michael},
journal = {Nature Protocols},
number = {6},
pages = {1480 -- 1495},
publisher = {Nature Publishing Group},
title = {{Capture of activity-induced ultrastructural changes at synapses by high-pressure freezing of brain tissue}},
doi = {10.1038/nprot.2014.099},
volume = {9},
year = {2014},
}
@inproceedings{2177,
abstract = {We give evidence for the difficulty of computing Betti numbers of simplicial complexes over a finite field. We do this by reducing the rank computation for sparse matrices with m non-zero entries to computing Betti numbers of simplicial complexes consisting of at most a constant times m simplices. Together with the known reduction in the other direction, this implies that the two problems have the same computational complexity.},
author = {Edelsbrunner, Herbert and Parsa, Salman},
booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
location = {Portland, USA},
pages = {152 -- 160},
publisher = {SIAM},
title = {{On the computational complexity of Betti numbers: Reductions from matrix rank}},
doi = {10.1137/1.9781611973402.11},
year = {2014},
}