@inproceedings{2027,
abstract = {We present a general framework for applying machine-learning algorithms to the verification of Markov decision processes (MDPs). The primary goal of these techniques is to improve performance by avoiding an exhaustive exploration of the state space. Our framework focuses on probabilistic reachability, which is a core property for verification, and is illustrated through two distinct instantiations. The first assumes that full knowledge of the MDP is available, and performs a heuristic-driven partial exploration of the model, yielding precise lower and upper bounds on the required probability. The second tackles the case where we may only sample the MDP, and yields probabilistic guarantees, again in terms of both the lower and upper bounds, which provides efficient stopping criteria for the approximation. The latter is the first extension of statistical model checking for unbounded properties in MDPs. In contrast with other related techniques, our approach is not restricted to time-bounded (finite-horizon) or discounted properties, nor does it assume any particular properties of the MDP. We also show how our methods extend to LTL objectives. We present experimental results showing the performance of our framework on several examples.},
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Forejt, Vojtěch and Kretinsky, Jan and Kwiatkowska, Marta and Parker, David and Ujma, Mateusz},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Cassez, Franck and Raskin, Jean-François},
location = {Sydney, Australia},
pages = {98--114},
publisher = {Springer},
title = {{Verification of Markov decision processes using learning algorithms}},
doi = {10.1007/978-3-319-11936-6_8},
volume = {8837},
year = {2014},
}
@article{2028,
abstract = {Understanding the dynamics of noisy neurons remains an important challenge in neuroscience. Here, we describe a simple probabilistic model that accurately describes the firing behavior in a large class (type II) of neurons. To demonstrate the usefulness of this model, we show how it accurately predicts the interspike interval (ISI) distributions, bursting patterns and mean firing rates found by: (1) simulations of the classic Hodgkin-Huxley model with channel noise, (2) experimental data from squid giant axon with a noisy input current and (3) experimental data on noisy firing from a neuron within the suprachiasmatic nucleus (SCN). This simple model has 6 parameters, however, in some cases, two of these parameters are coupled and only 5 parameters account for much of the known behavior. From these parameters, many properties of spiking can be found through simple calculation. Thus, we show how the complex effects of noise can be understood through a simple and general probabilistic model.},
author = {Bodova, Katarina and Paydarfar, David and Forger, Daniel},
journal = {Journal of Theoretical Biology},
pages = {40--54},
publisher = {Academic Press},
title = {{Characterizing spiking in noisy type II neurons}},
doi = {10.1016/j.jtbi.2014.09.041},
volume = {365},
year = {2014},
}
@article{2029,
  abstract  = {Spin-wave theory is a key ingredient in our comprehension of quantum spin systems, and is used successfully for understanding a wide range of magnetic phenomena, including magnon condensation and stability of patterns in dipolar systems. Nevertheless, several decades of research failed to establish the validity of spin-wave theory rigorously, even for the simplest models of quantum spins. A rigorous justification of the method for the three-dimensional quantum Heisenberg ferromagnet at low temperatures is presented here. We derive sharp bounds on its free energy by combining a bosonic formulation of the model introduced by Holstein and Primakoff with probabilistic estimates and operator inequalities.},
  author    = {Correggi, Michele and Giuliani, Alessandro and Seiringer, Robert},
  journal   = {EPL},
  number    = {2},
  publisher = {IOP Publishing Ltd.},
  title     = {{Validity of spin-wave theory for the quantum Heisenberg model}},
  doi       = {10.1209/0295-5075/108/20003},
  volume    = {108},
  year      = {2014},
}
@article{2031,
abstract = {A puzzling property of synaptic transmission, originally established at the neuromuscular junction, is that the time course of transmitter release is independent of the extracellular Ca2+ concentration ([Ca2+]o), whereas the rate of release is highly [Ca2+]o-dependent. Here, we examine the time course of release at inhibitory basket cell-Purkinje cell synapses and show that it is independent of [Ca2+]o. Modeling of Ca2+-dependent transmitter release suggests that the invariant time course of release critically depends on tight coupling between Ca2+ channels and release sensors. Experiments with exogenous Ca2+ chelators reveal that channel-sensor coupling at basket cell-Purkinje cell synapses is very tight, with a mean distance of 10–20 nm. Thus, tight channel-sensor coupling provides a mechanistic explanation for the apparent [Ca2+]o independence of the time course of release.},
author = {Arai, Itaru and Jonas, Peter M},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Nanodomain coupling explains Ca$^{2+}$ independence of transmitter release time course at a fast central synapse}},
doi = {10.7554/eLife.04057},
volume = {3},
year = {2014},
}
@article{2032,
abstract = {As light-based control of fundamental signaling pathways is becoming a reality, the field of optogenetics is rapidly moving beyond neuroscience. We have recently developed receptor tyrosine kinases that are activated by light and control cell proliferation, epithelial–mesenchymal transition, and angiogenic sprouting—cell behaviors central to cancer progression.},
author = {Inglés Prieto, Álvaro and Gschaider-Reichhart, Eva and Schelch, Karin and Janovjak, Harald L and Grusch, Michael},
journal = {Molecular and Cellular Oncology},
number = {4},
publisher = {Taylor \& Francis},
title = {{The optogenetic promise for oncology: Episode I}},
doi = {10.4161/23723548.2014.964045},
volume = {1},
year = {2014},
}
@inproceedings{2033,
  abstract  = {The learning with privileged information setting has recently attracted a lot of attention within the machine learning community, as it allows the integration of additional knowledge into the training process of a classifier, even when this comes in the form of a data modality that is not available at test time. Here, we show that privileged information can naturally be treated as noise in the latent function of a Gaussian process classifier (GPC). That is, in contrast to the standard GPC setting, the latent function is not just a nuisance but a feature: it becomes a natural measure of confidence about the training data by modulating the slope of the GPC probit likelihood function. Extensive experiments on public datasets show that the proposed GPC method using privileged noise, called GPC+, improves over a standard GPC without privileged knowledge, and also over the current state-of-the-art SVM-based method, SVM+. Moreover, we show that advanced neural networks and deep learning methods can be compressed as privileged information.},
  author    = {Hernandez Lobato, Daniel and Sharmanska, Viktoriia and Kersting, Kristian and Lampert, Christoph and Quadrianto, Novi},
  booktitle = {Advances in Neural Information Processing Systems},
  location  = {Montreal, Canada},
  number    = {January},
  pages     = {837--845},
  publisher = {Neural Information Processing Systems},
  title     = {{Mind the nuisance: Gaussian process classification using privileged noise}},
  volume    = {1},
  year      = {2014},
}
@article{2036,
abstract = {In rapidly changing environments, selection history may impact the dynamics of adaptation. Mutations selected in one environment may result in pleiotropic fitness trade-offs in subsequent novel environments, slowing the rates of adaptation. Epistatic interactions between mutations selected in sequential stressful environments may slow or accelerate subsequent rates of adaptation, depending on the nature of that interaction. We explored the dynamics of adaptation during sequential exposure to herbicides with different modes of action in Chlamydomonas reinhardtii. Evolution of resistance to two of the herbicides was largely independent of selection history. For carbetamide, previous adaptation to other herbicide modes of action positively impacted the likelihood of adaptation to this herbicide. Furthermore, while adaptation to all individual herbicides was associated with pleiotropic fitness costs in stress-free environments, we observed that accumulation of resistance mechanisms was accompanied by a reduction in overall fitness costs. We suggest that antagonistic epistasis may be a driving mechanism that enables populations to more readily adapt in novel environments. These findings highlight the potential for sequences of xenobiotics to facilitate the rapid evolution of multiple-drug and -pesticide resistance, as well as the potential for epistatic interactions between adaptive mutations to facilitate evolutionary rescue in rapidly changing environments.},
author = {Lagator, Mato and Colegrave, Nick and Neve, Paul},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1794},
publisher = {The Royal Society},
title = {{Selection history and epistatic interactions impact dynamics of adaptation to novel environmental stresses}},
doi = {10.1098/rspb.2014.1679},
volume = {281},
year = {2014},
}
@article{2038,
  abstract  = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
  author    = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
  journal   = {ACM Transactions on Computational Logic (TOCL)},
  number    = {4},
  publisher = {ACM},
  title     = {{Temporal specifications with accumulative values}},
  doi       = {10.1145/2629686},
  volume    = {15},
  year      = {2014},
}
@article{2039,
  abstract  = {A fundamental question in biology is the following: what is the time scale that is needed for evolutionary innovations? There are many results that characterize single steps in terms of the fixation time of new mutants arising in populations of certain size and structure. But here we ask a different question, which is concerned with the much longer time scale of evolutionary trajectories: how long does it take for a population exploring a fitness landscape to find target sequences that encode new biological functions? Our key variable is the length, (Formula presented.) of the genetic sequence that undergoes adaptation. In computer science there is a crucial distinction between problems that require algorithms which take polynomial or exponential time. The latter are considered to be intractable. Here we develop a theoretical approach that allows us to estimate the time of evolution as function of (Formula presented.) We show that adaptation on many fitness landscapes takes time that is exponential in (Formula presented.) even if there are broad selection gradients and many targets uniformly distributed in sequence space. These negative results lead us to search for specific mechanisms that allow evolution to work on polynomial time scales. We study a regeneration process and show that it enables evolution to work in polynomial time.},
  author    = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Adlam, Ben and Nowak, Martin},
  journal   = {PLoS Computational Biology},
  number    = {9},
  publisher = {Public Library of Science},
  title     = {{The time scale of evolutionary innovation}},
  doi       = {10.1371/journal.pcbi.1003818},
  volume    = {10},
  year      = {2014},
}
@article{2040,
  abstract  = {Development requires tissue growth as well as cell diversification. To address how these processes are coordinated, we analyzed the development of molecularly distinct domains of neural progenitors in the mouse and chick neural tube. We show that during development, these domains undergo changes in size that do not scale with changes in overall tissue size. Our data show that domain proportions are first established by opposing morphogen gradients and subsequently controlled by domain-specific regulation of differentiation rate but not differences in proliferation rate. Regulation of differentiation rate is key to maintaining domain proportions while accommodating both intra- and interspecies variations in size. Thus, the sequential control of progenitor specification and differentiation elaborates pattern without requiring that signaling gradients grow as tissues expand. },
  author    = {Kicheva, Anna and Bollenbach, Mark Tobias and Ribeiro, Ana and Pérez Valle, Helena and Lovell Badge, Robin and Episkopou, Vasso and Briscoe, James},
  journal   = {Science},
  number    = {6204},
  publisher = {American Association for the Advancement of Science},
  title     = {{Coordination of progenitor specification and growth in mouse and chick spinal cord}},
  doi       = {10.1126/science.1254927},
  volume    = {345},
  year      = {2014},
}
@article{2041,
  abstract  = {The hippocampus mediates several higher brain functions, such as learning, memory, and spatial coding. The input region of the hippocampus, the dentate gyrus, plays a critical role in these processes. Several lines of evidence suggest that the dentate gyrus acts as a preprocessor of incoming information, preparing it for subsequent processing in CA3. For example, the dentate gyrus converts input from the entorhinal cortex, where cells have multiple spatial fields, into the spatially more specific place cell activity characteristic of the CA3 region. Furthermore, the dentate gyrus is involved in pattern separation, transforming relatively similar input patterns into substantially different output patterns. Finally, the dentate gyrus produces a very sparse coding scheme in which only a very small fraction of neurons are active at any one time.},
  author    = {Jonas, Peter M and Lisman, John},
  journal   = {Frontiers in Neural Circuits},
  publisher = {Frontiers Research Foundation},
  title     = {{Structure, function and plasticity of hippocampal dentate gyrus microcircuits}},
  doi       = {10.3389/fncir.2014.00107},
  volume    = {8},
  year      = {2014},
}
@article{2042,
abstract = {Background: CRISPR is a microbial immune system likely to be involved in host-parasite coevolution. It functions using target sequences encoded by the bacterial genome, which interfere with invading nucleic acids using a homology-dependent system. The system also requires protospacer associated motifs (PAMs), short motifs close to the target sequence that are required for interference in CRISPR types I and II. Here, we investigate whether PAMs are depleted in phage genomes due to selection pressure to escape recognition. Results: To this end, we analyzed two data sets. Phages infecting all bacterial hosts were analyzed first, followed by a detailed analysis of phages infecting the genus Streptococcus, where PAMs are best understood. We use two different measures of motif underrepresentation that control for codon bias and the frequency of submotifs. We compare phages infecting species with a particular CRISPR type to those infecting species without that type. Since only known PAMs were investigated, the analysis is restricted to CRISPR types I-C and I-E and in Streptococcus to types I-C and II. We found evidence for PAM depletion in Streptococcus phages infecting hosts with CRISPR type I-C, in Vibrio phages infecting hosts with CRISPR type I-E and in Streptococcus thermophilus phages infecting hosts with type II-A, known as CRISPR3. Conclusions: The observed motif depletion in phages with hosts having CRISPR can be attributed to selection rather than to mutational bias, as mutational bias should affect the phages of all hosts. This observation implies that the CRISPR system has been efficient in the groups discussed here.},
author = {Kupczok, Anne and Bollback, Jonathan P},
journal = {BMC Genomics},
number = {1},
publisher = {BioMed Central},
title = {{Motif depletion in bacteriophages infecting hosts with CRISPR systems}},
doi = {10.1186/1471-2164-15-663},
volume = {15},
year = {2014},
}
@inproceedings{2043,
abstract = {Persistent homology is a popular and powerful tool for capturing topological features of data. Advances in algorithms for computing persistent homology have reduced the computation time drastically – as long as the algorithm does not exhaust the available memory. Following up on a recently presented parallel method for persistence computation on shared memory systems [1], we demonstrate that a simple adaption of the standard reduction algorithm leads to a variant for distributed systems. Our algorithmic design ensures that the data is distributed over the nodes without redundancy; this permits the computation of much larger instances than on a single machine. Moreover, we observe that the parallelism at least compensates for the overhead caused by communication between nodes, and often even speeds up the computation compared to sequential and even parallel shared memory algorithms. In our experiments, we were able to compute the persistent homology of filtrations with more than a billion ($10^9$) elements within seconds on a cluster with 32 nodes using less than 6GB of memory per node.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Proceedings of the Workshop on Algorithm Engineering and Experiments},
editor = {McGeoch, Catherine and Meyer, Ulrich},
location = {Portland, USA},
pages = {31--38},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Distributed computation of persistent homology}},
doi = {10.1137/1.9781611973198.4},
year = {2014},
}
@incollection{2044,
abstract = {We present a parallel algorithm for computing the persistent homology of a filtered chain complex. Our approach differs from the commonly used reduction algorithm by first computing persistence pairs within local chunks, then simplifying the unpaired columns, and finally applying standard reduction on the simplified matrix. The approach generalizes a technique by Günther et al., which uses discrete Morse Theory to compute persistence; we derive the same worst-case complexity bound in a more general context. The algorithm employs several practical optimization techniques, which are of independent interest. Our sequential implementation of the algorithm is competitive with state-of-the-art methods, and we further improve the performance through parallel computation.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Topological Methods in Data Analysis and Visualization III},
editor = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald},
pages = {103--117},
publisher = {Springer},
title = {{Clear and Compress: Computing Persistent Homology in Chunks}},
doi = {10.1007/978-3-319-04099-8_7},
year = {2014},
}
@inproceedings{2045,
abstract = {We introduce and study a new notion of enhanced chosen-ciphertext security (ECCA) for public-key encryption. Loosely speaking, in the ECCA security experiment, the decryption oracle provided to the adversary is augmented to return not only the output of the decryption algorithm on a queried ciphertext but also of a randomness-recovery algorithm associated to the scheme. Our results mainly concern the case where the randomness-recovery algorithm is efficient. We provide constructions of ECCA-secure encryption from adaptive trapdoor functions as defined by Kiltz et al. (EUROCRYPT 2010), resulting in ECCA encryption from standard number-theoretic assumptions. We then give two applications of ECCA-secure encryption: (1) We use it as a unifying concept in showing equivalence of adaptive trapdoor functions and tag-based adaptive trapdoor functions, resolving an open question of Kiltz et al. (2) We show that ECCA-secure encryption can be used to securely realize an approach to public-key encryption with non-interactive opening (PKENO) originally suggested by Damgård and Thorbek (EUROCRYPT 2007), resulting in new and practical PKENO schemes quite different from those in prior work. Our results demonstrate that ECCA security is of both practical and theoretical interest.},
author = {Dachman Soled, Dana and Fuchsbauer, Georg and Mohassel, Payman and O’Neill, Adam},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {329--344},
publisher = {Springer},
title = {{Enhanced chosen-ciphertext security and applications}},
doi = {10.1007/978-3-642-54631-0_19},
volume = {8383},
year = {2014},
}
@inproceedings{2046,
abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.},
author = {Bellare, Mihir and Fuchsbauer, Georg},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {520--537},
publisher = {Springer},
title = {{Policy-based signatures}},
doi = {10.1007/978-3-642-54631-0_30},
volume = {8383},
year = {2014},
}
@inproceedings{2047,
abstract = {Following the publication of an attack on genome-wide association studies (GWAS) data proposed by Homer et al., considerable attention has been given to developing methods for releasing GWAS data in a privacy-preserving way. Here, we develop an end-to-end differentially private method for solving regression problems with convex penalty functions and selecting the penalty parameters by cross-validation. In particular, we focus on penalized logistic regression with elastic-net regularization, a method widely used in GWAS analyses to identify disease-causing genes. We show how a differentially private procedure for penalized logistic regression with elastic-net regularization can be applied to the analysis of GWAS data and evaluate our method’s performance.},
author = {Yu, Fei and Rybar, Michal and Uhler, Caroline and Fienberg, Stephen},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Domingo Ferrer, Josep},
location = {Ibiza, Spain},
pages = {170--184},
publisher = {Springer},
title = {{Differentially-private logistic regression for detecting multiple-SNP association in GWAS databases}},
doi = {10.1007/978-3-319-11257-2_14},
volume = {8744},
year = {2014},
}
@article{2050,
abstract = {The flow instability and further transition to turbulence in a toroidal pipe (torus) with curvature ratio (tube-to-coiling diameter) 0.049 is investigated experimentally. The flow inside the toroidal pipe is driven by a steel sphere fitted to the inner pipe diameter. The sphere is moved with constant azimuthal velocity from outside the torus by a moving magnet. The experiment is designed to investigate curved pipe flow by optical measurement techniques. Using stereoscopic particle image velocimetry, laser Doppler velocimetry and pressure drop measurements, the flow is measured for Reynolds numbers ranging from 1000 to 15 000. Time- and space-resolved velocity fields are obtained and analysed. The steady axisymmetric basic flow is strongly influenced by centrifugal effects. On an increase of the Reynolds number we find a sequence of bifurcations. For Re=4075±2% a supercritical bifurcation to an oscillatory flow is found in which waves travel in the streamwise direction with a phase velocity slightly faster than the mean flow. The oscillatory flow is superseded by a presumably quasi-periodic flow at a further increase of the Reynolds number before turbulence sets in. The results are found to be compatible, in general, with earlier experimental and numerical investigations on transition to turbulence in helical and curved pipes. However, important aspects of the bifurcation scenario differ considerably.},
author = {Kühnen, Jakob and Holzner, Markus and Hof, Björn and Kuhlmann, Hendrik},
journal = {Journal of Fluid Mechanics},
pages = {463--491},
publisher = {Cambridge University Press},
title = {{Experimental investigation of transitional flow in a toroidal pipe}},
doi = {10.1017/jfm.2013.603},
volume = {738},
year = {2014},
}
@inproceedings{2051,
abstract = {We show that the usual score function for conditional Markov networks can be written as the expectation over the scores of their spanning trees. We also show that a small random sample of these output trees can attain a significant fraction of the margin obtained by the complete graph and we provide conditions under which we can perform tractable inference. The experimental results confirm that practical learning is scalable to realistic datasets using this approach.},
author = {Marchand, Mario and Su, Hongyu and Morvant, Emilie and Rousu, Juho and Shawe-Taylor, John},
booktitle = {Advances in Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {{Multilabel structured output learning with random spanning trees of max-margin Markov networks}},
year = {2014},
}
@inproceedings{2053,
abstract = {In contrast to the usual understanding of probabilistic systems as stochastic processes, recently these systems have also been regarded as transformers of probabilities. In this paper, we give a natural definition of strong bisimulation for probabilistic systems corresponding to this view that treats probability distributions as first-class citizens. Our definition applies in the same way to discrete systems as well as to systems with uncountable state and action spaces. Several examples demonstrate that our definition refines the understanding of behavioural equivalences of probabilistic systems. In particular, it solves a longstanding open problem concerning the representation of memoryless continuous time by memoryfull continuous time. Finally, we give algorithms for computing this bisimulation not only for finite but also for classes of uncountably infinite systems.},
author = {Hermanns, Holger and Krčál, Jan and Kretinsky, Jan},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Baldan, Paolo and Gorla, Daniele},
location = {Rome, Italy},
pages = {249--265},
publisher = {Springer},
title = {{Probabilistic bisimulation: Naturally on distributions}},
doi = {10.1007/978-3-662-44584-6_18},
volume = {8704},
year = {2014},
}
@article{2056,
abstract = {We consider a continuous-time Markov chain (CTMC) whose state space is partitioned into aggregates, and each aggregate is assigned a probability measure. A sufficient condition for defining a CTMC over the aggregates is presented as a variant of weak lumpability, which also characterizes that the measure over the original process can be recovered from that of the aggregated one. We show how the applicability of de-aggregation depends on the initial distribution. The application section is devoted to illustrate how the developed theory aids in reducing CTMC models of biochemical systems particularly in connection to protein-protein interactions. We assume that the model is written by a biologist in form of site-graph-rewrite rules. Site-graph-rewrite rules compactly express that, often, only a local context of a protein (instead of a full molecular species) needs to be in a certain configuration in order to trigger a reaction event. This observation leads to suitable aggregate Markov chains with smaller state spaces, thereby providing sufficient reduction in computational complexity. This is further exemplified in two case studies: simple unbounded polymerization and early EGFR/insulin crosstalk.},
author = {Ganguly, Arnab and Petrov, Tatjana and Koeppl, Heinz},
journal = {Journal of Mathematical Biology},
number = {3},
pages = {767--797},
publisher = {Springer},
title = {{Markov chain aggregation and its applications to combinatorial reaction networks}},
doi = {10.1007/s00285-013-0738-7},
volume = {69},
year = {2014},
}
@inproceedings{2057,
abstract = {In the past few years, a lot of attention has been devoted to multimedia indexing by fusing multimodal informations. Two kinds of fusion schemes are generally considered: The early fusion and the late fusion. We focus on late classifier fusion, where one combines the scores of each modality at the decision level. To tackle this problem, we investigate a recent and elegant well-founded quadratic program named MinCq coming from the machine learning PAC-Bayesian theory. MinCq looks for the weighted combination, over a set of real-valued functions seen as voters, leading to the lowest misclassification rate, while maximizing the voters’ diversity. We propose an extension of MinCq tailored to multimedia indexing. Our method is based on an order-preserving pairwise loss adapted to ranking that allows us to improve Mean Averaged Precision measure while taking into account the diversity of the voters that we want to fuse. We provide evidence that this method is naturally adapted to late fusion procedures and confirm the good behavior of our approach on the challenging PASCAL VOC’07 benchmark.},
author = {Morvant, Emilie and Habrard, Amaury and Ayache, Stéphane},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
location = {Joensuu, Finland},
pages = {153 -- 162},
publisher = {Springer},
title = {{Majority vote of diverse classifiers for late fusion}},
doi = {10.1007/978-3-662-44415-3_16},
volume = {8621},
year = {2014},
}
@inproceedings{2058,
abstract = {We present a method for smoothly blending between existing liquid animations. We introduce a semi-automatic method for matching two existing liquid animations, which we use to create new fluid motion that plausibly interpolates the input. Our contributions include a new space-time non-rigid iterative closest point algorithm that incorporates user guidance, a subsampling technique for efficient registration of meshes with millions of vertices, and a fast surface extraction algorithm that produces 3D triangle meshes from a 4D space-time surface. Our technique can be used to instantly create hundreds of new simulations, or to interactively explore complex parameter spaces. Our method is guaranteed to produce output that does not deviate from the input animations, and it generalizes to multiple dimensions. Because our method runs at interactive rates after the initial precomputation step, it has potential applications in games and training simulations.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Thuerey, Nils and Turk, Greg},
booktitle = {ACM Transactions on Graphics},
location = {Vancouver, Canada},
number = {4},
publisher = {ACM},
title = {{Blending liquids}},
doi = {10.1145/2601097.2601126},
volume = {33},
year = {2014},
}
@article{2059,
abstract = {Plant embryogenesis is regulated by differential distribution of the plant hormone auxin. However, the cells establishing these gradients during microspore embryogenesis remain to be identified. For the first time, we describe, using the DR5 or DR5rev reporter gene systems, the GFP- and GUS-based auxin biosensors to monitor auxin during Brassica napus androgenesis at cellular resolution in the initial stages. Our study provides evidence that the distribution of auxin changes during embryo development and depends on the temperature-inducible in vitro culture conditions. For this, microspores (mcs) were induced to embryogenesis by heat treatment and then subjected to genetic modification via Agrobacterium tumefaciens. The duration of high temperature treatment had a significant influence on auxin distribution in isolated and in vitro-cultured microspores and on microspore-derived embryo development. In the “mild” heat-treated (1 day at 32 °C) mcs, auxin localized in a polar way already at the uni-nucleate microspore, which was critical for the initiation of embryos with suspensor-like structure. Assuming a mean mcs radius of 20 μm, endogenous auxin content in a single cell corresponded to concentration of 1.01 μM. In mcs subjected to a prolonged heat (5 days at 32 °C), although auxin concentration increased dozen times, auxin polarization was set up at a few-celled pro-embryos without suspensor. Those embryos were enclosed in the outer wall called the exine. The exine rupture was accompanied by the auxin gradient polarization. Relative quantitative estimation of auxin, using time-lapse imaging, revealed that primordia possess up to 1.3-fold higher amounts than those found in the root apices of transgenic MDEs in the presence of exogenous auxin. 
Our results show, for the first time, which concentration of endogenous auxin coincides with the first cell division and how the high temperature interplays with auxin, by what affects delay early establishing microspore polarity. Moreover, we present how the local auxin accumulation demonstrates the apical–basal axis formation of the androgenic embryo and directs the axiality of the adult haploid plant.},
author = {Dubas, Ewa and Moravčíková, Jana and Libantová, Jana and Matušíková, Ildikó and Benková, Eva and Zur, Iwona and Krzewska, Monika},
journal = {Protoplasma},
number = {5},
pages = {1077 -- 1087},
publisher = {Springer},
title = {{The influence of heat stress on auxin distribution in transgenic B. napus microspores and microspore derived embryos}},
doi = {10.1007/s00709-014-0616-1},
volume = {251},
year = {2014},
}
@article{2062,
abstract = {The success story of fast-spiking, parvalbumin-positive (PV+) GABAergic interneurons (GABA, γ-aminobutyric acid) in the mammalian central nervous system is noteworthy. In 1995, the properties of these interneurons were completely unknown. Twenty years later, thanks to the massive use of subcellular patch-clamp techniques, simultaneous multiple-cell recording, optogenetics, in vivo measurements, and computational approaches, our knowledge about PV+ interneurons became more extensive than for several types of pyramidal neurons. These findings have implications beyond the “small world” of basic research on GABAergic cells. For example, the results provide a first proof of principle that neuroscientists might be able to close the gaps between the molecular, cellular, network, and behavioral levels, representing one of the main challenges at the present time. Furthermore, the results may form the basis for PV+ interneurons as therapeutic targets for brain disease in the future. However, much needs to be learned about the basic function of these interneurons before clinical neuroscientists will be able to use PV+ interneurons for therapeutic purposes.},
author = {Hu, Hua and Gan, Jian and Jonas, Peter M},
journal = {Science},
number = {6196},
publisher = {American Association for the Advancement of Science},
title = {{Fast-spiking parvalbumin$^+$ GABAergic interneurons: From cellular design to microcircuit function}},
doi = {10.1126/science.1255263},
volume = {345},
year = {2014},
}
@article{2064,
abstract = {We examined the synaptic structure, quantity, and distribution of α-amino-3-hydroxy-5-methylisoxazole-4-propionic acid (AMPA)- and N-methyl-D-aspartate (NMDA)-type glutamate receptors (AMPARs and NMDARs, respectively) in rat cochlear nuclei by a highly sensitive freeze-fracture replica labeling technique. Four excitatory synapses formed by two distinct inputs, auditory nerve (AN) and parallel fibers (PF), on different cell types were analyzed. These excitatory synapse types included AN synapses on bushy cells (AN-BC synapses) and fusiform cells (AN-FC synapses) and PF synapses on FC (PF-FC synapses) and cartwheel cell spines (PF-CwC synapses). Immunogold labeling revealed differences in synaptic structure as well as AMPAR and NMDAR number and/or density in both AN and PF synapses, indicating a target-dependent organization. The immunogold receptor labeling also identified differences in the synaptic organization of FCs based on AN or PF connections, indicating an input-dependent organization in FCs. Among the four excitatory synapse types, the AN-BC synapses were the smallest and had the most densely packed intramembrane particles (IMPs), whereas the PF-CwC synapses were the largest and had sparsely packed IMPs. All four synapse types showed positive correlations between the IMP-cluster area and the AMPAR number, indicating a common intrasynapse-type relationship for glutamatergic synapses. Immunogold particles for AMPARs were distributed over the entire area of individual AN synapses; PF synapses often showed synaptic areas devoid of labeling. The gold-labeling for NMDARs occurred in a mosaic fashion, with less positive correlations between the IMP-cluster area and the NMDAR number. Our observations reveal target- and input-dependent features in the structure, number, and organization of AMPARs and NMDARs in AN and PF synapses.},
author = {Rubio, María and Fukazawa, Yugo and Kamasawa, Naomi and Clarkson, Cheryl and Molnár, Elek and Shigemoto, Ryuichi},
journal = {Journal of Comparative Neurology},
number = {18},
pages = {4023 -- 4042},
publisher = {Wiley-Blackwell},
title = {{Target- and input-dependent organization of AMPA and NMDA receptors in synaptic connections of the cochlear nucleus}},
doi = {10.1002/cne.23654},
volume = {522},
year = {2014},
}
@inproceedings{2082,
abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting by the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq2/2 c bound for this step, improving over the trivial bound of ℓ2q2/2c. 
The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Garay, Juan and Gennaro, Rosario},
location = {Santa Barbara, USA},
number = {1},
pages = {113 -- 130},
publisher = {Springer},
title = {{The exact PRF-security of NMAC and HMAC}},
doi = {10.1007/978-3-662-44371-2_7},
volume = {8616},
year = {2014},
}
@article{2083,
abstract = {Understanding the effects of sex and migration on adaptation to novel environments remains a key problem in evolutionary biology. Using a single-cell alga Chlamydomonas reinhardtii, we investigated how sex and migration affected rates of evolutionary rescue in a sink environment, and subsequent changes in fitness following evolutionary rescue. We show that sex and migration affect both the rate of evolutionary rescue and subsequent adaptation. However, their combined effects change as the populations adapt to a sink habitat. Both sex and migration independently increased rates of evolutionary rescue, but the effect of sex on subsequent fitness improvements, following initial rescue, changed with migration, as sex was beneficial in the absence of migration but constraining adaptation when combined with migration. These results suggest that sex and migration are beneficial during the initial stages of adaptation, but can become detrimental as the population adapts to its environment.},
author = {Lagator, Mato and Morgan, Andrew and Neve, Paul and Colegrave, Nick},
journal = {Evolution},
number = {8},
pages = {2296 -- 2305},
publisher = {Wiley},
title = {{Role of sex and migration in adaptation to sink environments}},
doi = {10.1111/evo.12440},
volume = {68},
year = {2014},
}
@article{2084,
abstract = {Receptor tyrosine kinases (RTKs) are a large family of cell surface receptors that sense growth factors and hormones and regulate a variety of cell behaviours in health and disease. Contactless activation of RTKs with spatial and temporal precision is currently not feasible. Here, we generated RTKs that are insensitive to endogenous ligands but can be selectively activated by low-intensity blue light. We screened light-oxygen-voltage (LOV)-sensing domains for their ability to activate RTKs by light-activated dimerization. Incorporation of LOV domains found in aureochrome photoreceptors of stramenopiles resulted in robust activation of the fibroblast growth factor receptor 1 (FGFR1), epidermal growth factor receptor (EGFR) and rearranged during transfection (RET). In human cancer and endothelial cells, light induced cellular signalling with spatial and temporal precision. Furthermore, light faithfully mimicked complex mitogenic and morphogenic cell behaviour induced by growth factors. RTKs under optical control (Opto-RTKs) provide a powerful optogenetic approach to actuate cellular signals and manipulate cell behaviour.},
author = {Grusch, Michael and Schelch, Karin and Riedler, Robert and Gschaider-Reichhart, Eva and Differ, Christopher and Berger, Walter and Inglés Prieto, Álvaro and Janovjak, Harald L},
journal = {EMBO Journal},
number = {15},
pages = {1713 -- 1726},
publisher = {Wiley-Blackwell},
title = {{Spatio-temporally precise activation of engineered receptor tyrosine kinases by light}},
doi = {10.15252/embj.201387695},
volume = {33},
year = {2014},
}
@article{2086,
abstract = {Pathogens may gain a fitness advantage through manipulation of the behaviour of their hosts. Likewise, host behavioural changes can be a defence mechanism, counteracting the impact of pathogens on host fitness. We apply harmonic radar technology to characterize the impact of an emerging pathogen - Nosema ceranae (Microsporidia) - on honeybee (Apis mellifera) flight and orientation performance in the field. Honeybees are the most important commercial pollinators. Emerging diseases have been proposed to play a prominent role in colony decline, partly through sub-lethal behavioural manipulation of their hosts. We found that homing success was significantly reduced in diseased (65.8%) versus healthy foragers (92.5%). Although lost bees had significantly reduced continuous flight times and prolonged resting times, other flight characteristics and navigational abilities showed no significant difference between infected and non-infected bees. Our results suggest that infected bees express normal flight characteristics but are constrained in their homing ability, potentially compromising the colony by reducing its resource inputs, but also counteracting the intra-colony spread of infection. We provide the first high-resolution analysis of sub-lethal effects of an emerging disease on insect flight behaviour. The potential causes and the implications for both host and parasite are discussed.},
author = {Wolf, Stephan and McMahon, Dino and Lim, Ka and Pull, Christopher and Clark, Suzanne and Paxton, Robert and Osborne, Juliet},
journal = {PLoS One},
number = {8},
publisher = {Public Library of Science},
title = {{So near and yet so far: Harmonic radar reveals reduced homing ability of Nosema infected honeybees}},
doi = {10.1371/journal.pone.0103989},
volume = {9},
year = {2014},
}
@article{2131,
abstract = {We study approximations to a class of vector-valued equations of Burgers type driven by a multiplicative space-time white noise. A solution theory for this class of equations has been developed recently in Probability Theory and Related Fields by Hairer and Weber. The key idea was to use the theory of controlled rough paths to give definitions of weak/mild solutions and to set up a Picard iteration argument. In this article the limiting behavior of a rather large class of (spatial) approximations to these equations is studied. These approximations are shown to converge and convergence rates are given, but the limit may depend on the particular choice of approximation. This effect is a spatial analogue to the Itô-Stratonovich correction in the theory of stochastic ordinary differential equations, where it is well known that different approximation schemes may converge to different solutions.},
author = {Hairer, Martin M and Maas, Jan and Weber, Hendrik},
journal = {Communications on Pure and Applied Mathematics},
number = {5},
pages = {776 -- 870},
publisher = {Wiley-Blackwell},
title = {{Approximating Rough Stochastic PDEs}},
doi = {10.1002/cpa.21495},
volume = {67},
year = {2014},
}
@article{2132,
abstract = {We consider discrete porous medium equations of the form ∂tρt=Δϕ(ρt), where Δ is the generator of a reversible continuous time Markov chain on a finite set χ, and ϕ is an increasing function. We show that these equations arise as gradient flows of certain entropy functionals with respect to suitable non-local transportation metrics. This may be seen as a discrete analogue of the Wasserstein gradient flow structure for porous medium equations in ℝn discovered by Otto. We present a one-dimensional counterexample to geodesic convexity and discuss Gromov-Hausdorff convergence to the Wasserstein metric.},
author = {Erbar, Matthias and Maas, Jan},
journal = {Discrete and Continuous Dynamical Systems - Series A},
number = {4},
pages = {1355 -- 1374},
publisher = {American Institute of Mathematical Sciences},
title = {{Gradient flow structures for discrete porous medium equations}},
doi = {10.3934/dcds.2014.34.1355},
volume = {34},
year = {2014},
}
@article{2133,
abstract = {Let ℭ denote the Clifford algebra over ℝ𝑛, which is the von Neumann algebra generated by n self-adjoint operators Q j , j = 1,…,n satisfying the canonical anticommutation relations, Q i Q j + Q j Q i = 2δ ij I, and let τ denote the normalized trace on ℭ. This algebra arises in quantum mechanics as the algebra of observables generated by n fermionic degrees of freedom. Let 𝔓 denote the set of all positive operators 𝜌∈ℭ such that τ(ρ) = 1; these are the non-commutative analogs of probability densities in the non-commutative probability space (ℭ,𝜏). The fermionic Fokker–Planck equation is a quantum-mechanical analog of the classical Fokker–Planck equation with which it has much in common, such as the same optimal hypercontractivity properties. In this paper we construct a Riemannian metric on 𝔓 that we show to be a natural analog of the classical 2-Wasserstein metric, and we show that, in analogy with the classical case, the fermionic Fokker–Planck equation is gradient flow in this metric for the relative entropy with respect to the ground state. We derive a number of consequences of this, such as a sharp Talagrand inequality for this metric, and we prove a number of results pertaining to this metric. Several open problems are raised.},
author = {Carlen, Eric and Maas, Jan},
journal = {Communications in Mathematical Physics},
number = {3},
pages = {887 -- 926},
publisher = {Springer},
title = {{An analog of the 2-Wasserstein metric in non-commutative probability under which the fermionic Fokker-Planck equation is gradient flow for the entropy}},
doi = {10.1007/s00220-014-2124-8},
volume = {331},
year = {2014},
}
@article{2140,
abstract = {We propose a technique for engineering momentum-dependent dissipation in Bose-Einstein condensates with non-local interactions. The scheme relies on the use of momentum-dependent dark-states in close analogy to velocity-selective coherent population trapping. During the short-time dissipative dynamics, the system is driven into a particular finite-momentum phonon mode, which in real space corresponds to an ordered structure with non-local density-density correlations. Dissipation-induced ordering can be observed and studied in present-day experiments using cold atoms with dipole-dipole or off-resonant Rydberg interactions. Due to its dissipative nature, the ordering does not require artificial breaking of translational symmetry by an optical lattice or harmonic trap. This opens up a perspective of direct cooling of quantum gases into strongly-interacting phases.},
author = {Otterbach, Johannes and Lemeshko, Mikhail},
journal = {Physical Review Letters},
number = {7},
publisher = {American Physical Society},
title = {{Dissipative preparation of spatial order in Rydberg-dressed Bose-Einstein condensates}},
doi = {10.1103/PhysRevLett.113.070401},
volume = {113},
year = {2014},
}
@article{2141,
abstract = {The computation of the winning set for Büchi objectives in alternating games on graphs is a central problem in computer-aided verification with a large number of applications. The long-standing best known upper bound for solving the problem is Õ(n ⋅ m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n ⋅ m) boundary by presenting a new technique that reduces the running time to O(n2). This bound also leads to O(n2)-time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n ⋅ m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n3)), and (3) in Markov decision processes (improving for m>n4/3 an earlier bound of O(m ⋅ √m)). We then show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. Our algorithms are the first dynamic algorithms for this problem. We then consider another core graph theoretic problem in verification of probabilistic systems, namely computing the maximal end-component decomposition of a graph. We present two improved static algorithms for the maximal end-component decomposition problem. Our first algorithm is an O(m ⋅ √m)-time algorithm, and our second algorithm is an O(n2)-time algorithm which is obtained using the same technique as for alternating Büchi games. Thus, we obtain an O(min {m ⋅ √m,n2})-time algorithm improving the long-standing O(n ⋅ m) time bound. Finally, we show how to maintain the maximal end-component decomposition of a graph under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per edge deletion, and O(m) worst-case time per edge insertion. Again, our algorithms are the first dynamic algorithms for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
journal = {Journal of the ACM},
number = {3},
publisher = {ACM},
title = {{Efficient and dynamic algorithms for alternating Büchi games and maximal end-component decomposition}},
doi = {10.1145/2597631},
volume = {61},
year = {2014},
}
@inproceedings{2153,
abstract = {We define a simple, explicit map sending a morphism f : M → N of pointwise finite dimensional persistence modules to a matching between the barcodes of M and N. Our main result is that, in a precise sense, the quality of this matching is tightly controlled by the lengths of the longest intervals in the barcodes of ker f and coker f . As an immediate corollary, we obtain a new proof of the algebraic stability theorem for persistence barcodes [5, 9], a fundamental result in the theory of persistent homology. In contrast to previous proofs, ours shows explicitly how a δ-interleaving morphism between two persistence modules induces a δ-matching between the barcodes of the two modules. Our main result also specializes to a structure theorem for submodules and quotients of persistence modules. Copyright is held by the owner/author(s).},
author = {Bauer, Ulrich and Lesnick, Michael},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {355 -- 364},
publisher = {ACM},
title = {{Induced matchings of barcodes and the algebraic stability of persistence}},
doi = {10.1145/2582112.2582168},
year = {2014},
}
@article{2154,
abstract = {A result of Boros and Füredi (d = 2) and of Bárány (arbitrary d) asserts that for every d there exists cd > 0 such that for every n-point set P ⊂ ℝd, some point of ℝd is covered by at least cd ⋅ (n choose d+1) of the d-simplices spanned by the points of P. The largest possible value of cd has been the subject of ongoing research. Recently Gromov improved the existing lower bounds considerably by introducing a new, topological proof method. We provide an exposition of the combinatorial component of Gromov's approach, in terms accessible to combinatorialists and discrete geometers, and we investigate the limits of his method. In particular, we give tighter bounds on the cofilling profiles for the (n - 1)-simplex. These bounds yield a minor improvement over Gromov's lower bounds on cd for large d, but they also show that the room for further improvement through the cofilling profiles alone is quite small. We also prove a slightly better lower bound for c3 by an approach using an additional structure besides the cofilling profiles. We formulate a combinatorial extremal problem whose solution might perhaps lead to a tight lower bound for cd.},
author = {Matoušek, Jiří and Wagner, Uli},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 33},
publisher = {Springer},
title = {{On Gromov's method of selecting heavily covered points}},
doi = {10.1007/s00454-014-9584-7},
volume = {52},
year = {2014},
}
@inproceedings{2155,
abstract = {Given a finite set of points in Rn and a positive radius, we study the Čech, Delaunay-Čech, alpha, and wrap complexes as instances of a generalized discrete Morse theory. We prove that the latter three complexes are simple-homotopy equivalent. Our results have applications in topological data analysis and in the reconstruction of shapes from sampled data. Copyright is held by the owner/author(s).},
author = {Bauer, Ulrich and Edelsbrunner, Herbert},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {484 -- 490},
publisher = {ACM},
title = {{The Morse theory of Čech and Delaunay filtrations}},
doi = {10.1145/2582112.2582167},
year = {2014},
}
@inproceedings{2156,
abstract = {We propose a metric for Reeb graphs, called the functional distortion distance. Under this distance, the Reeb graph is stable against small changes of input functions. At the same time, it remains discriminative at differentiating input functions. In particular, the main result is that the functional distortion distance between two Reeb graphs is bounded from below by the bottleneck distance between both the ordinary and extended persistence diagrams for appropriate dimensions. As an application of our results, we analyze a natural simplification scheme for Reeb graphs, and show that persistent features in Reeb graph remains persistent under simplification. Understanding the stability of important features of the Reeb graph under simplification is an interesting problem on its own right, and critical to the practical usage of Reeb graphs. Copyright is held by the owner/author(s).},
author = {Bauer, Ulrich and Ge, Xiaoyin and Wang, Yusu},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {464 -- 473},
publisher = {ACM},
title = {{Measuring distance between Reeb graphs}},
doi = {10.1145/2582112.2582169},
year = {2014},
}
@inproceedings{2157,
abstract = {We show that the following algorithmic problem is decidable: given a 2-dimensional simplicial complex, can it be embedded (topologically, or equivalently, piecewise linearly) in ℝ3? By a known reduction, it suffices to decide the embeddability of a given triangulated 3-manifold X into the 3-sphere S3. The main step, which allows us to simplify X and recurse, is in proving that if X can be embedded in S3, then there is also an embedding in which X has a short meridian, i.e., an essential curve in the boundary of X bounding a disk in S3 ∖ X with length bounded by a computable function of the number of tetrahedra of X.},
author = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {78 -- 84},
publisher = {ACM},
title = {{Embeddability in the 3 sphere is decidable}},
doi = {10.1145/2582112.2582137},
year = {2014},
}
@article{2158,
abstract = {Directional guidance of migrating cells is relatively well explored in the reductionist setting of cell culture experiments. Here spatial gradients of chemical cues as well as gradients of mechanical substrate characteristics prove sufficient to attract single cells as well as their collectives. How such gradients present and act in the context of an organism is far less clear. Here we review recent advances in understanding how guidance cues emerge and operate in the physiological context.},
author = {Majumdar, Ritankar and Sixt, Michael K and Parent, Carole},
journal = {Current Opinion in Cell Biology},
number = {1},
pages = {33 -- 40},
publisher = {Elsevier},
title = {{New paradigms in the establishment and maintenance of gradients during directed cell migration}},
doi = {10.1016/j.ceb.2014.05.010},
volume = {30},
year = {2014},
}
@inproceedings{2160,
abstract = {Transfer learning has received a lot of attention in the machine learning community over the last years, and several effective algorithms have been developed. However, relatively little is known about their theoretical properties, especially in the setting of lifelong learning, where the goal is to transfer information to tasks for which no data have been observed so far. In this work we study lifelong learning from a theoretical perspective. Our main result is a PAC-Bayesian generalization bound that offers a unified view on existing paradigms for transfer learning, such as the transfer of parameters or the transfer of low-dimensional representations. We also use the bound to derive two principled lifelong learning algorithms, and we show that these yield results comparable with existing methods.},
author = {Pentina, Anastasia and Lampert, Christoph},
booktitle = {Proceedings of the 31st International Conference on Machine Learning},
editor = {Xing, Eric and Jebara, Tony},
location = {Beijing, China},
pages = {991 -- 999},
publisher = {Omnipress},
title = {{A PAC-Bayesian bound for Lifelong Learning}},
volume = {32},
year = {2014},
}
@inproceedings{2162,
abstract = {We study two-player (zero-sum) concurrent mean-payoff games played on a finite-state graph. We focus on the important sub-class of ergodic games where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: We establish (1) an optimal exponential bound on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) the approximation problem lies in FNP; (3) the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP ∩ coNP is the long-standing best known bound). We present a variant of the strategy-iteration algorithm by Hoffman and Karp; show that both our algorithm and the classical value-iteration algorithm can approximate the value in exponential time; and identify a subclass where the value-iteration algorithm is a FPTAS. We also show that the exact value can be expressed in the existential theory of the reals, and establish square-root sum hardness for a related class of games.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {122 -- 133},
publisher = {Springer},
title = {{The complexity of ergodic mean payoff games}},
doi = {10.1007/978-3-662-43951-7_11},
volume = {8573},
year = {2014},
}
@inproceedings{2163,
abstract = {We consider multi-player graph games with partial-observation and parity objective. While the decision problem for three-player games with a coalition of the first and second players against the third player is undecidable in general, we present a decidability result for partial-observation games where the first and third player are in a coalition against the second player, thus where the second player is adversarial but weaker due to partial-observation. We establish tight complexity bounds in the case where player 1 is less informed than player 2, namely 2-EXPTIME-completeness for parity objectives. The symmetric case of player 1 more informed than player 2 is much more complicated, and we show that already in the case where player 1 has perfect observation, memory of size non-elementary is necessary in general for reachability objectives, and the problem is decidable for safety and reachability objectives. From our results we derive new complexity results for partial-observation stochastic games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Lecture Notes in Computer Science},
location = {Copenhagen, Denmark},
number = {Part 2},
pages = {110 -- 121},
publisher = {Springer},
title = {{Games with a weak adversary}},
doi = {10.1007/978-3-662-43951-7_10},
volume = {8573},
year = {2014},
}
@article{2165,
abstract = {In machine learning, the domain adaptation problem arises when the test (target) and the train (source) data are generated from different distributions. A key applied issue is thus the design of algorithms able to generalize on a new distribution, for which we have no label information. We focus on learning classification models defined as a weighted majority vote over a set of real-valued functions. In this context, Germain et al. (2013) have shown that a measure of disagreement between these functions is crucial to control. The core of this measure is a theoretical bound—the C-bound (Lacasse et al., 2007)—which involves the disagreement and leads to a well performing majority vote learning algorithm in usual non-adaptative supervised setting: MinCq. In this work, we propose a framework to extend MinCq to a domain adaptation scenario. This procedure takes advantage of the recent perturbed variation divergence between distributions proposed by Harel and Mannor (2012). Justified by a theoretical bound on the target risk of the vote, we provide to MinCq a target sample labeled thanks to a perturbed variation-based self-labeling focused on the regions where the source and target marginals appear similar. We also study the influence of our self-labeling, from which we deduce an original process for tuning the hyperparameters. Finally, our framework called PV-MinCq shows very promising results on a rotation and translation synthetic problem.},
author = {Morvant, Emilie},
journal = {Pattern Recognition Letters},
pages = {37 -- 43},
publisher = {Elsevier},
title = {{Domain Adaptation of Weighted Majority Votes via Perturbed Variation-Based Self-Labeling}},
doi = {10.1016/j.patrec.2014.08.013},
volume = {51},
year = {2014},
}
@inproceedings{2167,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
booktitle = {IEEE 7th International Conference on Software Testing, Verification and Validation},
isbn = {978-1-4799-2255-0},
issn = {2159-4848},
location = {Cleveland, USA},
publisher = {IEEE},
title = {{Compositional specifications for IOCO testing}},
doi = {10.1109/ICST.2014.50},
year = {2014},
}
@article{2168,
abstract = {Many species have an essentially continuous distribution in space, in which there are no natural divisions between randomly mating subpopulations. Yet, the standard approach to modelling these populations is to impose an arbitrary grid of demes, adjusting deme sizes and migration rates in an attempt to capture the important features of the population. Such indirect methods are required because of the failure of the classical models of isolation by distance, which have been shown to have major technical flaws. A recently introduced model of extinction and recolonisation in two dimensions solves these technical problems, and provides a rigorous technical foundation for the study of populations evolving in a spatial continuum. The coalescent process for this model is simply stated, but direct simulation is very inefficient for large neighbourhood sizes. We present efficient and exact algorithms to simulate this coalescent process for arbitrary sample sizes and numbers of loci, and analyse these algorithms in detail.},
author = {Kelleher, Jerome and Etheridge, Alison and Barton, Nicholas H},
journal = {Theoretical Population Biology},
pages = {13 -- 23},
publisher = {Academic Press},
title = {{Coalescent simulation in continuous space: Algorithms for large neighbourhood size}},
doi = {10.1016/j.tpb.2014.05.001},
volume = {95},
year = {2014},
}
@article{2169,
author = {Barton, Nicholas H and Novak, Sebastian and Paixao, Tiago},
journal = {Proceedings of the National Academy of Sciences},
number = {29},
pages = {10398 -- 10399},
publisher = {National Academy of Sciences},
title = {{Diverse forms of selection in evolution and computer science}},
doi = {10.1073/pnas.1410107111},
volume = {111},
year = {2014},
}
@article{2170,
abstract = { Short-read sequencing technologies have in principle made it feasible to draw detailed inferences about the recent history of any organism. In practice, however, this remains challenging due to the difficulty of genome assembly in most organisms and the lack of statistical methods powerful enough to discriminate between recent, nonequilibrium histories. We address both the assembly and inference challenges. We develop a bioinformatic pipeline for generating outgroup-rooted alignments of orthologous sequence blocks from de novo low-coverage short-read data for a small number of genomes, and show how such sequence blocks can be used to fit explicit models of population divergence and admixture in a likelihood framework. To illustrate our approach, we reconstruct the Pleistocene history of an oak-feeding insect (the oak gallwasp Biorhiza pallida), which, in common with many other taxa, was restricted during Pleistocene ice ages to a longitudinal series of southern refugia spanning the Western Palaearctic. Our analysis of sequence blocks sampled from a single genome from each of three major glacial refugia reveals support for an unexpected history dominated by recent admixture. Despite the fact that 80% of the genome is affected by admixture during the last glacial cycle, we are able to infer the deeper divergence history of these populations. These inferences are robust to variation in block length, mutation model and the sampling location of individual genomes within refugia. This combination of de novo assembly and numerical likelihood calculation provides a powerful framework for estimating recent population history that can be applied to any organism without the need for prior genetic resources.},
author = {Hearn, Jack and Stone, Graham and Bunnefeld, Lynsey and Nicholls, James and Barton, Nicholas H and Lohse, Konrad},
journal = {Molecular Ecology},
number = {1},
pages = {198 -- 211},
publisher = {Wiley-Blackwell},
title = {{Likelihood-based inference of population history from low-coverage de novo genome assemblies}},
doi = {10.1111/mec.12578},
volume = {23},
year = {2014},
}
@inproceedings{2171,
abstract = {We present LS-CRF, a new method for training cyclic Conditional Random Fields (CRFs) from large datasets that is inspired by classical closed-form expressions for the maximum likelihood parameters of a generative graphical model with tree topology. Training a CRF with LS-CRF requires only solving a set of independent regression problems, each of which can be solved efficiently in closed form or by an iterative solver. This makes LS-CRF orders of magnitude faster than classical CRF training based on probabilistic inference, and at the same time more flexible and easier to implement than other approximate techniques, such as pseudolikelihood or piecewise training. We apply LS-CRF to the task of semantic image segmentation, showing that it achieves on par accuracy to other training techniques at higher speed, thereby allowing efficient CRF training from very large training sets. For example, training a linearly parameterized pairwise CRF on 150,000 images requires less than one hour on a modern workstation.},
author = {Kolesnikov, Alexander and Guillaumin, Matthieu and Ferrari, Vittorio and Lampert, Christoph},
booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
editor = {Fleet, David and Pajdla, Tomas and Schiele, Bernt and Tuytelaars, Tinne},
location = {Zurich, Switzerland},
number = {PART 3},
pages = {550 -- 565},
publisher = {Springer},
title = {{Closed-form approximate CRF training for scalable image segmentation}},
doi = {10.1007/978-3-319-10578-9_36},
volume = {8691},
year = {2014},
}
@inproceedings{2173,
abstract = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.
},
author = {Khamis, Sameh and Lampert, Christoph},
booktitle = {Proceedings of the British Machine Vision Conference 2014},
location = {Nottingham, UK},
publisher = {BMVA Press},
title = {{CoConut: Co-classification with output space regularization}},
year = {2014},
}
@article{2174,
abstract = {When polygenic traits are under stabilizing selection, many different combinations of alleles allow close adaptation to the optimum. If alleles have equal effects, all combinations that result in the same deviation from the optimum are equivalent. Furthermore, the genetic variance that is maintained by mutation-selection balance is 2μ/S per locus, where μ is the mutation rate and S the strength of stabilizing selection. In reality, alleles vary in their effects, making the fitness landscape asymmetric and complicating analysis of the equilibria. We show that the resulting genetic variance depends on the fraction of alleles near fixation, which contribute by 2μ/S, and on the total mutational effects of alleles that are at intermediate frequency. The interplay between stabilizing selection and mutation leads to a sharp transition: alleles with effects smaller than a threshold value of 2√(μ/S) remain polymorphic, whereas those with larger effects are fixed. The genetic load in equilibrium is less than for traits of equal effects, and the fitness equilibria are more similar. We find that when the optimum is displaced, alleles with effects close to the threshold value sweep first, and their rate of increase is bounded. Long-term response leads in general to well-adapted traits, unlike the case of equal effects that often end up at a suboptimal fitness peak. However, the particular peaks to which the populations converge are extremely sensitive to the initial states and to the speed of the shift of the optimum trait value.},
author = {De Vladar, Harold and Barton, Nicholas H},
journal = {Genetics},
number = {2},
pages = {749 -- 767},
publisher = {Genetics Society of America},
title = {{Stability and response of polygenic traits to stabilizing selection and mutation}},
doi = {10.1534/genetics.113.159111},
volume = {197},
year = {2014},
}
@article{2175,
abstract = {The cerebral cortex, the seat of our cognitive abilities, is composed of an intricate network of billions of excitatory projection and inhibitory interneurons. Postmitotic cortical neurons are generated by a diverse set of neural stem cell progenitors within dedicated zones and defined periods of neurogenesis during embryonic development. Disruptions in neurogenesis can lead to alterations in the neuronal cytoarchitecture, which is thought to represent a major underlying cause for several neurological disorders, including microcephaly, autism and epilepsy. Although a number of signaling pathways regulating neurogenesis have been described, the precise cellular and molecular mechanisms regulating the functional neural stem cell properties in cortical neurogenesis remain unclear. Here, we discuss the most up-to-date strategies to monitor the fundamental mechanistic parameters of neuronal progenitor proliferation, and recent advances deciphering the logic and dynamics of neurogenesis.},
author = {Postiglione, Maria P and Hippenmeyer, Simon},
journal = {Future Neurology},
number = {3},
pages = {323 -- 340},
publisher = {Future Medicine Ltd.},
title = {{Monitoring neurogenesis in the cerebral cortex: an update}},
doi = {10.2217/fnl.14.18},
volume = {9},
year = {2014},
}
@article{2178,
abstract = {We consider the three-state toric homogeneous Markov chain model (THMC) without loops and initial parameters. At time T, the size of the design matrix is 6 × 3 · 2T-1 and the convex hull of its columns is the model polytope. We study the behavior of this polytope for T ≥ 3 and we show that it is defined by 24 facets for all T ≥ 5. Moreover, we give a complete description of these facets. From this, we deduce that the toric ideal associated with the design matrix is generated by binomials of degree at most 6. Our proof is based on a result due to Sturmfels, who gave a bound on the degree of the generators of a toric ideal, provided the normality of the corresponding toric variety. In our setting, we established the normality of the toric variety associated to the THMC model by studying the geometric properties of the model polytope.},
author = {Haws, David and Martin Del Campo Sanchez, Abraham and Takemura, Akimichi and Yoshida, Ruriko},
journal = {Beiträge zur Algebra und Geometrie},
number = {1},
pages = {161 -- 188},
publisher = {Springer},
title = {{Markov degree of the three-state toric homogeneous Markov chain model}},
doi = {10.1007/s13366-013-0178-y},
volume = {55},
year = {2014},
}
@article{2179,
abstract = {We extend the proof of the local semicircle law for generalized Wigner matrices given in MR3068390 to the case when the matrix of variances has an eigenvalue -1. In particular, this result provides a short proof of the optimal local Marchenko-Pastur law at the hard edge (i.e. around zero) for sample covariance matrices X*X, where the variances of the entries of X may vary.},
author = {Ajanki, Oskari H and Erdős, László and Krüger, Torben H},
journal = {Electronic Communications in Probability},
publisher = {Institute of Mathematical Statistics},
title = {{Local semicircle law with imprimitive variance matrix}},
doi = {10.1214/ECP.v19-3121},
volume = {19},
year = {2014},
}
@article{2180,
abstract = {Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.},
author = {Bellet, Aurélien and Habrard, Amaury and Morvant, Emilie and Sebban, Marc},
journal = {Machine Learning},
number = {1-2},
pages = {129 -- 154},
publisher = {Springer},
title = {{Learning a priori constrained weighted majority votes}},
doi = {10.1007/s10994-014-5462-z},
volume = {97},
year = {2014},
}
@article{2183,
abstract = {We describe a simple adaptive network of coupled chaotic maps. The network reaches a stationary state (frozen topology) for all values of the coupling parameter, although the dynamics of the maps at the nodes of the network can be nontrivial. The structure of the network shows interesting hierarchical properties and in certain parameter regions the dynamics is polysynchronous: Nodes can be divided in differently synchronized classes but, contrary to cluster synchronization, nodes in the same class need not be connected to each other. These complicated synchrony patterns have been conjectured to play roles in systems biology and circuits. The adaptive system we study describes ways whereby this behavior can evolve from undifferentiated nodes.},
author = {Botella Soler, Vicente and Glendinning, Paul},
journal = {Physical Review E - Statistical, Nonlinear, and Soft Matter Physics},
number = {6},
publisher = {American Physical Society},
title = {{Hierarchy and polysynchrony in an adaptive network}},
doi = {10.1103/PhysRevE.89.062809},
volume = {89},
year = {2014},
}
@article{2184,
abstract = {Given topological spaces X,Y, a fundamental problem of algebraic topology is understanding the structure of all continuous maps X→ Y. We consider a computational version, where X,Y are given as finite simplicial complexes, and the goal is to compute [X,Y], that is, all homotopy classes of such maps. We solve this problem in the stable range, where for some d ≥ 2, we have dim X ≤ 2d-2 and Y is (d-1)-connected; in particular, Y can be the d-dimensional sphere S^d. The algorithm combines classical tools and ideas from homotopy theory (obstruction theory, Postnikov systems, and simplicial sets) with algorithmic tools from effective algebraic topology (locally effective simplicial sets and objects with effective homology). In contrast, [X,Y] is known to be uncomputable for general X,Y, since for X = S^1 it includes a well known undecidable problem: testing triviality of the fundamental group of Y. In follow-up papers, the algorithm is shown to run in polynomial time for d fixed, and extended to other problems, such as the extension problem, where we are given a subspace A ⊂ X and a map A→ Y and ask whether it extends to a map X → Y, or computing the Z_2-index — everything in the stable range. Outside the stable range, the extension problem is undecidable.},
author = {Čadek, Martin and Krcál, Marek and Matoušek, Jiří and Sergeraert, Francis and Vokřínek, Lukáš and Wagner, Uli},
journal = {Journal of the ACM},
number = {3},
publisher = {ACM},
title = {{Computing all maps into a sphere}},
doi = {10.1145/2597629},
volume = {61},
year = {2014},
}
@inproceedings{2185,
abstract = {We revisit the classical problem of converting an imperfect source of randomness into a usable cryptographic key. Assume that we have some cryptographic application P that expects a uniformly random m-bit key R and ensures that the best attack (in some complexity class) against P(R) has success probability at most δ. Our goal is to design a key-derivation function (KDF) h that converts any random source X of min-entropy k into a sufficiently "good" key h(X), guaranteeing that P(h(X)) has comparable security δ′ which is 'close' to δ. Seeded randomness extractors provide a generic way to solve this problem for all applications P, with resulting security δ′ = O(δ), provided that we start with entropy k ≥ m + 2 log (1/δ) - O(1). By a result of Radhakrishnan and Ta-Shma, this bound on k (called the "RT-bound") is also known to be tight in general. Unfortunately, in many situations the loss of 2 log (1/δ) bits of entropy is unacceptable. This motivates the study KDFs with less entropy waste by placing some restrictions on the source X or the application P. In this work we obtain the following new positive and negative results in this regard: - Efficient samplability of the source X does not help beat the RT-bound for general applications. This resolves the SRT (samplable RT) conjecture of Dachman-Soled et al. [DGKM12] in the affirmative, and also shows that the existence of computationally-secure extractors beating the RT-bound implies the existence of one-way functions. - We continue in the line of work initiated by Barak et al. [BDK+11] and construct new information-theoretic KDFs which beat the RT-bound for large but restricted classes of applications. Specifically, we design efficient KDFs that work for all unpredictability applications P (e.g., signatures, MACs, one-way functions, etc.) 
and can either: (1) extract all of the entropy k = m with a very modest security loss δ′ = O(δ·log (1/δ)), or alternatively, (2) achieve essentially optimal security δ′ = O(δ) with a very modest entropy loss k ≥ m + loglog (1/δ). In comparison, the best prior results from [BDK+11] for this class of applications would only guarantee δ′ = O(√δ) when k = m, and would need k ≥ m + log (1/δ) to get δ′ = O(δ). - The weaker bounds of [BDK+11] hold for a larger class of so-called "square- friendly" applications (which includes all unpredictability, but also some important indistinguishability, applications). Unfortunately, we show that these weaker bounds are tight for the larger class of applications. - We abstract out a clean, information-theoretic notion of (k,δ,δ′)- unpredictability extractors, which guarantee "induced" security δ′ for any δ-secure unpredictability application P, and characterize the parameters achievable for such unpredictability extractors. Of independent interest, we also relate this notion to the previously-known notion of (min-entropy) condensers, and improve the state-of-the-art parameters for such condensers.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Wichs, Daniel},
editor = {Nguyen, Phong and Oswald, Elisabeth},
location = {Copenhagen, Denmark},
pages = {93 -- 110},
publisher = {Springer},
title = {{Key derivation without entropy waste}},
doi = {10.1007/978-3-642-55220-5_6},
volume = {8441},
year = {2014},
}
@article{2186,
abstract = {We prove the existence of scattering states for the defocusing cubic Gross-Pitaevskii (GP) hierarchy in ℝ3. Moreover, we show that an exponential energy growth condition commonly used in the well-posedness theory of the GP hierarchy is, in a specific sense, necessary. In fact, we prove that without the latter, there exist initial data for the focusing cubic GP hierarchy for which instantaneous blowup occurs.},
author = {Chen, Thomas and Hainzl, Christian and Pavlović, Nataša and Seiringer, Robert},
journal = {Letters in Mathematical Physics},
number = {7},
pages = {871 -- 891},
publisher = {Springer},
title = {{On the well-posedness and scattering for the Gross-Pitaevskii hierarchy via quantum de Finetti}},
doi = {10.1007/s11005-014-0693-2},
volume = {104},
year = {2014},
}
@article{2187,
abstract = {Systems should not only be correct but also robust in the sense that they behave reasonably in unexpected situations. This article addresses synthesis of robust reactive systems from temporal specifications. Existing methods allow arbitrary behavior if assumptions in the specification are violated. To overcome this, we define two robustness notions, combine them, and show how to enforce them in synthesis. The first notion applies to safety properties: If safety assumptions are violated temporarily, we require that the system recovers to normal operation with as few errors as possible. The second notion requires that, if liveness assumptions are violated, as many guarantees as possible should be fulfilled nevertheless. We present a synthesis procedure achieving this for the important class of GR(1) specifications, and establish complexity bounds. We also present an implementation of a special case of robustness, and show experimental results.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Hofferek, Georg and Jobstmann, Barbara and Könighofer, Bettina and Könighofer, Robert},
journal = {Acta Informatica},
number = {3-4},
pages = {193 -- 220},
publisher = {Springer},
title = {{Synthesizing robust systems}},
doi = {10.1007/s00236-013-0191-5},
volume = {51},
year = {2014},
}
@article{2188,
abstract = {Although plant and animal cells use a similar core mechanism to deliver proteins to the plasma membrane, their different lifestyle, body organization and specific cell structures resulted in the acquisition of regulatory mechanisms that vary in the two kingdoms. In particular, cell polarity regulators do not seem to be conserved, because genes encoding key components are absent in plant genomes. In plants, the broad knowledge on polarity derives from the study of auxin transporters, the PIN-FORMED proteins, in the model plant Arabidopsis thaliana. In animals, much information is provided from the study of polarity in epithelial cells that exhibit basolateral and luminal apical polarities, separated by tight junctions. In this review, we summarize the similarities and differences of the polarization mechanisms between plants and animals and survey the main genetic approaches that have been used to characterize new genes involved in polarity establishment in plants, including the frequently used forward and reverse genetics screens as well as a novel chemical genetics approach that is expected to overcome the limitation of classical genetics methods.},
author = {Kania, Urszula and Fendrych, Matyas and Friml, Jiří},
journal = {Open Biology},
number = {APRIL},
publisher = {Royal Society},
title = {{Polar delivery in plants; commonalities and differences to animal epithelial cells}},
doi = {10.1098/rsob.140017},
volume = {4},
year = {2014},
}
@inproceedings{2189,
abstract = {En apprentissage automatique, nous parlons d'adaptation de domaine lorsque les données de test (cibles) et d'apprentissage (sources) sont générées selon différentes distributions. Nous devons donc développer des algorithmes de classification capables de s'adapter à une nouvelle distribution, pour laquelle aucune information sur les étiquettes n'est disponible. Nous attaquons cette problématique sous l'angle de l'approche PAC-Bayésienne qui se focalise sur l'apprentissage de modèles définis comme des votes de majorité sur un ensemble de fonctions. Dans ce contexte, nous introduisons PV-MinCq une version adaptative de l'algorithme (non adaptatif) MinCq. PV-MinCq suit le principe suivant. Nous transférons les étiquettes sources aux points cibles proches pour ensuite appliquer MinCq sur l'échantillon cible ``auto-étiqueté'' (justifié par une borne théorique). Plus précisément, nous définissons un auto-étiquetage non itératif qui se focalise dans les régions où les distributions marginales source et cible sont les plus similaires. Dans un second temps, nous étudions l'influence de notre auto-étiquetage pour en déduire une procédure de validation des hyperparamètres. Finalement, notre approche montre des résultats empiriques prometteurs.},
author = {Morvant, Emilie},
location = {Saint-Etienne, France},
pages = {49 -- 58},
publisher = {Elsevier},
title = {{Adaptation de domaine de vote de majorité par auto-étiquetage non itératif}},
volume = {1},
year = {2014},
}
@inproceedings{2190,
abstract = {We present a new algorithm to construct a (generalized) deterministic Rabin automaton for an LTL formula φ. The automaton is the product of a master automaton and an array of slave automata, one for each G-subformula of φ. The slave automaton for G ψ is in charge of recognizing whether FG ψ holds. As opposed to standard determinization procedures, the states of all our automata have a clear logical structure, which allows for various optimizations. Our construction subsumes former algorithms for fragments of LTL. Experimental results show improvement in the sizes of the resulting automata compared to existing methods.},
author = {Esparza, Javier and Kretinsky, Jan},
booktitle = { Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {192--208},
publisher = {Springer},
title = {{From LTL to deterministic automata: A Safraless compositional approach}},
doi = {10.1007/978-3-319-08867-9_13},
volume = {8559},
year = {2014},
}
@article{2208,
abstract = {We propose to detect quadrupole interactions of neutral ultracold atoms via their induced mean-field shift. We consider a Mott insulator state of spin-polarized atoms in a two-dimensional optical square lattice. The quadrupole moments of the atoms are aligned by an external magnetic field. As the alignment angle is varied, the mean-field shift shows a characteristic angular dependence, which constitutes the defining signature of the quadrupole interaction. For the 3P2 states of Yb and Sr atoms, we find a frequency shift of the order of tens of Hertz, which can be realistically detected in experiment with current technology. We compare our results to the mean-field shift of a spin-polarized quasi-two-dimensional Fermi gas in continuum. },
author = {Lahrz, Martin and Lemeshko, Mikhail and Sengstock, Klaus and Becker, Christoph and Mathey, Ludwig},
journal = {Physical Review A - Atomic, Molecular, and Optical Physics},
number = {4},
publisher = {American Physical Society},
title = {{Detecting quadrupole interactions in ultracold Fermi gases}},
doi = {10.1103/PhysRevA.89.043616},
volume = {89},
year = {2014},
}
@article{2211,
abstract = {In two-player finite-state stochastic games of partial observation on graphs, in every state of the graph, the players simultaneously choose an action, and their joint actions determine a probability distribution over the successor states. The game is played for infinitely many rounds and thus the players construct an infinite path in the graph. We consider reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1) or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and to the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. Our main results for pure strategies are as follows: (1) For one-sided games with player 2 having perfect observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almost-sure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete and present symbolic algorithms that avoid the explicit exponential construction. (2) For one-sided games with player 1 having perfect observation we show that nonelementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. 
(3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least nonelementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. Our equivalence result exhibits serious flaws in previous results of the literature: we show a nonelementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {2},
publisher = {ACM},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1145/2579821},
volume = {15},
year = {2014},
}
@inproceedings{2213,
abstract = {We consider two-player partial-observation stochastic games on finite-state graphs where player 1 has partial observation and player 2 has perfect observation. The winning condition we study are ω-regular conditions specified as parity objectives. The qualitative-analysis problem given a partial-observation stochastic game and a parity objective asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). These qualitative-analysis problems are known to be undecidable. However in many applications the relevant question is the existence of finite-memory strategies, and the qualitative-analysis problems under finite-memory strategies was recently shown to be decidable in 2EXPTIME. We improve the complexity and show that the qualitative-analysis problems for partial-observation stochastic parity games under finite-memory strategies are EXPTIME-complete; and also establish optimal (exponential) memory bounds for finite-memory strategies required for qualitative analysis.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Nain, Sumit and Vardi, Moshe},
location = {Grenoble, France},
pages = {242--257},
publisher = {Springer},
title = {{The complexity of partial-observation stochastic parity games with finite-memory strategies}},
doi = {10.1007/978-3-642-54830-7_16},
volume = {8412},
year = {2014},
}
@article{2214,
abstract = {A hallmark of immune cell trafficking is directional guidance via gradients of soluble or surface bound chemokines. Vascular endothelial cells produce, transport and deposit either their own chemokines or chemokines produced by the underlying stroma. Endothelial heparan sulfate (HS) was suggested to be a critical scaffold for these chemokine pools, but it is unclear how steep chemokine gradients are sustained between the lumenal and ablumenal aspects of blood vessels. Addressing this question by semi-quantitative immunostaining of HS moieties around blood vessels with a pan anti-HS IgM mAb, we found a striking HS enrichment in the basal lamina of resting and inflamed post capillary skin venules, as well as in high endothelial venules (HEVs) of lymph nodes. Staining of skin vessels with a glycocalyx probe further suggested that their lumenal glycocalyx contains much lower HS density than their basolateral extracellular matrix (ECM). This polarized HS pattern was observed also in isolated resting and inflamed microvascular dermal cells. Notably, progressive skin inflammation resulted in massive ECM deposition and in further HS enrichment around skin post capillary venules and their associated pericytes. Inflammation-dependent HS enrichment was not compromised in mice deficient in the main HS degrading enzyme, heparanase. Our results suggest that the blood vasculature patterns steep gradients of HS scaffolds between their lumenal and basolateral endothelial aspects, and that inflammatory processes can further enrich the HS content nearby inflamed vessels. We propose that chemokine gradients between the lumenal and ablumenal sides of vessels could be favored by these sharp HS scaffold gradients.},
author = {Stoler Barak, Liat and Moussion, Christine and Shezen, Elias and Hatzav, Miki and Sixt, Michael K and Alon, Ronen},
journal = {PLoS One},
number = {1},
publisher = {Public Library of Science},
title = {{Blood vessels pattern heparan sulfate gradients between their apical and basolateral aspects}},
doi = {10.1371/journal.pone.0085699},
volume = {9},
year = {2014},
}
@inproceedings{2216,
abstract = {The edit distance between two (untimed) traces is the minimum cost of a sequence of edit operations (insertion, deletion, or substitution) needed to transform one trace to the other. Edit distances have been extensively studied in the untimed setting, and form the basis for approximate matching of sequences in different domains such as coding theory, parsing, and speech recognition. In this paper, we lift the study of edit distances from untimed languages to the timed setting. We define an edit distance between timed words which incorporates both the edit distance between the untimed words and the absolute difference in time stamps. Our edit distance between two timed words is computable in polynomial time. Further, we show that the edit distance between a timed word and a timed language generated by a timed automaton, defined as the edit distance between the word and the closest word in the language, is PSPACE-complete. While computing the edit distance between two timed automata is undecidable, we show that the approximate version, where we decide if the edit distance between two timed automata is either less than a given parameter or more than δ away from the parameter, for δ > 0, can be solved in exponential space and is EXPSPACE-hard. Our definitions and techniques can be generalized to the setting of hybrid systems, and analogous decidability results hold for rectangular automata.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Majumdar, Ritankar},
location = {Berlin, Germany},
pages = {303--312},
publisher = {ACM},
title = {{Edit distance for timed automata}},
doi = {10.1145/2562059.2562141},
year = {2014},
}
@inproceedings{2219,
abstract = {Recently, Döttling et al. (ASIACRYPT 2012) proposed the first chosen-ciphertext (IND-CCA) secure public-key encryption scheme from the learning parity with noise (LPN) assumption. In this work we give an alternative scheme which is conceptually simpler and more efficient. At the core of our construction is a trapdoor technique originally proposed for lattices by Micciancio and Peikert (EUROCRYPT 2012), which we adapt to the LPN setting. The main technical tool is a new double-trapdoor mechanism, together with a trapdoor switching lemma based on a computational variant of the leftover hash lemma.},
author = {Kiltz, Eike and Masny, Daniel and Pietrzak, Krzysztof Z},
isbn = {978-364254630-3},
pages = {1--18},
publisher = {Springer},
title = {{Simple chosen-ciphertext security from low noise LPN}},
doi = {10.1007/978-3-642-54631-0_1},
volume = {8383},
year = {2014},
}
@article{2220,
abstract = {In this issue of Chemistry & Biology, Cokol and colleagues report a systematic study of drug interactions between antifungal compounds. Suppressive drug interactions occur more frequently than previously realized and come in different flavors with interesting implications.},
author = {De Vos, Marjon and Bollenbach, Mark Tobias},
issn = {10745521},
journal = {Chemistry and Biology},
number = {4},
pages = {439--440},
publisher = {Cell Press},
title = {{Suppressive drug interactions between antifungals}},
doi = {10.1016/j.chembiol.2014.04.004},
volume = {21},
year = {2014},
}
@article{2223,
abstract = {Correct positioning of membrane proteins is an essential process in eukaryotic organisms. The plant hormone auxin is distributed through intercellular transport and triggers various cellular responses. Auxin transporters of the PIN-FORMED (PIN) family localize asymmetrically at the plasma membrane (PM) and mediate the directional transport of auxin between cells. A fungal toxin, brefeldin A (BFA), inhibits a subset of guanine nucleotide exchange factors for ADP-ribosylation factor small GTPases (ARF GEFs) including GNOM, which plays a major role in localization of PIN1 predominantly to the basal side of the PM. The Arabidopsis genome encodes 19 ARF-related putative GTPases. However, ARF components involved in PIN1 localization have been genetically poorly defined. Using a fluorescence imaging-based forward genetic approach, we identified an Arabidopsis mutant, bfa-visualized exocytic trafficking defective1 (bex1), in which PM localization of PIN1-green fluorescent protein (GFP) as well as development is hypersensitive to BFA. We found that in bex1 a member of the ARF1 gene family, ARF1A1C, was mutated. ARF1A1C localizes to the trans-Golgi network/early endosome and Golgi apparatus, acts synergistically to BEN1/MIN7 ARF GEF and is important for PIN recycling to the PM. Consistent with the developmental importance of PIN proteins, functional interference with ARF1 resulted in an impaired auxin response gradient and various developmental defects including embryonic patterning defects and growth arrest. Our results show that ARF1A1C is essential for recycling of PIN auxin transporters and for various auxin-dependent developmental processes.},
author = {Tanaka, Hirokazu and Nodzyński, Tomasz and Kitakura, Saeko and Feraru, Mugurel and Sasabe, Michiko and Ishikawa, Tomomi and Kleine Vehn, Jürgen and Kakimoto, Tatsuo and Friml, Jiří},
issn = {00320781},
journal = {Plant and Cell Physiology},
number = {4},
pages = {737--749},
publisher = {Oxford University Press},
title = {{BEX1/ARF1A1C is required for BFA-sensitive recycling of PIN auxin transporters and auxin-mediated development in Arabidopsis}},
doi = {10.1093/pcp/pct196},
volume = {55},
year = {2014},
}
@article{2225,
abstract = {We consider sample covariance matrices of the form X∗X, where X is an M×N matrix with independent random entries. We prove the isotropic local Marchenko-Pastur law, i.e. we prove that the resolvent (X∗X−z)^{−1} converges to a multiple of the identity in the sense of quadratic forms. More precisely, we establish sharp high-probability bounds on the quantity ⟨v,(X∗X−z)^{−1}w⟩−⟨v,w⟩m(z), where m is the Stieltjes transform of the Marchenko-Pastur law and v,w∈CN. We require the logarithms of the dimensions M and N to be comparable. Our result holds down to scales Im z ≥ N^{−1+ε} and throughout the entire spectrum away from 0. We also prove analogous results for generalized Wigner matrices.},
author = {Bloemendal, Alex and Erdős, László and Knowles, Antti and Yau, Horng-Tzer and Yin, Jun},
issn = {10836489},
journal = {Electronic Journal of Probability},
publisher = {Institute of Mathematical Statistics},
title = {{Isotropic local laws for sample covariance and generalized Wigner matrices}},
doi = {10.1214/EJP.v19-3054},
volume = {19},
year = {2014},
}
@article{2226,
abstract = {Coriolis force effects on shear flows are important in geophysical and astrophysical contexts. We report a study on the linear stability and the transient energy growth of the plane Couette flow with system rotation perpendicular to the shear direction. External rotation causes linear instability. At small rotation rates, the onset of linear instability scales inversely with the rotation rate and the optimal transient growth in the linearly stable region is slightly enhanced ∼Re2. The corresponding optimal initial perturbations are characterized by roll structures inclined in the streamwise direction and are twisted under external rotation. At large rotation rates, the transient growth is significantly inhibited and hence linear stability analysis is a reliable indicator for instability.},
author = {Shi, Liang and Hof, Björn and Tilgner, Andreas},
issn = {15393755},
journal = {Physical Review E - Statistical, Nonlinear, and Soft Matter Physics},
number = {1},
publisher = {American Physical Society},
title = {{Transient growth of Ekman-Couette flow}},
doi = {10.1103/PhysRevE.89.013001},
volume = {89},
year = {2014},
}
@article{2228,
abstract = {Fast-spiking, parvalbumin-expressing GABAergic interneurons, a large proportion of which are basket cells (BCs), have a key role in feedforward and feedback inhibition, gamma oscillations and complex information processing. For these functions, fast propagation of action potentials (APs) from the soma to the presynaptic terminals is important. However, the functional properties of interneuron axons remain elusive. We examined interneuron axons by confocally targeted subcellular patch-clamp recording in rat hippocampal slices. APs were initiated in the proximal axon ∼20 μm from the soma and propagated to the distal axon with high reliability and speed. Subcellular mapping revealed a stepwise increase of {$Na^+$} conductance density from the soma to the proximal axon, followed by a further gradual increase in the distal axon. Active cable modeling and experiments with partial channel block revealed that low axonal {$Na^+$} conductance density was sufficient for reliability, but high {$Na^+$} density was necessary for both speed of propagation and fast-spiking AP phenotype. Our results suggest that a supercritical density of {$Na^+$} channels compensates for the morphological properties of interneuron axons (small segmental diameter, extensive branching and high bouton density), ensuring fast AP propagation and high-frequency repetitive firing.},
author = {Hu, Hua and Jonas, Peter M},
issn = {10976256},
journal = {Nature Neuroscience},
number = {5},
pages = {686--693},
publisher = {Nature Publishing Group},
title = {{A supercritical density of {$Na^+$} channels ensures fast signaling in GABAergic interneuron axons}},
doi = {10.1038/nn.3678},
volume = {17},
year = {2014},
}
@article{2229,
abstract = {The distance between {$Ca^{2+}$} channels and release sensors determines the speed and efficacy of synaptic transmission. Tight "nanodomain" channel-sensor coupling initiates transmitter release at synapses in the mature brain, whereas loose "microdomain" coupling appears restricted to early developmental stages. To probe the coupling configuration at a plastic synapse in the mature central nervous system, we performed paired recordings between mossy fiber terminals and CA3 pyramidal neurons in rat hippocampus. Millimolar concentrations of both the fast {$Ca^{2+}$} chelator BAPTA [1,2-bis(2-aminophenoxy)ethane- N,N, N′,N′-tetraacetic acid] and the slow chelator EGTA efficiently suppressed transmitter release, indicating loose coupling between {$Ca^{2+}$} channels and release sensors. Loose coupling enabled the control of initial release probability by fast endogenous {$Ca^{2+}$} buffers and the generation of facilitation by buffer saturation. Thus, loose coupling provides the molecular framework for presynaptic plasticity.},
author = {Vyleta, Nicholas and Jonas, Peter M},
issn = {00368075},
journal = {Science},
number = {6171},
pages = {665--670},
publisher = {American Association for the Advancement of Science},
title = {{Loose coupling between {$Ca^{2+}$} channels and release sensors at a plastic hippocampal synapse}},
doi = {10.1126/science.1244811},
volume = {343},
year = {2014},
}
@article{2230,
abstract = {Intracellular electrophysiological recordings provide crucial insights into elementary neuronal signals such as action potentials and synaptic currents. Analyzing and interpreting these signals is essential for a quantitative understanding of neuronal information processing, and requires both fast data visualization and ready access to complex analysis routines. To achieve this goal, we have developed Stimfit, a free software package for cellular neurophysiology with a Python scripting interface and a built-in Python shell. The program supports most standard file formats for cellular neurophysiology and other biomedical signals through the Biosig library. To quantify and interpret the activity of single neurons and communication between neurons, the program includes algorithms to characterize the kinetics of presynaptic action potentials and postsynaptic currents, estimate latencies between pre- and postsynaptic events, and detect spontaneously occurring events. We validate and benchmark these algorithms, give estimation errors, and provide sample use cases, showing that Stimfit represents an efficient, accessible and extensible way to accurately analyze and interpret neuronal signals.},
author = {Guzmán, José and Schlögl, Alois and Schmidt Hieber, Christoph},
issn = {16625196},
journal = {Frontiers in Neuroinformatics},
number = {FEB},
publisher = {Frontiers Research Foundation},
title = {{Stimfit: Quantifying electrophysiological data with Python}},
doi = {10.3389/fninf.2014.00016},
volume = {8},
year = {2014},
}
@article{2231,
abstract = {Based on the measurements of noise in gene expression performed during the past decade, it has become customary to think of gene regulation in terms of a two-state model, where the promoter of a gene can stochastically switch between an ON and an OFF state. As experiments are becoming increasingly precise and the deviations from the two-state model start to be observable, we ask about the experimental signatures of complex multistate promoters, as well as the functional consequences of this additional complexity. In detail, we i), extend the calculations for noise in gene expression to promoters described by state transition diagrams with multiple states, ii), systematically compute the experimentally accessible noise characteristics for these complex promoters, and iii), use information theory to evaluate the channel capacities of complex promoter architectures and compare them with the baseline provided by the two-state model. We find that adding internal states to the promoter generically decreases channel capacity, except in certain cases, three of which (cooperativity, dual-role regulation, promoter cycling) we analyze in detail.},
author = {Rieckh, Georg and Tkačik, Gašper},
issn = {00063495},
journal = {Biophysical Journal},
number = {5},
pages = {1194--1204},
publisher = {Biophysical Society},
title = {{Noise and information transmission in promoters with multiple internal states}},
doi = {10.1016/j.bpj.2014.01.014},
volume = {106},
year = {2014},
}
@article{2232,
abstract = {The purpose of this contribution is to summarize and discuss recent advances regarding the onset of turbulence in shear flows. The absence of a clear-cut instability mechanism, the spatio-temporal intermittent character and extremely long lived transients are some of the major difficulties encountered in these flows and have hindered progress towards understanding the transition process. We will show for the case of pipe flow that concepts from nonlinear dynamics and statistical physics can help to explain the onset of turbulence. In particular, the turbulent structures (puffs) observed close to onset are spatially localized chaotic transients and their lifetimes increase super-exponentially with Reynolds number. At the same time fluctuations of individual turbulent puffs can (although very rarely) lead to the nucleation of new puffs. The competition between these two stochastic processes gives rise to a non-equilibrium phase transition where turbulence changes from a super-transient to a sustained state.},
author = {Song, Baofang and Hof, Björn},
issn = {17425468},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {2},
publisher = {IOP Publishing Ltd.},
title = {{Deterministic and stochastic aspects of the transition to turbulence}},
doi = {10.1088/1742-5468/2014/02/P02001},
volume = {2014},
year = {2014},
}
@article{2233,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, valuing a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by λ^i, where the discount factor λ is a fixed rational number greater than 1. The value of a word is the minimal value of the automaton runs on it. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, reflecting the assumption that earlier weights are more important than later weights. Unfortunately, determinization of NDAs, which is often essential in formal verification, is, in general, not possible. We provide positive news, showing that every NDA with an integral discount factor is determinizable. We complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: for every nonintegral rational discount factor λ, there is a nondeterminizable λ-NDA. We also prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. For general NDAs, we look into approximate determinization, which is always possible as the influence of a word's suffix decays. We show that the naive approach, of unfolding the automaton computations up to a sufficient level, is doubly exponential in the discount factor. We provide an alternative construction for approximate determinization, which is singly exponential in the discount factor, in the precision, and in the number of states. We also prove matching lower bounds, showing that the exponential dependency on each of these three parameters cannot be avoided. All our results hold equally for automata over finite words and for automata over infinite words.},
author = {Boker, Udi and Henzinger, Thomas A},
issn = {18605974},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation of Computational Logic},
title = {{Exact and approximate determinization of discounted-sum automata}},
doi = {10.2168/LMCS-10(1:10)2014},
volume = {10},
year = {2014},
}
@article{2234,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with κ limit-average functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the case of one limit-average function, both randomization and memory are necessary for strategies even for ε-approximation, and that finite-memory randomized strategies are sufficient for achieving Pareto optimal values. Under the satisfaction objective, in contrast to the case of one limit-average function, infinite memory is necessary for strategies achieving a specific value (i.e. randomized finite-memory strategies are not sufficient), whereas memoryless randomized strategies are sufficient for ε-approximation, for all ε > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be ε-approximated in time polynomial in the size of the MDP and 1/ε, and exponential in the number of limit-average functions, for all ε > 0. Our analysis also reveals flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, corrects the flaws, and allows us to obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
issn = {18605974},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation of Computational Logic},
title = {{Markov decision processes with multiple long-run average objectives}},
doi = {10.2168/LMCS-10(1:13)2014},
volume = {10},
year = {2014},
}
@article{2235,
abstract = {Emerging infectious diseases (EIDs) pose a risk to human welfare, both directly and indirectly, by affecting managed livestock and wildlife that provide valuable resources and ecosystem services, such as the pollination of crops. Honeybees (Apis mellifera), the prevailing managed insect crop pollinator, suffer from a range of emerging and exotic high-impact pathogens, and population maintenance requires active management by beekeepers to control them. Wild pollinators such as bumblebees (Bombus spp.) are in global decline, one cause of which may be pathogen spillover from managed pollinators like honeybees or commercial colonies of bumblebees. Here we use a combination of infection experiments and landscape-scale field data to show that honeybee EIDs are indeed widespread infectious agents within the pollinator assemblage. The prevalence of deformed wing virus (DWV) and the exotic parasite Nosema ceranae in honeybees and bumblebees is linked; as honeybees have higher DWV prevalence, and sympatric bumblebees and honeybees are infected by the same DWV strains, Apis is the likely source of at least one major EID in wild pollinators. Lessons learned from vertebrates highlight the need for increased pathogen control in managed bee species to maintain wild pollinators, as declines in native pollinators may be caused by interspecies pathogen transmission originating from managed pollinators.},
author = {Fürst, Matthias and McMahon, Dino and Osborne, Juliet and Paxton, Robert and Brown, Mark},
issn = {00280836},
journal = {Nature},
number = {7488},
pages = {364--366},
publisher = {Nature Publishing Group},
title = {{Disease associations between honeybees and bumblebees as a threat to wild pollinators}},
doi = {10.1038/nature12977},
volume = {506},
year = {2014},
}
@inproceedings{2236,
abstract = {Consider a joint distribution (X,A) on a set X × {0,1}^ℓ. We show that for any family F of distinguishers, there exists a simulator h such that (1) no function in F can distinguish (X,A) from (X,h(X)) with advantage ε, (2) h is only O(2^{3ℓ} ε^{-2}) times less efficient than the functions in F. For the most interesting settings of the parameters (in particular, the cryptographic case where X has superlogarithmic min-entropy, ε > 0 is negligible and F consists of circuits of polynomial size), we can make the simulator h deterministic. As an illustrative application of our theorem, we give a new security proof for the leakage-resilient stream-cipher from Eurocrypt'09. Our proof is simpler and quantitatively much better than the original proof using the dense model theorem, giving meaningful security guarantees if instantiated with a standard blockcipher like AES. Subsequent to this work, Chung, Lui and Pass gave an interactive variant of our main theorem, and used it to investigate weak notions of Zero-Knowledge. Vadhan and Zheng give a more constructive version of our theorem using their new uniform min-max theorem.},
author = {Jetchev, Dimitar and Pietrzak, Krzysztof Z},
editor = {Lindell, Yehuda},
isbn = {978-364254241-1},
location = {San Diego, USA},
pages = {566--590},
publisher = {Springer},
title = {{How to fake auxiliary input}},
doi = {10.1007/978-3-642-54242-8_24},
volume = {8349},
year = {2014},
}
@article{2246,
abstract = {Muller games are played by two players moving a token along a graph; the winner is determined by the set of vertices that occur infinitely often. The central algorithmic problem is to compute the winning regions for the players. Different classes and representations of Muller games lead to problems of varying computational complexity. One such class are parity games; these are of particular significance in computational complexity, as they remain one of the few combinatorial problems known to be in NP ∩ co-NP but not known to be in P. We show that winning regions for a Muller game can be determined from the alternating structure of its traps. To every Muller game we then associate a natural number that we call its trap depth; this parameter measures how complicated the trap structure is. We present algorithms for parity games that run in polynomial time for graphs of bounded trap depth, and in general run in time exponential in the trap depth. },
author = {Grinshpun, Andrey and Phalitnonkiat, Pakawat and Rubin, Sasha and Tarfulea, Andrei},
issn = {0304-3975},
journal = {Theoretical Computer Science},
pages = {73 -- 91},
publisher = {Elsevier},
title = {{Alternating traps in Muller and parity games}},
doi = {10.1016/j.tcs.2013.11.032},
volume = {521},
year = {2014},
}
@article{2249,
abstract = {The unfolded protein response (UPR) is a signaling network triggered by overload of protein-folding demand in the endoplasmic reticulum (ER), a condition termed ER stress. The UPR is critical for growth and development; nonetheless, connections between the UPR and other cellular regulatory processes remain largely unknown. Here, we identify a link between the UPR and the phytohormone auxin, a master regulator of plant physiology. We show that ER stress triggers down-regulation of auxin receptors and transporters in Arabidopsis thaliana. We also demonstrate that an Arabidopsis mutant of a conserved ER stress sensor IRE1 exhibits defects in the auxin response and levels. These data not only support that the plant IRE1 is required for auxin homeostasis, they also reveal a species-specific feature of IRE1 in multicellular eukaryotes. Furthermore, by establishing that UPR activation is reduced in mutants of ER-localized auxin transporters, including PIN5, we define a long-neglected biological significance of ER-based auxin regulation. We further examine the functional relationship of IRE1 and PIN5 by showing that an ire1 pin5 triple mutant enhances defects of UPR activation and auxin homeostasis in ire1 or pin5. Our results imply that the plant UPR has evolved a hormone-dependent strategy for coordinating ER function with physiological processes.},
author = {Chen, Yani and Aung, Kyaw and Rolčík, Jakub and Walicki, Kathryn and Friml, Jirí and Brandizzi, Federica},
issn = {0960-7412},
journal = {Plant Journal},
number = {1},
pages = {97 -- 107},
publisher = {Wiley-Blackwell},
title = {{Inter-regulation of the unfolded protein response and auxin signaling}},
doi = {10.1111/tpj.12373},
volume = {77},
year = {2014},
}
@article{2250,
abstract = {The genome sequences of new viruses often contain many "orphan" or "taxon-specific" proteins apparently lacking homologs. However, because viral proteins evolve very fast, commonly used sequence similarity detection methods such as BLAST may overlook homologs. We analyzed a data set of proteins from RNA viruses characterized as "genus specific" by BLAST. More powerful methods developed recently, such as HHblits or HHpred (available through web-based, user-friendly interfaces), could detect distant homologs of a quarter of these proteins, suggesting that these methods should be used to annotate viral genomes. In-depth manual analyses of a subset of the remaining sequences, guided by contextual information such as taxonomy, gene order, or domain cooccurrence, identified distant homologs of another third. Thus, a combination of powerful automated methods and manual analyses can uncover distant homologs of many proteins thought to be orphans. We expect these methodological results to be also applicable to cellular organisms, since they generally evolve much more slowly than RNA viruses. As an application, we reanalyzed the genome of a bee pathogen, Chronic bee paralysis virus (CBPV). We could identify homologs of most of its proteins thought to be orphans; in each case, identifying homologs provided functional clues. We discovered that CBPV encodes a domain homologous to the Alphavirus methyltransferase-guanylyltransferase; a putative membrane protein, SP24, with homologs in unrelated insect viruses and insect-transmitted plant viruses having different morphologies (cileviruses, higreviruses, blunerviruses, negeviruses); and a putative virion glycoprotein, ORF2, also found in negeviruses. SP24 and ORF2 are probably major structural components of the virions.},
author = {Kuchibhatla, Durga and Sherman, Westley and Chung, Betty and Cook, Shelley and Schneider, Georg and Eisenhaber, Birgit and Karlin, David},
issn = {0022-538X},
journal = {Journal of Virology},
number = {1},
pages = {10 -- 20},
publisher = {ASM},
title = {{Powerful sequence similarity search methods and in-depth manual analyses can identify remote homologs in many apparently "orphan" viral proteins}},
doi = {10.1128/JVI.02595-13},
volume = {88},
year = {2014},
}
@article{2251,
abstract = {Sharp wave/ripple (SWR, 150–250 Hz) hippocampal events have long been postulated to be involved in memory consolidation. However, more recent work has investigated SWRs that occur during active waking behaviour: findings that suggest that SWRs may also play a role in cell assembly strengthening or spatial working memory. Do such theories of SWR function apply to animal learning? This review discusses how general theories linking SWRs to memory-related function may explain circuit mechanisms related to rodent spatial learning and to the associated stabilization of new cognitive maps.},
author = {Csicsvari, Jozsef L and Dupret, David},
issn = {0962-8436},
journal = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
number = {1635},
publisher = {The Royal Society},
title = {{Sharp wave/ripple network oscillations and learning-associated hippocampal maps}},
doi = {10.1098/rstb.2012.0528},
volume = {369},
year = {2014},
}
@article{2253,
abstract = {Plant growth is achieved predominantly by cellular elongation, which is thought to be controlled on several levels by apoplastic auxin. Auxin export into the apoplast is achieved by plasma membrane efflux catalysts of the PIN-FORMED (PIN) and ATP-binding cassette protein subfamily B/phosphor- glycoprotein (ABCB/PGP) classes; the latter were shown to depend on interaction with the FKBP42, TWISTED DWARF1 (TWD1). Here by using a transgenic approach in combination with phenotypical, biochemical and cell biological analyses we demonstrate the importance of a putative C-terminal in-plane membrane anchor of TWD1 in the regulation of ABCB-mediated auxin transport. In contrast with dwarfed twd1 loss-of-function alleles, TWD1 gain-of-function lines that lack a putative in-plane membrane anchor (HA-TWD1-Ct) show hypermorphic plant architecture, characterized by enhanced stem length and leaf surface but reduced shoot branching. Greater hypocotyl length is the result of enhanced cell elongation that correlates with reduced polar auxin transport capacity for HA-TWD1-Ct. As a consequence, HA-TWD1-Ct displays higher hypocotyl auxin accumulation, which is shown to result in elevated auxin-induced cell elongation rates. Our data highlight the importance of C-terminal membrane anchoring for TWD1 action, which is required for specific regulation of ABCB-mediated auxin transport. These data support a model in which TWD1 controls lateral ABCB1-mediated export into the apoplast, which is required for auxin-mediated cell elongation.},
author = {Bailly, Aurélien and Wang, Bangjun and Zwiewka, Marta and Pollmann, Stephan and Schenck, Daniel and Lüthen, Hartwig and Schulz, Alexander and Friml, Jirí and Geisler, Markus},
issn = {0960-7412},
journal = {Plant Journal},
number = {1},
pages = {108 -- 118},
publisher = {Wiley-Blackwell},
title = {{Expression of TWISTED DWARF1 lacking its in-plane membrane anchor leads to increased cell elongation and hypermorphic growth}},
doi = {10.1111/tpj.12369},
volume = {77},
year = {2014},
}
@article{2254,
abstract = {Theta-gamma network oscillations are thought to represent key reference signals for information processing in neuronal ensembles, but the underlying synaptic mechanisms remain unclear. To address this question, we performed whole-cell (WC) patch-clamp recordings from mature hippocampal granule cells (GCs) in vivo in the dentate gyrus of anesthetized and awake rats. GCs in vivo fired action potentials at low frequency, consistent with sparse coding in the dentate gyrus. GCs were exposed to barrages of fast AMPAR-mediated excitatory postsynaptic currents (EPSCs), primarily relayed from the entorhinal cortex, and inhibitory postsynaptic currents (IPSCs), presumably generated by local interneurons. EPSCs exhibited coherence with the field potential predominantly in the theta frequency band, whereas IPSCs showed coherence primarily in the gamma range. Action potentials in GCs were phase locked to network oscillations. Thus, theta-gamma-modulated synaptic currents may provide a framework for sparse temporal coding of information in the dentate gyrus.},
author = {Pernia-Andrade, Alejandro and Jonas, Peter M},
issn = {0896-6273},
journal = {Neuron},
number = {1},
pages = {140 -- 152},
publisher = {Elsevier},
title = {{Theta-gamma-modulated synaptic currents in hippocampal granule cells in vivo define a mechanism for network oscillations}},
doi = {10.1016/j.neuron.2013.09.046},
volume = {81},
year = {2014},
}
@article{2255,
abstract = {Motivated by applications in biology, we present an algorithm for estimating the length of tube-like shapes in 3-dimensional Euclidean space. In a first step, we combine the tube formula of Weyl with integral geometric methods to obtain an integral representation of the length, which we approximate using a variant of the Koksma-Hlawka Theorem. In a second step, we use tools from computational topology to decrease the dependence on small perturbations of the shape. We present computational experiments that shed light on the stability and the convergence rate of our algorithm.},
author = {Edelsbrunner, Herbert and Pausinger, Florian},
issn = {0924-9907},
journal = {Journal of Mathematical Imaging and Vision},
number = {1},
pages = {164 -- 177},
publisher = {Springer},
title = {{Stable length estimates of tube-like shapes}},
doi = {10.1007/s10851-013-0468-x},
volume = {50},
year = {2014},
}
@article{2257,
abstract = {Maximum entropy models are the least structured probability distributions that exactly reproduce a chosen set of statistics measured in an interacting network. Here we use this principle to construct probabilistic models which describe the correlated spiking activity of populations of up to 120 neurons in the salamander retina as it responds to natural movies. Already in groups as small as 10 neurons, interactions between spikes can no longer be regarded as small perturbations in an otherwise independent system; for 40 or more neurons pairwise interactions need to be supplemented by a global interaction that controls the distribution of synchrony in the population. Here we show that such “K-pairwise” models—being systematic extensions of the previously used pairwise Ising models—provide an excellent account of the data. We explore the properties of the neural vocabulary by: 1) estimating its entropy, which constrains the population's capacity to represent visual information; 2) classifying activity patterns into a small set of metastable collective modes; 3) showing that the neural codeword ensembles are extremely inhomogenous; 4) demonstrating that the state of individual neurons is highly predictable from the rest of the population, allowing the capacity for error correction.},
author = {Tkacik, Gasper and Marre, Olivier and Amodei, Dario and Schneidman, Elad and Bialek, William and Berry, Michael},
issn = {1553-734X},
journal = {PLoS Computational Biology},
number = {1},
publisher = {Public Library of Science},
title = {{Searching for collective behavior in a large network of sensory neurons}},
doi = {10.1371/journal.pcbi.1003408},
volume = {10},
year = {2014},
}
@article{7598,
author = {Tan, Shutang and Xue, Hong-Wei},
issn = {2211-1247},
journal = {Cell Reports},
number = {5},
pages = {1692 -- 1702},
publisher = {Elsevier},
title = {{Casein kinase 1 regulates ethylene synthesis by phosphorylating and promoting the turnover of ACS5}},
doi = {10.1016/j.celrep.2014.10.047},
volume = {9},
year = {2014},
}
@inproceedings{772,
abstract = {Lock-free concurrent algorithms guarantee that some concurrent operation will always make progress in a finite number of steps. Yet programmers prefer to treat concurrent code as if it were wait-free, guaranteeing that all operations always make progress. Unfortunately, designing wait-free algorithms is generally a very complex task, and the resulting algorithms are not always efficient. While obtaining efficient wait-free algorithms has been a long-time goal for the theory community, most non-blocking commercial code is only lock-free. This paper suggests a simple solution to this problem. We show that, for a large class of lock-free algorithms, under scheduling conditions which approximate those found in commercial hardware architectures, lock-free algorithms behave as if they are wait-free. In other words, programmers can keep on designing simple lock-free algorithms instead of complex wait-free ones, and in practice, they will get wait-free progress. Our main contribution is a new way of analyzing a general class of lock-free algorithms under a stochastic scheduler. Our analysis relates the individual performance of processes with the global performance of the system using Markov chain lifting between a complex per-process chain and a simpler system progress chain. We show that lock-free algorithms are not only wait-free with probability 1, but that in fact a general subset of lock-free algorithms can be closely bounded in terms of the average number of steps required until an operation completes. To the best of our knowledge, this is the first attempt to analyze progress conditions, typically stated in relation to a worst case adversary, in a stochastic model capturing their expected asymptotic behavior.},
author = {Alistarh, Dan-Adrian and Censor Hillel, Keren and Shavit, Nir},
booktitle = {Proceedings of the 46th Annual ACM Symposium on Theory of Computing (STOC 2014)},
pages = {714 -- 723},
publisher = {ACM},
title = {{Are lock-free concurrent algorithms practically wait-free?}},
doi = {10.1145/2591796.2591836},
year = {2014},
}
@inproceedings{775,
abstract = {The long-lived renaming problem appears in shared-memory systems where a set of threads need to register and deregister frequently from the computation, while concurrent operations scan the set of currently registered threads. Instances of this problem show up in concurrent implementations of transactional memory, flat combining, thread barriers, and memory reclamation schemes for lock-free data structures. In this paper, we analyze a randomized solution for long-lived renaming. The algorithmic technique we consider, called the Level Array, has previously been used for hashing and one-shot (single-use) renaming. Our main contribution is to prove that, in long-lived executions, where processes may register and deregister polynomially many times, the technique guarantees constant steps on average and O (log log n) steps with high probability for registering, unit cost for deregistering, and O (n) steps for collect queries, where n is an upper bound on the number of processes that may be active at any point in time. We also show that the algorithm has the surprising property that it is self-healing: under reasonable assumptions on the schedule, operations running while the data structure is in a degraded state implicitly help the data structure re-balance itself. This subtle mechanism obviates the need for expensive periodic rebuilding procedures. Our benchmarks validate this approach, showing that, for typical use parameters, the average number of steps a process takes to register is less than two and the worst-case number of steps is bounded by six, even in executions with billions of operations. We contrast this with other randomized implementations, whose worst-case behavior we show to be unreliable, and with deterministic implementations, whose cost is linear in n.},
author = {Alistarh, Dan-Adrian and Kopinsky, Justin and Matveev, Alexander and Shavit, Nir},
booktitle = {Proceedings of the 34th IEEE International Conference on Distributed Computing Systems (ICDCS 2014)},
pages = {348 -- 357},
publisher = {IEEE},
title = {{The levelarray: A fast, practical long-lived renaming algorithm}},
doi = {10.1109/ICDCS.2014.43},
year = {2014},
}
@article{7771,
abstract = {In their Letter, Schreck, Bertrand, O'Hern and Shattuck [Phys. Rev. Lett. 107, 078301 (2011)] study nonlinearities in jammed particulate systems that arise when contacts are altered. They conclude that there is "no harmonic regime in the large system limit for all compressions" and "at jamming onset for any system size." Their argument rests on the claim that for finite-range repulsive potentials, of the form used in studies of jamming, the breaking or forming of a single contact is sufficient to destroy the linear regime. We dispute these conclusions and argue that linear response is both justified and essential for understanding the nature of the jammed solid. },
author = {Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
issn = {0031-9007},
journal = {Physical Review Letters},
number = {4},
publisher = {American Physical Society},
title = {{Comment on “Repulsive contact interactions make jammed particulate systems inherently nonharmonic”}},
doi = {10.1103/physrevlett.112.049801},
volume = {112},
year = {2014},
}
@article{468,
abstract = {Invasive alien parasites and pathogens are a growing threat to biodiversity worldwide, which can contribute to the extinction of endemic species. On the Galápagos Islands, the invasive parasitic fly Philornis downsi poses a major threat to the endemic avifauna. Here, we investigated the influence of this parasite on the breeding success of two Darwin's finch species, the warbler finch (Certhidea olivacea) and the sympatric small tree finch (Camarhynchus parvulus), on Santa Cruz Island in 2010 and 2012. While the population of the small tree finch appeared to be stable, the warbler finch has experienced a dramatic decline in population size on Santa Cruz Island since 1997. We aimed to identify whether warbler finches are particularly vulnerable during different stages of the breeding cycle. Contrary to our prediction, breeding success was lower in the small tree finch than in the warbler finch. In both species P. downsi had a strong negative impact on breeding success and our data suggest that heavy rain events also lowered the fledging success. On the one hand parents might be less efficient in compensating their chicks' energy loss due to parasitism as they might be less efficient in foraging on days of heavy rain. On the other hand, intense rainfalls might lead to increased humidity and more rapid cooling of the nests. In the case of the warbler finch we found that the control of invasive plant species with herbicides had a significant additive negative impact on the breeding success. It is very likely that the availability of insects (i.e. food abundance) is lower in such controlled areas, as herbicide usage led to the removal of the entire understory. Predation seems to be a minor factor in brood loss.},
author = {Cimadom, Arno and Ulloa, Angel and Meidl, Patrick and Zöttl, Markus and Zöttl, Elisabet and Fessl, Birgit and Nemeth, Erwin and Dvorak, Michael and Cunninghame, Francesca and Tebbich, Sabine},
journal = {PLoS One},
number = {9},
publisher = {Public Library of Science},
title = {{Invasive parasites, habitat change and heavy rainfall reduce breeding success in Darwin's finches}},
doi = {10.1371/journal.pone.0107518},
volume = {9},
year = {2014},
}
@inproceedings{475,
abstract = {First cycle games (FCG) are played on a finite graph by two players who push a token along the edges until a vertex is repeated, and a simple cycle is formed. The winner is determined by some fixed property Y of the sequence of labels of the edges (or nodes) forming this cycle. These games are traditionally of interest because of their connection with infinite-duration games such as parity and mean-payoff games. We study the memory requirements for winning strategies of FCGs and certain associated infinite duration games. We exhibit a simple FCG that is not memoryless determined (this corrects a mistake in Memoryless determinacy of parity and mean payoff games: a simple proof by Bj⋯orklund, Sandberg, Vorobyov (2004) that claims that FCGs for which Y is closed under cyclic permutations are memoryless determined). We show that θ (n)! memory (where n is the number of nodes in the graph), which is always sufficient, may be necessary to win some FCGs. On the other hand, we identify easy to check conditions on Y (i.e., Y is closed under cyclic permutations, and both Y and its complement are closed under concatenation) that are sufficient to ensure that the corresponding FCGs and their associated infinite duration games are memoryless determined. We demonstrate that many games considered in the literature, such as mean-payoff, parity, energy, etc., satisfy these conditions. On the complexity side, we show (for efficiently computable Y) that while solving FCGs is in PSPACE, solving some families of FCGs is PSPACE-hard. },
author = {Aminof, Benjamin and Rubin, Sasha},
booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS},
location = {Grenoble, France},
pages = {83 -- 90},
publisher = {Open Publishing Association},
title = {{First cycle games}},
doi = {10.4204/EPTCS.146.11},
volume = {146},
year = {2014},
}
@article{535,
abstract = {Energy games belong to a class of turn-based two-player infinite-duration games played on a weighted directed graph. It is one of the rare and intriguing combinatorial problems that lie in NP∩co-NP, but are not known to be in P. The existence of polynomial-time algorithms has been a major open problem for decades and apart from pseudopolynomial algorithms there is no algorithm that solves any non-trivial subclass in polynomial time. In this paper, we give several results based on the weight structures of the graph. First, we identify a notion of penalty and present a polynomial-time algorithm when the penalty is large. Our algorithm is the first polynomial-time algorithm on a large class of weighted graphs. It includes several worst-case instances on which previous algorithms, such as value iteration and random facet algorithms, require at least sub-exponential time. Our main technique is developing the first non-trivial approximation algorithm and showing how to convert it to an exact algorithm. Moreover, we show that in a practical case in verification where weights are clustered around a constant number of values, the energy game problem can be solved in polynomial time. We also show that the problem is still as hard as in general when the clique-width is bounded or the graph is strongly ergodic, suggesting that restricting the graph structure does not necessarily help.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Krinninger, Sebastian and Nanongkai, Danupon},
journal = {Algorithmica},
number = {3},
pages = {457 -- 492},
publisher = {Springer},
title = {{Polynomial time algorithms for energy games with special weight structures}},
doi = {10.1007/s00453-013-9843-7},
volume = {70},
year = {2014},
}
@article{537,
abstract = {Transgenerational effects are broader than only parental relationships. Despite mounting evidence that multigenerational effects alter phenotypic and life-history traits, our understanding of how they combine to determine fitness is not well developed because of the added complexity necessary to study them. Here, we derive a quantitative genetic model of adaptation to an extraordinary new environment by an additive genetic component, phenotypic plasticity, maternal and grandmaternal effects. We show how, at equilibrium, negative maternal and negative grandmaternal effects maximize expected population mean fitness. We define negative transgenerational effects as those that have a negative effect on trait expression in the subsequent generation, that is, they slow, or potentially reverse, the expected evolutionary dynamic. When maternal effects are positive, negative grandmaternal effects are preferred. As expected under Mendelian inheritance, the grandmaternal effects have a lower impact on fitness than the maternal effects, but this dual inheritance model predicts a more complex relationship between maternal and grandmaternal effects to constrain phenotypic variance and so maximize expected population mean fitness in the offspring.},
author = {Prizak, Roshan and Ezard, Thomas and Hoyle, Rebecca},
journal = {Ecology and Evolution},
number = {15},
pages = {3139 -- 3145},
publisher = {Wiley-Blackwell},
title = {{Fitness consequences of maternal and grandmaternal effects}},
doi = {10.1002/ece3.1150},
volume = {4},
year = {2014},
}
@misc{5411,
abstract = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing.
In this paper, we study compositional properties of the IOCO-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the IOCO conformance relation, the resulting methodology can be applied to a broader class of systems.},
author = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
issn = {2664-1690},
pages = {20},
publisher = {IST Austria},
title = {{Compositional specifications for IOCO testing}},
doi = {10.15479/AT:IST-2014-148-v2-1},
year = {2014},
}