@inproceedings{2048,
abstract = {Leakage resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state. Unfortunately, most leakage resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this mostly is an artefact of the security proof and in practice much simpler construction may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage-functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three round Feistel network instantiated with a leakage resilient PRF yields a leakage resilient PRP if the inputs are chosen non-adaptively (This complements the result of Dodis and Pietrzak [CRYPTO’10] who show that if adaptive queries are allowed, a superlogarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage resilient PRF if both, the leakage-function and the inputs, are chosen non-adaptively.},
author = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim},
booktitle = {Conference proceedings CHES 2012},
location = {Leuven, Belgium},
pages = {213--232},
publisher = {Springer},
title = {{Practical leakage-resilient symmetric cryptography}},
doi = {10.1007/978-3-642-33027-8_13},
volume = {7428},
year = {2012},
}
@inproceedings{2049,
abstract = {We propose a new authentication protocol that is provably secure based on a ring variant of the learning parity with noise (LPN) problem. The protocol follows the design principle of the LPN-based protocol from Eurocrypt’11 (Kiltz et al.), and like it, is a two round protocol secure against active attacks. Moreover, our protocol has small communication complexity and a very small footprint which makes it applicable in scenarios that involve low-cost, resource-constrained devices.
Performance-wise, our protocol is more efficient than previous LPN-based schemes, such as the many variants of the Hopper-Blum (HB) protocol and the aforementioned protocol from Eurocrypt’11. Our implementation results show that it is even comparable to the standard challenge-and-response protocols based on the AES block-cipher. Our basic protocol is roughly 20 times slower than AES, but with the advantage of having 10 times smaller code size. Furthermore, if a few hundred bytes of non-volatile memory are available to allow the storage of some off-line pre-computations, then the online phase of our protocols is only twice as slow as AES.
},
author = {Heyse, Stefan and Kiltz, Eike and Lyubashevsky, Vadim and Paar, Christof and Pietrzak, Krzysztof Z},
booktitle = {Conference proceedings FSE 2012},
location = {Washington, DC, USA},
pages = {346--365},
publisher = {Springer},
title = {{Lapin: An efficient authentication protocol based on ring-LPN}},
doi = {10.1007/978-3-642-34047-5_20},
volume = {7549},
year = {2012},
}
@article{493,
abstract = {The BCI competition IV stands in the tradition of prior BCI competitions that aim to provide high quality neuroscientific data for open access to the scientific community. As experienced already in prior competitions not only scientists from the narrow field of BCI compete, but scholars with a broad variety of backgrounds and nationalities. They include high specialists as well as students. The goals of all BCI competitions have always been to challenge with respect to novel paradigms and complex data. We report on the following challenges: (1) asynchronous data, (2) synthetic, (3) multi-class continuous data, (4) session-to-session transfer, (5) directionally modulated MEG, (6) finger movements recorded by ECoG. As after past competitions, our hope is that winning entries may enhance the analysis methods of future BCIs.},
author = {Tangermann, Michael and Müller, Klaus and Aertsen, Ad and Birbaumer, Niels and Braun, Christoph and Brunner, Clemens and Leeb, Robert and Mehring, Carsten and Miller, Kai and Müller-Putz, Gernot and Nolte, Guido and Pfurtscheller, Gert and Preissl, Hubert and Schalk, Gerwin and Schlögl, Alois and Vidaurre, Carmen and Waldert, Stephan and Blankertz, Benjamin},
journal = {Frontiers in Neuroscience},
publisher = {Frontiers Research Foundation},
title = {{Review of the BCI competition IV}},
doi = {10.3389/fnins.2012.00055},
volume = {6},
year = {2012},
}
@article{494,
abstract = {We solve the longstanding open problems of the blow-up involved in the translations, when possible, of a nondeterministic Büchi word automaton (NBW) to a nondeterministic co-Büchi word automaton (NCW) and to a deterministic co-Büchi word automaton (DCW). For the NBW to NCW translation, the currently known upper bound is $2^{O(n \log n)}$ and the lower bound is $1.5^n$. We improve the upper bound to $n 2^n$ and describe a matching lower bound of $2^{\Omega(n)}$. For the NBW to DCW translation, the currently known upper bound is $2^{O(n \log n)}$. We improve it to $2^{O(n)}$, which is asymptotically tight. Both of our upper-bound constructions are based on a simple subset construction, do not involve intermediate automata with richer acceptance conditions, and can be implemented symbolically. We continue and solve the open problems of translating nondeterministic Streett, Rabin, Muller, and parity word automata to NCW and to DCW. Going via an intermediate NBW is not optimal and we describe direct, simple, and asymptotically tight constructions, involving a $2^{O(n)}$ blow-up. The constructions are variants of the subset construction, providing a unified approach for translating all common classes of automata to NCW and DCW. Beyond the theoretical importance of the results, we point to numerous applications of the new constructions. In particular, they imply a simple subset-construction based translation, when possible, of LTL to deterministic Büchi word automata.},
author = {Boker, Udi and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Translating to Co-Büchi made tight, unified, and useful}},
doi = {10.1145/2362355.2362357},
volume = {13},
year = {2012},
}
@inproceedings{495,
abstract = {An automaton with advice is a finite state automaton which has access to an additional fixed infinite string called an advice tape. We refine the Myhill-Nerode theorem to characterize the languages of finite strings that are accepted by automata with advice. We do the same for tree automata with advice.},
author = {Kruckman, Alex and Rubin, Sasha and Sheridan, John and Zax, Ben},
booktitle = {Proceedings GandALF 2012},
location = {Napoli, Italy},
pages = {238--246},
publisher = {Open Publishing Association},
title = {{A Myhill-Nerode theorem for automata with advice}},
doi = {10.4204/EPTCS.96.18},
volume = {96},
year = {2012},
}
@inproceedings{496,
abstract = {We study the expressive power of logical interpretations on the class of scattered trees, namely those with countably many infinite branches. Scattered trees can be thought of as the tree analogue of scattered linear orders. Every scattered tree has an ordinal rank that reflects the structure of its infinite branches. We prove, roughly, that trees and orders of large rank cannot be interpreted in scattered trees of small rank. We consider a quite general notion of interpretation: each element of the interpreted structure is represented by a set of tuples of subsets of the interpreting tree. Our trees are countable, not necessarily finitely branching, and may have finitely many unary predicates as labellings. We also show how to replace injective set-interpretations in (not necessarily scattered) trees by 'finitary' set-interpretations.},
author = {Rabinovich, Alexander and Rubin, Sasha},
booktitle = {Proceedings of the 27th Annual IEEE/ACM Symposium on Logic in Computer Science (LICS 2012)},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Interpretations in trees with countably many branches}},
doi = {10.1109/LICS.2012.65},
year = {2012},
}
@inproceedings{497,
abstract = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation is allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires $O(n^3 \cdot m)$ time as compared to the previous known $O(n^6)$-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires $O(m^2)$-time as compared to the previous known $O((n \cdot m)^2)$-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm. © Krishnendu Chatterjee, Siddhesh Chaubal, and Pritish Kamath.},
author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
location = {Fontainebleau, France},
pages = {167--182},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Faster algorithms for alternating refinement relations}},
doi = {10.4230/LIPIcs.CSL.2012.167},
volume = {16},
year = {2012},
}
@article{498,
abstract = {Understanding patterns and correlates of local adaptation in heterogeneous landscapes can provide important information in the selection of appropriate seed sources for restoration. We assessed the extent of local adaptation of fitness components in 12 population pairs of the perennial herb Rutidosis leptorrhynchoides (Asteraceae) and examined whether spatial scale (0.7-600 km), environmental distance, quantitative (QST) and neutral (FST) genetic differentiation, and size of the local and foreign populations could predict patterns of adaptive differentiation. Local adaptation varied among populations and fitness components. Including all population pairs, local adaptation was observed for seedling survival, but not for biomass, while foreign genotype advantage was observed for reproduction (number of inflorescences). Among population pairs, local adaptation increased with QST and local population size for biomass. QST was associated with environmental distance, suggesting ecological selection for phenotypic divergence. However, low FST and variation in population structure in small populations demonstrates the interaction of gene flow and drift in constraining local adaptation in R. leptorrhynchoides. Our study indicates that for species in heterogeneous landscapes, collecting seed from large populations from similar environments to candidate sites is likely to provide the most appropriate seed sources for restoration.},
author = {Pickup, Melinda and Field, David and Rowell, David and Young, Andrew},
journal = {Evolutionary Applications},
number = {8},
pages = {913--924},
publisher = {Wiley-Blackwell},
title = {{Predicting local adaptation in fragmented plant populations: Implications for restoration genetics}},
doi = {10.1111/j.1752-4571.2012.00284.x},
volume = {5},
year = {2012},
}
@article{506,
author = {Sixt, Michael K},
journal = {Journal of Cell Biology},
number = {3},
pages = {347--349},
publisher = {Rockefeller University Press},
title = {{Cell migration: Fibroblasts find a new way to get ahead}},
doi = {10.1083/jcb.201204039},
volume = {197},
year = {2012},
}
@misc{5377,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and ω-regular objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean-payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two-player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP-hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two-player pushdown games. Finally we also show that all the problems have the same complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{Mean-payoff pushdown games}},
doi = {10.15479/AT:IST-2012-0002},
year = {2012},
}
@misc{5378,
abstract = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation is allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires $O(n^3 \cdot m)$ time as compared to the previous known $O(n^6)$-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires $O(m^2)$-time as compared to the previous known $O((n \cdot m)^2)$-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm.},
author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
issn = {2664-1690},
pages = {21},
publisher = {IST Austria},
title = {{Faster algorithms for alternating refinement relations}},
doi = {10.15479/AT:IST-2012-0001},
year = {2012},
}
@misc{5396,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pair-wise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can out-perform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
issn = {2664-1690},
pages = {13},
publisher = {IST Austria},
title = {{Approximating marginals using discrete energy minimization}},
doi = {10.15479/AT:IST-2012-0003},
year = {2012},
}
@techreport{5398,
abstract = {This document is created as a part of the project “Repository for Research Data on IST Austria”. It summarises the actual state of research data at IST Austria, based on survey results. It supports the choice of appropriate software, which would best fit the requirements of their users, the researchers.},
author = {Porsche, Jana},
publisher = {IST Austria},
title = {{Actual state of research data @ IST Austria}},
year = {2012},
}
@inbook{5745,
author = {Gupta, Ashutosh},
booktitle = {Automated Technology for Verification and Analysis},
isbn = {9783642333859},
issn = {0302-9743},
location = {Thiruvananthapuram, Kerala, India},
pages = {107--121},
publisher = {Springer Berlin Heidelberg},
title = {{Improved Single Pass Algorithms for Resolution Proof Reduction}},
doi = {10.1007/978-3-642-33386-6_10},
volume = {7561},
year = {2012},
}
@article{6588,
abstract = {First we note that the best polynomial approximation to $|x|$ on the set, which consists of an interval on the positive half-axis and a point on the negative half-axis, can be given by means of the classical Chebyshev polynomials. Then we explore the cases when a solution of the related problem on two intervals can be given in elementary functions.},
author = {Pausinger, Florian},
issn = {1812-9471},
journal = {Journal of Mathematical Physics, Analysis, Geometry},
number = {1},
pages = {63--78},
publisher = {B. Verkin Institute for Low Temperature Physics and Engineering},
title = {{Elementary solutions of the Bernstein problem on two intervals}},
volume = {8},
year = {2012},
}
@inproceedings{1384,
abstract = {Software model checking, as an undecidable problem, has three possible outcomes: (1) the program satisfies the specification, (2) the program does not satisfy the specification, and (3) the model checker fails. The third outcome usually manifests itself in a space-out, time-out, or one component of the verification tool giving up; in all of these failing cases, significant computation is performed by the verification tool before the failure, but no result is reported. We propose to reformulate the model-checking problem as follows, in order to have the verification tool report a summary of the performed work even in case of failure: given a program and a specification, the model checker returns a condition Ψ - usually a state predicate - such that the program satisfies the specification under the condition Ψ - that is, as long as the program does not leave the states in which Ψ is satisfied. In our experiments, we investigated as one major application of conditional model checking the sequential combination of model checkers with information passing. We give the condition that one model checker produces, as input to a second conditional model checker, such that the verification problem for the second is restricted to the part of the state space that is not covered by the condition, i.e., the second model checker works on the problems that the first model checker could not solve. Our experiments demonstrate that repeated application of conditional model checkers, passing information from one model checker to the next, can significantly improve the verification results and performance, i.e., we can now verify programs that we could not verify before.},
author = {Beyer, Dirk and Henzinger, Thomas A and Keremoglu, Mehmet and Wendler, Philipp},
booktitle = {Proceedings of the ACM SIGSOFT 20th International Symposium on the Foundations of Software Engineering},
location = {Cary, NC, USA},
publisher = {ACM},
title = {{Conditional model checking: A technique to pass information between verifiers}},
doi = {10.1145/2393596.2393664},
year = {2012},
}
@article{3836,
abstract = {Hierarchical Timing Language (HTL) is a coordination language for distributed, hard real-time applications. HTL is a hierarchical extension of Giotto and, like its predecessor, based on the logical execution time (LET) paradigm of real-time programming. Giotto is compiled into code for a virtual machine, called the Embedded Machine (or E machine). If HTL is targeted to the E machine, then the hierarchical program structure needs to be flattened; the flattening makes separate compilation difficult, and may result in E machine code of exponential size. In this paper, we propose a generalization of the E machine, which supports a hierarchical program structure at runtime through real-time trigger mechanisms that are arranged in a tree. We present the generalized E machine, and a modular compiler for HTL that generates code of linear size. The compiler may generate code for any part of a given HTL program separately in any order.},
author = {Ghosal, Arkadeb and Iercan, Daniel and Kirsch, Christoph and Henzinger, Thomas A and Sangiovanni Vincentelli, Alberto},
journal = {Science of Computer Programming},
number = {2},
pages = {96--112},
publisher = {Elsevier},
title = {{Separate compilation of hierarchical real-time programs into linear-bounded embedded machine code}},
doi = {10.1016/j.scico.2010.06.004},
volume = {77},
year = {2012},
}
@article{3846,
abstract = {We summarize classical and recent results about two-player games played on graphs with ω-regular objectives. These games have applications in the verification and synthesis of reactive systems. Important distinctions are whether a graph game is turn-based or concurrent; deterministic or stochastic; zero-sum or not. We cluster known results and open problems according to these classifications.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A},
journal = {Journal of Computer and System Sciences},
number = {2},
pages = {394--413},
publisher = {Elsevier},
title = {{A survey of stochastic ω-regular games}},
doi = {10.1016/j.jcss.2011.05.002},
volume = {78},
year = {2012},
}
@article{3115,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(nlogn)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. A variant of the algorithm, which we have implemented using the cgal library, is based on rational arithmetic and answers the same deconstruction problem up to an uncertainty parameter δ its running time additionally depends on δ. If the input shape is found to be approximable, this algorithm also computes an approximate solution for the problem. It also allows us to solve parameter-optimization problems induced by the offset-deconstruction problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution P with at most one more vertex than a vertex-minimal one.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
journal = {Discrete & Computational Geometry},
number = {4},
pages = {964--989},
publisher = {Springer},
title = {{Deconstructing approximate offsets}},
doi = {10.1007/s00454-012-9441-5},
volume = {48},
year = {2012},
}
@article{3117,
abstract = {We consider the problem of minimizing a function represented as a sum of submodular terms. We assume each term allows an efficient computation of exchange capacities. This holds, for example, for terms depending on a small number of variables, or for certain cardinality-dependent terms. A naive application of submodular minimization algorithms would not exploit the existence of specialized exchange capacity subroutines for individual terms. To overcome this, we cast the problem as a submodular flow (SF) problem in an auxiliary graph in such a way that applying most existing SF algorithms would rely only on these subroutines. We then explore in more detail Iwata's capacity scaling approach for submodular flows (Iwata 1997 [19]). In particular, we show how to improve its complexity in the case when the function contains cardinality-dependent terms.},
author = {Kolmogorov, Vladimir},
journal = {Discrete Applied Mathematics},
number = {15},
pages = {2246--2258},
publisher = {Elsevier},
title = {{Minimizing a sum of submodular functions}},
doi = {10.1016/j.dam.2012.05.025},
volume = {160},
year = {2012},
}
@article{3118,
abstract = {We present a method for recovering a temporally coherent, deforming triangle mesh with arbitrarily changing topology from an incoherent sequence of static closed surfaces. We solve this problem using the surface geometry alone, without any prior information like surface templates or velocity fields. Our system combines a proven strategy for triangle mesh improvement, a robust multi-resolution non-rigid registration routine, and a reliable technique for changing surface mesh topology. We also introduce a novel topological constraint enforcement algorithm to ensure that the output and input always have similar topology. We apply our technique to a series of diverse input data from video reconstructions, physics simulations, and artistic morphs. The structured output of our algorithm allows us to efficiently track information like colors and displacement maps, recover velocity information, and solve PDEs on the mesh as a post process.},
author = {Bojsen-Hansen, Morten and Li, Hao and Wojtan, Christopher J},
journal = {ACM Transactions on Graphics},
number = {4},
publisher = {ACM},
title = {{Tracking surfaces with evolving topology}},
doi = {10.1145/2185520.2185549},
volume = {31},
year = {2012},
}
@inproceedings{3119,
abstract = {We present an approach for artist-directed animation of liquids using multiple levels of control over the simulation, ranging from the overall tracking of desired shapes to highly detailed secondary effects such as dripping streams, separating sheets of fluid, surface waves and ripples. The first portion of our technique is a volume preserving morph that allows the animator to produce a plausible fluid-like motion from a sparse set of control meshes. By rasterizing the resulting control meshes onto the simulation grid, the mesh velocities act as boundary conditions during the projection step of the fluid simulation. We can then blend this motion together with uncontrolled fluid velocities to achieve a more relaxed control over the fluid that captures natural inertial effects. Our method can produce highly detailed liquid surfaces with control over sub-grid details by using a mesh-based surface tracker on top of a coarse grid-based fluid simulation. We can create ripples and waves on the fluid surface attracting the surface mesh to the control mesh with spring-like forces and also by running a wave simulation over the surface mesh. Our video results demonstrate how our control scheme can be used to create animated characters and shapes that are made of water.
},
author = {Raveendran, Karthik and Thuerey, Nils and Wojtan, Christopher J and Turk, Greg},
booktitle = {Proceedings of the ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
location = {Aire-la-Ville, Switzerland},
pages = {255--264},
publisher = {ACM},
title = {{Controlling liquids using meshes}},
year = {2012},
}
@article{3120,
abstract = {We introduce a strategy based on Kustin-Miller unprojection that allows us to construct many hundreds of Gorenstein codimension 4 ideals with 9 × 16 resolutions (that is, nine equations and sixteen first syzygies). Our two basic games are called Tom and Jerry; the main application is the biregular construction of most of the anticanonically polarised Mori Fano 3-folds of Altinok's thesis. There are 115 cases whose numerical data (in effect, the Hilbert series) allow a Type I projection. In every case, at least one Tom and one Jerry construction works, providing at least two deformation families of quasismooth Fano 3-folds having the same numerics but different topology. © 2012 Copyright Foundation Compositio Mathematica.},
author = {Brown, Gavin and Kerber, Michael and Reid, Miles},
journal = {Compositio Mathematica},
number = {4},
pages = {1171--1194},
publisher = {Cambridge University Press},
title = {{Fano 3-folds in codimension 4, Tom and Jerry. Part I}},
doi = {10.1112/S0010437X11007226},
volume = {148},
year = {2012},
}
@article{3121,
abstract = {Voltage-activated Ca(2+) channels (VACCs) mediate Ca(2+) influx to trigger action potential-evoked neurotransmitter release, but the mechanism by which Ca(2+) regulates spontaneous transmission is unclear. We found that VACCs are the major physiological triggers for spontaneous release at mouse neocortical inhibitory synapses. Moreover, despite the absence of a synchronizing action potential, we found that spontaneous fusion of a GABA-containing vesicle required the activation of multiple tightly coupled VACCs of variable type.},
author = {Williams, Courtney and Chen, Wenyan and Lee, Chia and Yaeger, Daniel and Vyleta, Nicholas and Smith, Stephen},
journal = {Nature Neuroscience},
number = {9},
pages = {1195--1197},
publisher = {Nature Publishing Group},
title = {{Coactivation of multiple tightly coupled calcium channels triggers spontaneous release of GABA}},
doi = {10.1038/nn.3162},
volume = {15},
year = {2012},
}
@article{3122,
abstract = {Since Darwin's pioneering research on plant reproductive biology (e.g. Darwin 1877), understanding the mechanisms maintaining the diverse sexual strategies of plants has remained an important challenge for evolutionary biologists. In some species, populations are sexually polymorphic and contain two or more mating morphs (sex phenotypes). Differences in morphology or phenology among the morphs influence patterns of non-random mating. In these populations, negative frequency-dependent selection arising from disassortative (intermorph) mating is usually required for the evolutionary maintenance of sexual polymorphism, but few studies have demonstrated the required patterns of non-random mating. In the current issue of Molecular Ecology, Shang (2012) make an important contribution to our understanding of how disassortative mating influences sex phenotype ratios in Acer pictum subsp. mono (painted maple), a heterodichogamous, deciduous tree of eastern China. They monitored sex expression in 97 adults and used paternity analysis of open-pollinated seed to examine disassortative mating among three sex phenotypes. Using a deterministic 'pollen transfer' model, Shang et al. present convincing evidence that differences in the degree of disassortative mating in progeny arrays of the sex phenotypes can explain their uneven frequencies in the adult population. This study provides a useful example of how the deployment of genetic markers, demographic monitoring and modelling can be integrated to investigate the maintenance of sexual diversity in plants. },
author = {Field, David and Barrett, Spencer},
journal = {Molecular Ecology},
number = {15},
pages = {3640 -- 3643},
publisher = {Wiley-Blackwell},
title = {{Disassortative mating and the maintenance of sexual polymorphism in painted maple}},
doi = {10.1111/j.1365-294X.2012.05643.x},
volume = {21},
year = {2012},
}
@inproceedings{3123,
abstract = {We introduce the idea of using an explicit triangle mesh to track the air/fluid interface in a smoothed particle hydrodynamics (SPH) simulator. Once an initial surface mesh is created, this mesh is carried forward in time using nearby particle velocities to advect the mesh vertices. The mesh connectivity remains mostly unchanged across time-steps; it is only modified locally for topology change events or for the improvement of triangle quality. In order to ensure that the surface mesh does not diverge from the underlying particle simulation, we periodically project the mesh surface onto an implicit surface defined by the physics simulation. The mesh surface gives us several advantages over previous SPH surface tracking techniques. We demonstrate a new method for surface tension calculations that clearly outperforms the state of the art in SPH surface tension for computer graphics. We also demonstrate a method for tracking detailed surface information (like colors) that is less susceptible to numerical diffusion than competing techniques. Finally, our temporally-coherent surface mesh allows us to simulate high-resolution surface wave dynamics without being limited by the particle resolution of the SPH simulation.},
author = {Yu, Jihun and Wojtan, Christopher J and Turk, Greg and Yap, Chee},
booktitle = {Computer Graphics Forum},
location = {Cagliari, Sardinia, Italy},
number = {2},
pages = {815 -- 824},
publisher = {Blackwell Publishing},
title = {{Explicit mesh surfaces for particle based fluids}},
doi = {10.1111/j.1467-8659.2012.03062.x},
volume = {31},
year = {2012},
}
@inproceedings{3124,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.
},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
location = {Edinburgh, Scotland},
publisher = {ICML},
title = {{Approximating marginals using discrete energy minimization}},
year = {2012},
}
@inproceedings{3125,
abstract = {We propose a new learning method to infer a mid-level feature representation that combines the advantage of semantic attribute representations with the higher expressive power of non-semantic features. The idea lies in augmenting an existing attribute-based representation with additional dimensions for which an autoencoder model is coupled with a large-margin principle. This construction allows a smooth transition between the zero-shot regime with no training example, the unsupervised regime with training examples but without class labels, and the supervised regime with training examples and with class labels. The resulting optimization problem can be solved efficiently, because several of the necessary steps have closed-form solutions. Through extensive experiments we show that the augmented representation achieves better results in terms of object categorization accuracy than the semantic representation alone.},
author = {Sharmanska, Viktoriia and Quadrianto, Novi and Lampert, Christoph},
location = {Florence, Italy},
number = {PART 5},
pages = {242 -- 255},
publisher = {Springer},
title = {{Augmented attribute representations}},
doi = {10.1007/978-3-642-33715-4_18},
volume = {7576},
year = {2012},
}
@inproceedings{3126,
abstract = {In this work we propose a new information-theoretic clustering algorithm that infers cluster memberships by direct optimization of a non-parametric mutual information estimate between data distribution and cluster assignment. Although the optimization objective has a solid theoretical foundation it is hard to optimize. We propose an approximate optimization formulation that leads to an efficient algorithm with low runtime complexity. The algorithm has a single free parameter, the number of clusters to find. We demonstrate superior performance on several synthetic and real datasets.
},
author = {Müller, Andreas and Nowozin, Sebastian and Lampert, Christoph},
location = {Graz, Austria},
pages = {205 -- 215},
publisher = {Springer},
title = {{Information theoretic clustering using minimal spanning trees}},
doi = {10.1007/978-3-642-32717-9_21},
volume = {7476},
year = {2012},
}
@inproceedings{3127,
abstract = {When searching for characteristic subpatterns in potentially noisy graph data, it appears self-evident that having multiple observations would be better than having just one. However, it turns out that the inconsistencies introduced when different graph instances have different edge sets pose a serious challenge. In this work we address this challenge for the problem of finding maximum weighted cliques.
We introduce the concept of most persistent soft-clique. This is a subset of vertices, that 1) is almost fully or at least densely connected, 2) occurs in all or almost all graph instances, and 3) has the maximum weight. We present a measure of clique-ness, that essentially counts the number of edges missing to make a subset of vertices into a clique. With this measure, we show that the problem of finding the most persistent soft-clique problem can be cast either as: a) a max-min two person game optimization problem, or b) a min-min soft margin optimization problem. Both formulations lead to the same solution when using a partial Lagrangian method to solve the optimization problems. By experiments on synthetic data and on real social network data, we show that the proposed method is able to reliably find soft cliques in graph data, even if that is distorted by random noise or unreliable observations.},
author = {Quadrianto, Novi and Lampert, Christoph and Chen, Chao},
booktitle = {Proceedings of the 29th International Conference on Machine Learning},
location = {Edinburgh, United Kingdom},
pages = {211--218},
publisher = {Omnipress},
title = {{The most persistent soft-clique in a set of sampled graphs}},
year = {2012},
}
@article{3128,
abstract = {We consider two-player zero-sum stochastic games on graphs with ω-regular winning conditions specified as parity objectives. These games have applications in the design and control of reactive systems. We survey the complexity results for the problem of deciding the winner in such games, and in classes of interest obtained as special cases, based on the information and the power of randomization available to the players, on the class of objectives and on the winning mode. On the basis of information, these games can be classified as follows: (a) partial-observation (both players have partial view of the game); (b) one-sided partial-observation (one player has partial-observation and the other player has complete-observation); and (c) complete-observation (both players have complete view of the game). The one-sided partial-observation games have two important subclasses: the one-player games, known as partial-observation Markov decision processes (POMDPs), and the blind one-player games, known as probabilistic automata. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. Finally, various classes of games are obtained by restricting the parity objective to a reachability, safety, Büchi, or coBüchi condition. We also consider several winning modes, such as sure-winning (i.e., all outcomes of a strategy have to satisfy the winning condition), almost-sure winning (i.e., winning with probability 1), limit-sure winning (i.e., winning with probability arbitrarily close to 1), and value-threshold winning (i.e., winning with probability at least ν, where ν is a given rational). },
author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A},
journal = {Formal Methods in System Design},
number = {2},
pages = {268 -- 284},
publisher = {Springer},
title = {{A survey of partial-observation stochastic parity games}},
doi = {10.1007/s10703-012-0164-2},
volume = {43},
year = {2012},
}
@inproceedings{3129,
abstract = {Let K be a simplicial complex and g the rank of its p-th homology group Hp(K) defined with ℤ2 coefficients. We show that we can compute a basis H of Hp(K) and annotate each p-simplex of K with a binary vector of length g with the following property: the annotations, summed over all p-simplices in any p-cycle z, provide the coordinate vector of the homology class [z] in the basis H. The basis and the annotations for all simplices can be computed in O(n ω ) time, where n is the size of K and ω < 2.376 is a quantity so that two n×n matrices can be multiplied in O(n ω ) time. The precomputed annotations permit answering queries about the independence or the triviality of p-cycles efficiently.
Using annotations of edges in 2-complexes, we derive better algorithms for computing optimal basis and optimal homologous cycles in 1 - dimensional homology. Specifically, for computing an optimal basis of H1(K) , we improve the previously known time complexity from O(n 4) to O(n ω + n 2 g ω − 1). Here n denotes the size of the 2-skeleton of K and g the rank of H1(K) . Computing an optimal cycle homologous to a given 1-cycle is NP-hard even for surfaces and an algorithm taking 2 O(g) nlogn time is known for surfaces. We extend this algorithm to work with arbitrary 2-complexes in O(n ω ) + 2 O(g) n 2logn time using annotations.
},
author = {Busaryev, Oleksiy and Cabello, Sergio and Chen, Chao and Dey, Tamal and Wang, Yusu},
location = {Helsinki, Finland},
pages = {189 -- 200},
publisher = {Springer},
title = {{Annotating simplices with a homology basis and its applications}},
doi = {10.1007/978-3-642-31155-0_17},
volume = {7357},
year = {2012},
}
@article{3130,
abstract = {Essential genes code for fundamental cellular functions required for the viability of an organism. For this reason, essential genes are often highly conserved across organisms. However, this is not always the case: orthologues of genes that are essential in one organism are sometimes not essential in other organisms or are absent from their genomes. This suggests that, in the course of evolution, essential genes can be rendered nonessential. How can a gene become non-essential? Here we used genetic manipulation to deplete the products of 26 different essential genes in Escherichia coli. This depletion results in a lethal phenotype, which could often be rescued by the overexpression of a non-homologous, non-essential gene, most likely through replacement of the essential function. We also show that, in a smaller number of cases, the essential genes can be fully deleted from the genome, suggesting that complete functional replacement is possible. Finally, we show that essential genes whose function can be replaced in the laboratory are more likely to be non-essential or not present in other taxa. These results are consistent with the notion that patterns of evolutionary conservation of essential genes are influenced by their compensability-that is, by how easily they can be functionally replaced, for example through increased expression of other genes.},
author = {Bergmiller, Tobias and Ackermann, Martin and Silander, Olin},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Patterns of evolutionary conservation of essential genes correlate with their compensability}},
doi = {10.1371/journal.pgen.1002803},
volume = {8},
year = {2012},
}
@article{3131,
abstract = {In large populations, many beneficial mutations may be simultaneously available and may compete with one another, slowing adaptation. By finding the probability of fixation of a favorable allele in a simple model of a haploid sexual population, we find limits to the rate of adaptive substitution, Λ, that depend on simple parameter combinations. When variance in fitness is low and linkage is loose, the baseline rate of substitution is Λ 0=2NU〈s〉, where N is the population size, U is the rate of beneficial mutations per genome, and 〈s〉 is their mean selective advantage. Heritable variance ν in log fitness due to unlinked loci reduces Λ by e -4ν under polygamy and e -8ν under monogamy. With a linear genetic map of length R Morgans, interference is yet stronger. We use a scaling argument to show that the density of adaptive substitutions depends on s, N, U, and R only through the baseline density: Λ/R=F(Λ 0/R). Under the approximation that the interference due to different sweeps adds up, we show that Λ/R~(Λ 0/R)/(1+2Λ 0/R), implying that interference prevents the rate of adaptive substitution from exceeding one per centimorgan per 200 generations. Simulations and numerical calculations confirm the scaling argument and confirm the additive approximation for Λ 0/R ≲ 1; for higher Λ 0/R, the rate of adaptation grows above R/2, but only very slowly. We also consider the effect of sweeps on neutral diversity and show that, while even occasional sweeps can greatly reduce neutral diversity, this effect saturates as sweeps become more common-diversity can be maintained even in populations experiencing very strong interference. Our results indicate that for some organisms the rate of adaptive substitution may be primarily recombination-limited, depending only weakly on the mutation supply and the strength of selection.},
author = {Weissman, Daniel and Barton, Nicholas H},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Limits to the rate of adaptive substitution in sexual populations}},
doi = {10.1371/journal.pgen.1002740},
volume = {8},
year = {2012},
}
@article{3132,
abstract = {Reproductive division of labour is a characteristic trait of social insects. The dominant reproductive individual, often the queen, uses chemical communication and/or behaviour to maintain her social status. Queens of many social insects communicate their fertility status via cuticle-bound substances. As these substances usually possess a low volatility, their range in queen–worker communication is potentially limited. Here, we investigate the range and impact of behavioural and chemical queen signals on workers of the ant Temnothorax longispinosus. We compared the behaviour and ovary development of workers subjected to three different treatments: workers with direct chemical and physical contact to the queen, those solely under the influence of volatile queen substances and those entirely separated from the queen. In addition to short-ranged queen signals preventing ovary development in workers, we discovered a novel secondary pathway influencing worker behaviour. Workers with no physical contact to the queen, but exposed to volatile substances, started to develop their ovaries, but did not change their behaviour compared to workers in direct contact to the queen. In contrast, workers in queen-separated groups showed both increased ovary development and aggressive dominance interactions. We conclude that T. longispinosus queens influence worker ovary development and behaviour via two independent signals, both ensuring social harmony within the colony.},
author = {Konrad, Matthias and Pamminger, Tobias and Foitzik, Susanne},
journal = {Naturwissenschaften},
number = {8},
pages = {627 -- 636},
publisher = {Springer},
title = {{Two pathways ensuring social harmony}},
doi = {10.1007/s00114-012-0943-z},
volume = {99},
year = {2012},
}
@inproceedings{3133,
abstract = {This note contributes to the point calculus of persistent homology by extending Alexander duality from spaces to real-valued functions. Given a perfect Morse function f: S n+1 →[0, 1] and a decomposition S n+1 = U ∪ V into two (n + 1)-manifolds with common boundary M, we prove elementary relationships between the persistence diagrams of f restricted to U, to V, and to M. },
author = {Edelsbrunner, Herbert and Kerber, Michael},
booktitle = {Proceedings of the twenty-eighth annual symposium on Computational geometry },
location = {Chapel Hill, NC, USA},
pages = {249 -- 258},
publisher = {ACM},
title = {{Alexander duality for functions: The persistent behavior of land and water and shore}},
doi = {10.1145/2261250.2261287},
year = {2012},
}
@inproceedings{3134,
abstract = {It has been an open question whether the sum of finitely many isotropic Gaussian kernels in n ≥ 2 dimensions can have more modes than kernels, until in 2003 Carreira-Perpiñán and Williams exhibited n +1 isotropic Gaussian kernels in ℝ n with n + 2 modes. We give a detailed analysis of this example, showing that it has exponentially many critical points and that the resilience of the extra mode grows like √n. In addition, we exhibit finite configurations of isotropic Gaussian kernels with superlinearly many modes. },
author = {Edelsbrunner, Herbert and Fasy, Brittany and Rote, Günter},
booktitle = {Proceedings of the twenty-eighth annual symposium on Computational geometry },
location = {Chapel Hill, NC, USA},
pages = {91 -- 100},
publisher = {ACM},
title = {{Add isotropic Gaussian kernels at own risk: More and more resilient modes in higher dimensions}},
doi = {10.1145/2261250.2261265},
year = {2012},
}
@inproceedings{3135,
abstract = {We introduce consumption games, a model for discrete interactive system with multiple resources that are consumed or reloaded independently. More precisely, a consumption game is a finite-state graph where each transition is labeled by a vector of resource updates, where every update is a non-positive number or ω. The ω updates model the reloading of a given resource. Each vertex belongs either to player □ or player ◇, where the aim of player □ is to play so that the resources are never exhausted. We consider several natural algorithmic problems about consumption games, and show that although these problems are computationally hard in general, they are solvable in polynomial time for every fixed number of resource types (i.e., the dimension of the update vectors) and bounded resource updates. },
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotný, Petr},
location = {Berkeley, CA, USA},
pages = {23 -- 38},
publisher = {Springer},
title = {{Efficient controller synthesis for consumption games with multiple resource types}},
doi = {10.1007/978-3-642-31424-7_8},
volume = {7358},
year = {2012},
}
@inproceedings{3136,
abstract = {Continuous-time Markov chains (CTMC) with their rich theory and efficient simulation algorithms have been successfully used in modeling stochastic processes in diverse areas such as computer science, physics, and biology. However, systems that comprise non-instantaneous events cannot be accurately and efficiently modeled with CTMCs. In this paper we define delayed CTMCs, an extension of CTMCs that allows for the specification of a lower bound on the time interval between an event's initiation and its completion, and we propose an algorithm for the computation of their behavior. Our algorithm effectively decomposes the computation into two stages: a pure CTMC governs event initiations while a deterministic process guarantees lower bounds on event completion times. Furthermore, from the nature of delayed CTMCs, we obtain a parallelized version of our algorithm. We use our formalism to model genetic regulatory circuits (biological systems where delayed events are common) and report on the results of our numerical algorithm as run on a cluster. We compare performance and accuracy of our results with results obtained by using pure CTMCs. © 2012 Springer-Verlag.},
author = {Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Mateescu, Maria and Sezgin, Ali},
location = {Berkeley, CA, USA},
pages = {294 -- 309},
publisher = {Springer},
title = {{Delayed continuous time Markov chains for genetic regulatory circuits}},
doi = {10.1007/978-3-642-31424-7_24},
volume = {7358},
year = {2012},
}
@inproceedings{3155,
abstract = {We propose synchronous interfaces, a new interface theory for discrete-time systems. We use an application to time-triggered scheduling to drive the design choices for our formalism; in particular, additionally to deriving useful mathematical properties, we focus on providing a syntax which is adapted to natural high-level system modeling. As a result, we develop an interface model that relies on a guarded-command based language and is equipped with shared variables and explicit discrete-time clocks. We define all standard interface operations: compatibility checking, composition, refinement, and shared refinement. Apart from the synchronous interface model, the contribution of this paper is the establishment of a formal relation between interface theories and real-time scheduling, where we demonstrate a fully automatic framework for the incremental computation of time-triggered schedules.},
author = {Delahaye, Benoît and Fahrenberg, Uli and Henzinger, Thomas A and Legay, Axel and Nickovic, Dejan},
location = {Stockholm, Sweden},
pages = {203 -- 218},
publisher = {Springer},
title = {{Synchronous interface theories and time triggered scheduling}},
doi = {10.1007/978-3-642-30793-5_13},
volume = {7273},
year = {2012},
}
@article{3156,
abstract = {Dispersal is crucial for gene flow and often determines the long-term stability of meta-populations, particularly in rare species with specialized life cycles. Such species are often foci of conservation efforts because they suffer disproportionally from degradation and fragmentation of their habitat. However, detailed knowledge of effective gene flow through dispersal is often missing, so that conservation strategies have to be based on mark-recapture observations that are suspected to be poor predictors of long-distance dispersal. These constraints have been especially severe in the study of butterfly populations, where microsatellite markers have been difficult to develop. We used eight microsatellite markers to analyse genetic population structure of the Large Blue butterfly Maculinea arion in Sweden. During recent decades, this species has become an icon of insect conservation after massive decline throughout Europe and extinction in Britain followed by reintroduction of a seed population from the Swedish island of Öland. We find that populations are highly structured genetically, but that gene flow occurs over distances 15 times longer than the maximum distance recorded from mark-recapture studies, which can only be explained by maximum dispersal distances at least twice as large as previously accepted. However, we also find evidence that gaps between sites with suitable habitat exceeding ∼ 20 km induce genetic erosion that can be detected from bottleneck analyses. Although further work is needed, our results suggest that M. arion can maintain fully functional metapopulations when they consist of optimal habitat patches that are no further apart than ∼10 km.},
author = {Ugelvig, Line V and Andersen, Anne and Boomsma, Jacobus and Nash, David},
journal = {Molecular Ecology},
number = {13},
pages = {3224 -- 3236},
publisher = {Wiley-Blackwell},
title = {{Dispersal and gene flow in the rare parasitic Large Blue butterfly Maculinea arion}},
doi = {10.1111/j.1365-294X.2012.05592.x},
volume = {21},
year = {2012},
}
@article{3157,
abstract = {Colorectal tumours that are wild type for KRAS are often sensitive to EGFR blockade, but almost always develop resistance within several months of initiating therapy. The mechanisms underlying this acquired resistance to anti-EGFR antibodies are largely unknown. This situation is in marked contrast to that of small-molecule targeted agents, such as inhibitors of ABL, EGFR, BRAF and MEK, in which mutations in the genes encoding the protein targets render the tumours resistant to the effects of the drugs. The simplest hypothesis to account for the development of resistance to EGFR blockade is that rare cells with KRAS mutations pre-exist at low levels in tumours with ostensibly wild-type KRAS genes. Although this hypothesis would seem readily testable, there is no evidence in pre-clinical models to support it, nor is there data from patients. To test this hypothesis, we determined whether mutant KRAS DNA could be detected in the circulation of 28 patients receiving monotherapy with panitumumab, a therapeutic anti-EGFR antibody. We found that 9 out of 24 (38%) patients whose tumours were initially KRAS wild type developed detectable mutations in KRAS in their sera, three of which developed multiple different KRAS mutations. The appearance of these mutations was very consistent, generally occurring between 5 and 6months following treatment. Mathematical modelling indicated that the mutations were present in expanded subclones before the initiation of panitumumab treatment. These results suggest that the emergence of KRAS mutations is a mediator of acquired resistance to EGFR blockade and that these mutations can be detected in a non-invasive manner. They explain why solid tumours develop resistance to targeted therapies in a highly reproducible fashion.},
author = {Diaz, Jr., Luis and Williams, Richard and Wu, Jian and Kinde, Isaac and Hecht, Joel and Berlin, Jordan and Allen, Benjamin and Božić, Ivana and Reiter, Johannes and Nowak, Martin and Kinzler, Kenneth and Oliner, Kelly and Vogelstein, Bert},
journal = {Nature},
number = {7404},
pages = {537 -- 540},
publisher = {Nature Publishing Group},
title = {{The molecular evolution of acquired resistance to targeted EGFR blockade in colorectal cancers}},
doi = {10.1038/nature11219},
volume = {486},
year = {2012},
}
@article{3158,
abstract = {We describe here the development and characterization of a conditionally inducible mouse model expressing Lifeact-GFP, a peptide that reports the dynamics of filamentous actin. We have used this model to study platelets, megakaryocytes and melanoblasts and we provide evidence that Lifeact-GFP is a useful reporter in these cell types ex vivo. In the case of platelets and megakaryocytes, these cells are not transfectable by traditional methods, so conditional activation of Lifeact allows the study of actin dynamics in these cells live. We studied melanoblasts in native skin explants from embryos, allowing the visualization of live actin dynamics during cytokinesis and migration. Our study revealed that melanoblasts lacking the small GTPase Rac1 show a delay in the formation of new pseudopodia following cytokinesis that accounts for the previously reported cytokinesis delay in these cells. Thus, through use of this mouse model, we were able to gain insights into the actin dynamics of cells that could only previously be studied using fixed specimens or following isolation from their native tissue environment.},
author = {Schachtner, Hannah and Li, Ang and Stevenson, David and Calaminus, Simon and Thomas, Steven and Watson, Steve and Sixt, Michael K and Wedlich Söldner, Roland and Strathdee, Douglas and Machesky, Laura},
journal = {European Journal of Cell Biology},
number = {11-12},
pages = {923 -- 929},
publisher = {Elsevier},
title = {{Tissue inducible Lifeact expression allows visualization of actin dynamics in vivo and ex vivo}},
doi = {10.1016/j.ejcb.2012.04.002},
volume = {91},
year = {2012},
}
@article{3159,
abstract = {The structure of hierarchical networks in biological and physical systems has long been characterized using the Horton-Strahler ordering scheme. The scheme assigns an integer order to each edge in the network based on the topology of branching such that the order increases from distal parts of the network (e.g., mountain streams or capillaries) to the "root" of the network (e.g., the river outlet or the aorta). However, Horton-Strahler ordering cannot be applied to networks with loops because they create a contradiction in the edge ordering in terms of which edge precedes another in the hierarchy. Here, we present a generalization of the Horton-Strahler order to weighted planar reticular networks, where weights are assumed to correlate with the importance of network edges, e.g., weights estimated from edge widths may correlate to flow capacity. Our method assigns hierarchical levels not only to edges of the network, but also to its loops, and classifies the edges into reticular edges, which are responsible for loop formation, and tree edges. In addition, we perform a detailed and rigorous theoretical analysis of the sensitivity of the hierarchical levels to weight perturbations. In doing so, we show that the ordering of the reticular edges is more robust to noise in weight estimation than is the ordering of the tree edges. We discuss applications of this generalized Horton-Strahler ordering to the study of leaf venation and other biological networks.},
author = {Mileyko, Yuriy and Edelsbrunner, Herbert and Price, Charles and Weitz, Joshua},
journal = {PLoS One},
number = {6},
publisher = {Public Library of Science},
title = {{Hierarchical ordering of reticular networks}},
doi = {10.1371/journal.pone.0036715},
volume = {7},
year = {2012},
}
@article{3160,
abstract = {There is a long-running controversy about how early cell fate decisions are made in the developing mammalian embryo. [1,2] In particular, it is controversial when the first events that can predict the establishment of the pluripotent and extra-embryonic lineages in the blastocyst of the pre-implantation embryo occur. It has long been proposed that the position and polarity of cells at the 16- to 32-cell stage embryo influence their decision to either give rise to the pluripotent cell lineage that eventually contributes to the inner cell mass (ICM), comprising the primitive endoderm (PE) and the epiblast (EPI), or the extra-embryonic trophectoderm (TE) surrounding the blastocoel. The positioning of cells in the embryo at this developmental stage could largely be the result of random events, making this a stochastic model of cell lineage allocation. Contrary to such a stochastic model, some studies have detected putative differences in the lineage potential of individual blastomeres before compaction, indicating that the first cell fate decisions may occur as early as at the 4-cell stage. Using a non-invasive, quantitative in vivo imaging assay to study the kinetic behavior of Oct4 (also known as POU5F1), a key transcription factor (TF) controlling pre-implantation development in the mouse embryo, [3-5] a recent study identifies Oct4 kinetics as a predictive measure of cell lineage patterning in the early mouse embryo. [6] Here, we discuss the implications of such molecular heterogeneities in early development and offer potential avenues toward a mechanistic understanding of these observations, contributing to the resolution of the controversy of developmental cell lineage allocation.},
author = {Pantazis, Periklis and Bollenbach, Tobias},
journal = {Cell Cycle},
number = {11},
pages = {2055 -- 2058},
publisher = {Taylor and Francis},
title = {{Transcription factor kinetics and the emerging asymmetry in the early mammalian embryo}},
doi = {10.4161/cc.20118},
volume = {11},
year = {2012},
}
@article{3161,
abstract = {Some inflammatory stimuli trigger activation of the NLRP3 inflammasome by inducing efflux of cellular potassium. Loss of cellular potassium is known to potently suppress protein synthesis, leading us to test whether the inhibition of protein synthesis itself serves as an activating signal for the NLRP3 inflammasome. Murine bone marrow-derived macrophages, either primed by LPS or unprimed, were exposed to a panel of inhibitors of ribosomal function: ricin, cycloheximide, puromycin, pactamycin, and anisomycin. Macrophages were also exposed to nigericin, ATP, monosodium urate (MSU), and poly I:C. Synthesis of pro-IL-1β and release of IL-1β from cells in response to these agents was detected by immunoblotting and ELISA. Release of intracellular potassium was measured by mass spectrometry. Inhibition of translation by each of the tested translation inhibitors led to processing of IL-1β, which was released from cells. Processing and release of IL-1β was reduced or absent from cells deficient in NLRP3, ASC, or caspase-1, demonstrating the role of the NLRP3 inflammasome. Despite the inability of these inhibitors to trigger efflux of intracellular potassium, the addition of high extracellular potassium suppressed activation of the NLRP3 inflammasome. MSU and double-stranded RNA, which are known to activate the NLRP3 inflammasome, also substantially inhibited protein translation, supporting a close association between inhibition of translation and inflammasome activation. These data demonstrate that translational inhibition itself constitutes a heretofore-unrecognized mechanism underlying IL-1β dependent inflammatory signaling and that other physical, chemical, or pathogen-associated agents that impair translation may lead to IL-1β-dependent inflammation through activation of the NLRP3 inflammasome.
For agents that inhibit translation through decreased cellular potassium, the application of high extracellular potassium restores protein translation and suppresses activation of the NLRP3 inflammasome. For agents that inhibit translation through mechanisms that do not involve loss of potassium, high extracellular potassium suppresses IL-1β processing through a mechanism that remains undefined.},
author = {Vyleta, Meghan and Wong, John and Magun, Bruce},
journal = {PLoS One},
number = {5},
pages = {e36044},
publisher = {Public Library of Science},
title = {{Suppression of ribosomal function triggers innate immune signaling through activation of the NLRP3 inflammasome}},
doi = {10.1371/journal.pone.0036044},
volume = {7},
year = {2012},
}
@inproceedings{3162,
abstract = {Given a dense-time real-valued signal and a parameterized temporal logic formula with both magnitude and timing parameters, we compute the subset of the parameter space that renders the formula satisfied by the trace. We provide two preliminary implementations, one which follows the exact semantics and attempts to compute the validity domain by quantifier elimination in linear arithmetics and one which conducts adaptive search in the parameter space.},
author = {Asarin, Eugene and Donzé, Alexandre and Maler, Oded and Nickovic, Dejan},
booktitle = {Runtime Verification},
location = {San Francisco, CA, United States},
pages = {147 -- 160},
publisher = {Springer},
title = {{Parametric identification of temporal properties}},
doi = {10.1007/978-3-642-29860-8_12},
volume = {7186},
year = {2012},
}
@article{3164,
  abstract  = {Overview of the Special Issue on structured prediction and inference.},
  author    = {Blaschko, Matthew and Lampert, Christoph},
  journal   = {International Journal of Computer Vision},
  number    = {3},
  pages     = {257 -- 258},
  publisher = {Springer},
  title     = {{Guest editorial: Special issue on structured prediction and inference}},
  doi       = {10.1007/s11263-012-0530-y},
  volume    = {99},
  year      = {2012},
}
@inproceedings{3165,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer aided verification with a large number of applications. The long standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n^2). This bound also leads to O(n^2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n^3)), and (3) in Markov decision processes (improving for m > n^{4/3} an earlier bound of O(min(m^{1.5}, m·n^{2/3}))). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n^2), which is an improvement over earlier bounds for m > n^{4/3}. Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
location = {Kyoto, Japan},
pages = {1386 -- 1399},
publisher = {SIAM},
title = {{An $O(n^2)$ time algorithm for alternating Büchi games}},
doi = {10.1137/1.9781611973099.109},
year = {2012},
}
@article{3166,
abstract = {There is evidence that the genetic code was established prior to the existence of proteins, when metabolism was powered by ribozymes. Also, early proto-organisms had to rely on simple anaerobic bioenergetic processes. In this work I propose that amino acid fermentation powered metabolism in the RNA world, and that this was facilitated by proto-adapters, the precursors of the tRNAs. Amino acids were used as carbon sources rather than as catalytic or structural elements. In modern bacteria, amino acid fermentation is known as the Stickland reaction. This pathway involves two amino acids: the first undergoes oxidative deamination, and the second acts as an electron acceptor through reductive deamination. This redox reaction results in two keto acids that are employed to synthesise ATP via substrate-level phosphorylation. The Stickland reaction is the basic bioenergetic pathway of some bacteria of the genus Clostridium. Two other facts support Stickland fermentation in the RNA world. First, several Stickland amino acid pairs are synthesised in abiotic amino acid synthesis. This suggests that amino acids that could be used as an energy substrate were freely available. Second, anticodons that have complementary sequences often correspond to amino acids that form Stickland pairs. The main hypothesis of this paper is that pairs of complementary proto-adapters were assigned to Stickland amino acids pairs. There are signatures of this hypothesis in the genetic code. Furthermore, it is argued that the proto-adapters formed double strands that brought amino acid pairs into proximity to facilitate their mutual redox reaction, structurally constraining the anticodon pairs that are assigned to these amino acid pairs. Significance tests which randomise the code are performed to study the extent of the variability of the energetic (ATP) yield.
Random assignments can lead to a substantial yield of ATP and maintain enough variability, thus selection can act and refine the assignments into a proto-code that optimises the energetic yield. Monte Carlo simulations are performed to evaluate the establishment of these simple proto-codes, based on amino acid substitutions and codon swapping. In all cases, donor amino acids are assigned to anticodons composed of U+G, and have low redundancy (1-2 codons), whereas acceptor amino acids are assigned to the remaining codons. These bioenergetic and structural constraints allow for a metabolic role for amino acids before their co-option as catalyst cofactors. Reviewers: this article was reviewed by Prof. William Martin, Prof. Eors Szathmary (nominated by Dr. Gaspar Jekely) and Dr. Adam Kun (nominated by Dr. Sandor Pongor)},
author = {de Vladar, Harold P},
journal = {Biology Direct},
publisher = {BioMed Central},
title = {{Amino acid fermentation at the origin of the genetic code}},
doi = {10.1186/1745-6150-7-6},
volume = {7},
year = {2012},
}