@article{494,
abstract = {We solve the longstanding open problems of the blow-up involved in the translations, when possible, of a nondeterministic Büchi word automaton (NBW) to a nondeterministic co-Büchi word automaton (NCW) and to a deterministic co-Büchi word automaton (DCW). For the NBW to NCW translation, the currently known upper bound is 2^O(n log n) and the lower bound is 1.5n. We improve the upper bound to n·2^n and describe a matching lower bound of 2^Ω(n). For the NBW to DCW translation, the currently known upper bound is 2^O(n log n). We improve it to 2^O(n), which is asymptotically tight. Both of our upper-bound constructions are based on a simple subset construction, do not involve intermediate automata with richer acceptance conditions, and can be implemented symbolically. We continue and solve the open problems of translating nondeterministic Streett, Rabin, Muller, and parity word automata to NCW and to DCW. Going via an intermediate NBW is not optimal, and we describe direct, simple, and asymptotically tight constructions, involving a 2^O(n) blow-up. The constructions are variants of the subset construction, providing a unified approach for translating all common classes of automata to NCW and DCW. Beyond the theoretical importance of the results, we point to numerous applications of the new constructions. In particular, they imply a simple subset-construction-based translation, when possible, of LTL to deterministic Büchi word automata.},
author = {Boker, Udi and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Translating to Co-Büchi made tight, unified, and useful}},
doi = {10.1145/2362355.2362357},
volume = {13},
year = {2012},
}
@inproceedings{2888,
abstract = {Formal verification aims to improve the quality of hardware and software by detecting errors before they do harm. At the basis of formal verification lies the logical notion of correctness, which purports to capture whether or not a circuit or program behaves as desired. We suggest that the boolean partition into correct and incorrect systems falls short of the practical need to assess the behavior of hardware and software in a more nuanced fashion against multiple criteria.},
author = {Henzinger, Thomas A},
booktitle = {Conference proceedings MODELS 2012},
location = {Innsbruck, Austria},
pages = {1 -- 2},
publisher = {Springer},
title = {{Quantitative reactive models}},
doi = {10.1007/978-3-642-33666-9_1},
volume = {7590},
year = {2012},
}
@inproceedings{2890,
abstract = {Systems are often specified using multiple requirements on their behavior. In practice, these requirements can be contradictory. The classical approach to specification, verification, and synthesis demands more detailed specifications that resolve any contradictions in the requirements. These detailed specifications are usually large, cumbersome, and hard to maintain or modify. In contrast, quantitative frameworks allow the formalization of the intuitive idea that what is desired is an implementation that comes "closest" to satisfying the mutually incompatible requirements, according to a measure of fit that can be defined by the requirements engineer. One flexible framework for quantifying how "well" an implementation satisfies a specification is offered by simulation distances that are parameterized by an error model. We introduce this framework, study its properties, and provide an algorithmic solution for the following quantitative synthesis question: given two (or more) behavioral requirements specified by possibly incompatible finite-state machines, and an error model, find the finite-state implementation that minimizes the maximal simulation distance to the given requirements. Furthermore, we generalize the framework to handle infinite alphabets (for example, real-valued domains). We also demonstrate how quantitative specifications based on simulation distances might lead to smaller and easier-to-modify specifications. Finally, we illustrate our approach using case studies on error correcting codes and scheduler synthesis.},
author = {Cerny, Pavol and Gopi, Sivakanth and Henzinger, Thomas A and Radhakrishna, Arjun and Totla, Nishant},
booktitle = {Proceedings of the tenth ACM international conference on Embedded software},
location = {Tampere, Finland},
pages = {53 -- 62},
publisher = {ACM},
title = {{Synthesis from incompatible specifications}},
doi = {10.1145/2380356.2380371},
year = {2012},
}
@article{2972,
abstract = {Energy parity games are infinite two-player turn-based games played on weighted graphs. The objective of the game combines a (qualitative) parity condition with the (quantitative) requirement that the sum of the weights (i.e., the level of energy in the game) must remain positive. Besides their own interest in the design and synthesis of resource-constrained omega-regular specifications, energy parity games provide one of the simplest models of games with combined qualitative and quantitative objectives. Our main results are as follows: (a) exponential memory is sufficient and may be necessary for winning strategies in energy parity games; (b) the problem of deciding the winner in energy parity games can be solved in NP ∩ coNP; and (c) we give an algorithm to solve energy parity games by reduction to energy games. We also show that the problem of deciding the winner in energy parity games is logspace-equivalent to the problem of deciding the winner in mean-payoff parity games, which can thus be solved in NP ∩ coNP. As a consequence we also obtain a conceptually simple algorithm to solve mean-payoff parity games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
journal = {Theoretical Computer Science},
pages = {49 -- 60},
publisher = {Elsevier},
title = {{Energy parity games}},
doi = {10.1016/j.tcs.2012.07.038},
volume = {458},
year = {2012},
}
@inproceedings{3165,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer-aided verification with a large number of applications. The long-standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n^2). This bound also leads to O(n^2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n^3)), and (3) in Markov decision processes (improving, for m > n^(4/3), an earlier bound of O(min(m^(1.5), m·n^(2/3)))). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n^2), which is an improvement over earlier bounds for m > n^(4/3). Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
location = {Kyoto, Japan},
pages = {1386 -- 1399},
publisher = {SIAM},
title = {{An O(n^2) time algorithm for alternating Büchi games}},
doi = {10.1137/1.9781611973099.109},
year = {2012},
}
@article{3254,
abstract = {The theory of graph games with ω-regular winning conditions is the foundation for modeling and synthesizing reactive processes. In the case of stochastic reactive processes, the corresponding stochastic graph games have three players, two of them (System and Environment) behaving adversarially, and the third (Uncertainty) behaving probabilistically. We consider two problems for stochastic graph games: the qualitative problem asks for the set of states from which a player can win with probability 1 (almost-sure winning); and the quantitative problem asks for the maximal probability of winning (optimal winning) from each state. We consider ω-regular winning conditions formalized as Müller winning conditions. We present optimal memory bounds for pure (deterministic) almost-sure winning and optimal winning strategies in stochastic graph games with Müller winning conditions. We also study the complexity of stochastic Müller games and show that both the qualitative and quantitative analysis problems are PSPACE-complete. Our results are relevant in synthesis of stochastic reactive processes.},
author = {Chatterjee, Krishnendu},
journal = {Information and Computation},
pages = {29 -- 48},
publisher = {Elsevier},
title = {{The complexity of stochastic Müller games}},
doi = {10.1016/j.ic.2011.11.004},
volume = {211},
year = {2012},
}
@inproceedings{2048,
abstract = {Leakage resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state. Unfortunately, most leakage resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this is mostly an artefact of the security proof, and in practice much simpler constructions may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three-round Feistel network instantiated with a leakage resilient PRF yields a leakage resilient PRP if the inputs are chosen non-adaptively (this complements the result of Dodis and Pietrzak [CRYPTO’10], who show that if adaptive queries are allowed, a superlogarithmic number of rounds is necessary). We also show that a minor variation of the classical GGM construction gives a leakage resilient PRF if both the leakage function and the inputs are chosen non-adaptively.},
author = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim},
booktitle = {Conference proceedings CHES 2012},
location = {Leuven, Belgium},
pages = {213 -- 232},
publisher = {Springer},
title = {{Practical leakage-resilient symmetric cryptography}},
doi = {10.1007/978-3-642-33027-8_13},
volume = {7428},
year = {2012},
}
@article{2411,
abstract = {The kingdom of fungi provides model organisms for biotechnology, cell biology, genetics, and life sciences in general. Only when their phylogenetic relationships are stably resolved can individual results from fungal research be integrated into a holistic picture of biology. However, and despite recent progress, many deep relationships within the fungi remain unclear. Here, we present the first phylogenomic study of an entire eukaryotic kingdom that uses a consistency criterion to strengthen phylogenetic conclusions. We reason that branches (splits) recovered with independent data and different tree reconstruction methods are likely to reflect true evolutionary relationships. Two complementary phylogenomic data sets based on 99 fungal genomes and 109 fungal expressed sequence tag (EST) sets analyzed with four different tree reconstruction methods shed light from different angles on the fungal tree of life. Eleven additional data sets address specifically the phylogenetic position of Blastocladiomycota, Ustilaginomycotina, and Dothideomycetes, respectively. The combined evidence from the resulting trees supports the deep-level stability of the fungal groups toward a comprehensive natural system of the fungi. In addition, our analysis reveals methodologically interesting aspects. Enrichment for EST-encoded data, a common practice in phylogenomic analyses, introduces a strong bias toward slowly evolving and functionally correlated genes. Consequently, the generalization of phylogenomic data sets as collections of randomly selected genes cannot be taken for granted. A thorough characterization of the data to assess possible influences on the tree reconstruction should therefore become a standard in phylogenomic analyses.},
author = {Ebersberger, Ingo and De Matos Simoes, Ricardo and Kupczok, Anne and Gube, Matthias and Kothe, Erika and Voigt, Kerstin and Von Haeseler, Arndt},
journal = {Molecular Biology and Evolution},
number = {5},
pages = {1319 -- 1334},
publisher = {Oxford University Press},
title = {{A consistent phylogenetic backbone for the fungi}},
doi = {10.1093/molbev/msr285},
volume = {29},
year = {2012},
}
@article{2904,
abstract = {Generalized van der Corput sequences are one-dimensional, infinite sequences in the unit interval. They are generated from permutations in integer base b and are the building blocks of the multi-dimensional Halton sequences. Motivated by recent progress of Atanassov on the uniform distribution behavior of Halton sequences, we study, among others, permutations of the form P(i) = ai (mod b) for coprime integers a and b. We show that multipliers a that either divide b - 1 or b + 1 generate van der Corput sequences with weak distribution properties. We give explicit lower bounds for the asymptotic distribution behavior of these sequences and relate them to sequences generated from the identity permutation in smaller bases, which are, due to Faure, the weakest distributed generalized van der Corput sequences.},
author = {Pausinger, Florian},
journal = {Journal de Théorie des Nombres de Bordeaux},
number = {3},
pages = {729 -- 749},
publisher = {Universite de Bordeaux III},
title = {{Weak multipliers for generalized van der Corput sequences}},
doi = {10.5802/jtnb.819},
volume = {24},
year = {2012},
}
@unpublished{2928,
abstract = { This paper addresses the problem of approximate MAP-MRF inference in general graphical models. Following [36], we consider a family of linear programming relaxations of the problem where each relaxation is specified by a set of nested pairs of factors for which the marginalization constraint needs to be enforced. We develop a generalization of the TRW-S algorithm [9] for this problem, where we use a decomposition into junction chains, monotonic w.r.t. some ordering on the nodes. This generalizes the monotonic chains in [9] in a natural way. We also show how to deal with nested factors in an efficient way. Experiments show an improvement over min-sum diffusion, MPLP and subgradient ascent algorithms on a number of computer vision and natural language processing problems. },
author = {Kolmogorov, Vladimir and Schoenemann, Thomas},
booktitle = {arXiv},
publisher = {arXiv},
title = {{Generalized sequential tree-reweighted message passing}},
year = {2012},
}
@inproceedings{2930,
abstract = {In this paper we investigate k-submodular functions. This natural family of discrete functions includes submodular and bisubmodular functions as the special cases k = 1 and k = 2, respectively. In particular, we generalize the known Min-Max-Theorem for submodular and bisubmodular functions. This theorem asserts that the minimum of the (bi)submodular function can be found by solving a maximization problem over a (bi)submodular polyhedron. We define a k-submodular polyhedron, prove a Min-Max-Theorem for k-submodular functions, and give a greedy algorithm to construct the vertices of the polyhedron.},
author = {Huber, Anna and Kolmogorov, Vladimir},
location = {Athens, Greece},
pages = {451 -- 462},
publisher = {Springer},
title = {{Towards minimizing k-submodular functions}},
doi = {10.1007/978-3-642-32147-4_40},
volume = {7422},
year = {2012},
}
@article{2959,
abstract = {We study maximum likelihood estimation in Gaussian graphical models from a geometric point of view. An algebraic elimination criterion allows us to find exact lower bounds on the number of observations needed to ensure that the maximum likelihood estimator (MLE) exists with probability one. This is applied to bipartite graphs, grids and colored graphs. We also study the ML degree, and we present the first instance of a graph for which the MLE exists with probability one, even when the number of observations equals the treewidth.},
author = {Uhler, Caroline},
journal = {Annals of Statistics},
number = {1},
pages = {238 -- 261},
publisher = {Institute of Mathematical Statistics},
title = {{Geometry of maximum likelihood estimation in Gaussian graphical models}},
doi = {10.1214/11-AOS957},
volume = {40},
year = {2012},
}
@article{2954,
abstract = {Spontaneous postsynaptic currents (PSCs) provide key information about the mechanisms of synaptic transmission and the activity modes of neuronal networks. However, detecting spontaneous PSCs in vitro and in vivo has been challenging, because of the small amplitude, the variable kinetics, and the undefined time of generation of these events. Here, we describe a, to our knowledge, new method for detecting spontaneous synaptic events by deconvolution, using a template that approximates the average time course of spontaneous PSCs. A recorded PSC trace is deconvolved from the template, resulting in a series of delta-like functions. The maxima of these delta-like events are reliably detected, revealing the precise onset times of the spontaneous PSCs. Among all detection methods, the deconvolution-based method has a unique temporal resolution, allowing the detection of individual events in high-frequency bursts. Furthermore, the deconvolution-based method has a high amplitude resolution, because deconvolution can substantially increase the signal/noise ratio. When tested against previously published methods using experimental data, the deconvolution-based method was superior for spontaneous PSCs recorded in vivo. Using the high-resolution deconvolution-based detection algorithm, we show that the frequency of spontaneous excitatory postsynaptic currents in dentate gyrus granule cells is 4.5 times higher in vivo than in vitro.},
author = {Pernia-Andrade, Alejandro and Goswami, Sarit and Stickler, Yvonne and Fröbe, Ulrich and Schlögl, Alois and Jonas, Peter M},
journal = {Biophysical Journal},
number = {7},
pages = {1429 -- 1439},
publisher = {Biophysical Society},
title = {{A deconvolution-based method with high sensitivity and temporal resolution for detection of spontaneous synaptic currents in vitro and in vivo}},
doi = {10.1016/j.bpj.2012.08.039},
volume = {103},
year = {2012},
}
@article{2966,
abstract = {Background: The outcome of male-male competition can be predicted from the relative fighting qualities of the opponents, which often depend on their age. In insects, freshly emerged and still sexually inactive males are morphologically indistinct from older, sexually active males. These young inactive males may thus be easy targets for older males if they cannot conceal themselves from their attacks. The ant Cardiocondyla obscurior is characterised by lethal fighting between wingless ("ergatoid") males. Here, we analyse for how long young males are defenceless after eclosion, and how early adult males can detect the presence of rival males. Results: We found that old ergatoid males consistently won fights against ergatoid males younger than two days. Old males did not differentiate between different types of unpigmented pupae several days before emergence, but had more frequent contact to ready-to-eclose pupae of female sexuals and winged males than of workers and ergatoid males. In rare cases, old ergatoid males displayed alleviated biting of pigmented ergatoid male pupae shortly before adult eclosion, as well as copulation attempts to dark pupae of female sexuals and winged males. Ergatoid male behaviour may be promoted by a closer similarity of the chemical profile of ready-to-eclose pupae to the profile of adults than that of young pupae several days prior to emergence. Conclusion: Young ergatoid males of C. obscurior would benefit greatly by hiding their identity from older, resident males, as they are highly vulnerable during the first two days of their adult lives. In contrast to the winged males of the same species, which are able to prevent ergatoid male attacks by chemical female mimicry, young ergatoids do not seem to be able to produce a protective chemical profile. Conflicts in male-male competition between ergatoid males of different age thus seem to be resolved in favour of the older males. This might represent selection at the colony level rather than the individual level.},
author = {Cremer, Sylvia and Suefuji, Masaki and Schrempf, Alexandra and Heinze, Jürgen},
journal = {BMC Ecology},
publisher = {BioMed Central},
title = {{The dynamics of male-male competition in Cardiocondyla obscurior ants}},
doi = {10.1186/1472-6785-12-7},
volume = {12},
year = {2012},
}
@article{3159,
abstract = {The structure of hierarchical networks in biological and physical systems has long been characterized using the Horton-Strahler ordering scheme. The scheme assigns an integer order to each edge in the network based on the topology of branching such that the order increases from distal parts of the network (e.g., mountain streams or capillaries) to the "root" of the network (e.g., the river outlet or the aorta). However, Horton-Strahler ordering cannot be applied to networks with loops because they create a contradiction in the edge ordering in terms of which edge precedes another in the hierarchy. Here, we present a generalization of the Horton-Strahler order to weighted planar reticular networks, where weights are assumed to correlate with the importance of network edges, e.g., weights estimated from edge widths may correlate to flow capacity. Our method assigns hierarchical levels not only to edges of the network, but also to its loops, and classifies the edges into reticular edges, which are responsible for loop formation, and tree edges. In addition, we perform a detailed and rigorous theoretical analysis of the sensitivity of the hierarchical levels to weight perturbations. In doing so, we show that the ordering of the reticular edges is more robust to noise in weight estimation than is the ordering of the tree edges. We discuss applications of this generalized Horton-Strahler ordering to the study of leaf venation and other biological networks.},
author = {Mileyko, Yuriy and Edelsbrunner, Herbert and Price, Charles and Weitz, Joshua},
journal = {PLoS One},
number = {6},
publisher = {Public Library of Science},
title = {{Hierarchical ordering of reticular networks}},
doi = {10.1371/journal.pone.0036715},
volume = {7},
year = {2012},
}
@article{3161,
abstract = {Some inflammatory stimuli trigger activation of the NLRP3 inflammasome by inducing efflux of cellular potassium. Loss of cellular potassium is known to potently suppress protein synthesis, leading us to test whether the inhibition of protein synthesis itself serves as an activating signal for the NLRP3 inflammasome. Murine bone marrow-derived macrophages, either primed by LPS or unprimed, were exposed to a panel of inhibitors of ribosomal function: ricin, cycloheximide, puromycin, pactamycin, and anisomycin. Macrophages were also exposed to nigericin, ATP, monosodium urate (MSU), and poly I:C. Synthesis of pro-IL-1β and release of IL-1β from cells in response to these agents was detected by immunoblotting and ELISA. Release of intracellular potassium was measured by mass spectrometry. Inhibition of translation by each of the tested translation inhibitors led to processing of IL-1β, which was released from cells. Processing and release of IL-1β was reduced or absent from cells deficient in NLRP3, ASC, or caspase-1, demonstrating the role of the NLRP3 inflammasome. Despite the inability of these inhibitors to trigger efflux of intracellular potassium, the addition of high extracellular potassium suppressed activation of the NLRP3 inflammasome. MSU and double-stranded RNA, which are known to activate the NLRP3 inflammasome, also substantially inhibited protein translation, supporting a close association between inhibition of translation and inflammasome activation. These data demonstrate that translational inhibition itself constitutes a heretofore-unrecognized mechanism underlying IL-1β-dependent inflammatory signaling and that other physical, chemical, or pathogen-associated agents that impair translation may lead to IL-1β-dependent inflammation through activation of the NLRP3 inflammasome. For agents that inhibit translation through decreased cellular potassium, the application of high extracellular potassium restores protein translation and suppresses activation of the NLRP3 inflammasome. For agents that inhibit translation through mechanisms that do not involve loss of potassium, high extracellular potassium suppresses IL-1β processing through a mechanism that remains undefined.},
author = {Vyleta, Meghan and Wong, John and Magun, Bruce},
journal = {PLoS One},
number = {5},
publisher = {Public Library of Science},
title = {{Suppression of ribosomal function triggers innate immune signaling through activation of the NLRP3 inflammasome}},
doi = {10.1371/journal.pone.0036044},
volume = {7},
year = {2012},
}
@inproceedings{3123,
abstract = {We introduce the idea of using an explicit triangle mesh to track the air/fluid interface in a smoothed particle hydrodynamics (SPH) simulator. Once an initial surface mesh is created, this mesh is carried forward in time using nearby particle velocities to advect the mesh vertices. The mesh connectivity remains mostly unchanged across time-steps; it is only modified locally for topology change events or for the improvement of triangle quality. In order to ensure that the surface mesh does not diverge from the underlying particle simulation, we periodically project the mesh surface onto an implicit surface defined by the physics simulation. The mesh surface gives us several advantages over previous SPH surface tracking techniques. We demonstrate a new method for surface tension calculations that clearly outperforms the state of the art in SPH surface tension for computer graphics. We also demonstrate a method for tracking detailed surface information (like colors) that is less susceptible to numerical diffusion than competing techniques. Finally, our temporally-coherent surface mesh allows us to simulate high-resolution surface wave dynamics without being limited by the particle resolution of the SPH simulation.},
author = {Yu, Jihun and Wojtan, Christopher J and Turk, Greg and Yap, Chee},
booktitle = {Computer Graphics Forum},
location = {Cagliari, Sardinia, Italy},
number = {2},
pages = {815 -- 824},
publisher = {Blackwell Publishing},
title = {{Explicit mesh surfaces for particle based fluids}},
doi = {10.1111/j.1467-8659.2012.03062.x},
volume = {31},
year = {2012},
}
@article{3130,
abstract = {Essential genes code for fundamental cellular functions required for the viability of an organism. For this reason, essential genes are often highly conserved across organisms. However, this is not always the case: orthologues of genes that are essential in one organism are sometimes not essential in other organisms or are absent from their genomes. This suggests that, in the course of evolution, essential genes can be rendered non-essential. How can a gene become non-essential? Here we used genetic manipulation to deplete the products of 26 different essential genes in Escherichia coli. This depletion results in a lethal phenotype, which could often be rescued by the overexpression of a non-homologous, non-essential gene, most likely through replacement of the essential function. We also show that, in a smaller number of cases, the essential genes can be fully deleted from the genome, suggesting that complete functional replacement is possible. Finally, we show that essential genes whose function can be replaced in the laboratory are more likely to be non-essential or not present in other taxa. These results are consistent with the notion that patterns of evolutionary conservation of essential genes are influenced by their compensability, that is, by how easily they can be functionally replaced, for example through increased expression of other genes.},
author = {Bergmiller, Tobias and Ackermann, Martin and Silander, Olin},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Patterns of evolutionary conservation of essential genes correlate with their compensability}},
doi = {10.1371/journal.pgen.1002803},
volume = {8},
year = {2012},
}
@article{3166,
abstract = {There is evidence that the genetic code was established prior to the existence of proteins, when metabolism was powered by ribozymes. Also, early proto-organisms had to rely on simple anaerobic bioenergetic processes. In this work I propose that amino acid fermentation powered metabolism in the RNA world, and that this was facilitated by proto-adapters, the precursors of the tRNAs. Amino acids were used as carbon sources rather than as catalytic or structural elements. In modern bacteria, amino acid fermentation is known as the Stickland reaction. This pathway involves two amino acids: the first undergoes oxidative deamination, and the second acts as an electron acceptor through reductive deamination. This redox reaction results in two keto acids that are employed to synthesise ATP via substrate-level phosphorylation. The Stickland reaction is the basic bioenergetic pathway of some bacteria of the genus Clostridium. Two other facts support Stickland fermentation in the RNA world. First, several Stickland amino acid pairs are synthesised in abiotic amino acid synthesis. This suggests that amino acids that could be used as an energy substrate were freely available. Second, anticodons that have complementary sequences often correspond to amino acids that form Stickland pairs. The main hypothesis of this paper is that pairs of complementary proto-adapters were assigned to Stickland amino acid pairs. There are signatures of this hypothesis in the genetic code. Furthermore, it is argued that the proto-adapters formed double strands that brought amino acid pairs into proximity to facilitate their mutual redox reaction, structurally constraining the anticodon pairs that are assigned to these amino acid pairs. Significance tests which randomise the code are performed to study the extent of the variability of the energetic (ATP) yield. Random assignments can lead to a substantial yield of ATP and maintain enough variability, thus selection can act and refine the assignments into a proto-code that optimises the energetic yield. Monte Carlo simulations are performed to evaluate the establishment of these simple proto-codes, based on amino acid substitutions and codon swapping. In all cases, donor amino acids are assigned to anticodons composed of U+G, and have low redundancy (1-2 codons), whereas acceptor amino acids are assigned to the remaining codons. These bioenergetic and structural constraints allow for a metabolic role for amino acids before their co-option as catalyst cofactors. Reviewers: this article was reviewed by Prof. William Martin, Prof. Eors Szathmary (nominated by Dr. Gaspar Jekely) and Dr. Adam Kun (nominated by Dr. Sandor Pongor).},
author = {Vladar, Harold},
journal = {Biology Direct},
publisher = {BioMed Central},
title = {{Amino acid fermentation at the origin of the genetic code}},
doi = {10.1186/1745-6150-7-6},
volume = {7},
year = {2012},
}
@article{3262,
abstract = {Living cells must control the reading out or "expression" of information encoded in their genomes, and this regulation often is mediated by transcription factors--proteins that bind to DNA and either enhance or repress the expression of nearby genes. But the expression of transcription factor proteins is itself regulated, and many transcription factors regulate their own expression in addition to responding to other input signals. Here we analyze the simplest of such self-regulatory circuits, asking how parameters can be chosen to optimize information transmission from inputs to outputs in the steady state. Some nonzero level of self-regulation is almost always optimal, with self-activation dominant when transcription factor concentrations are low and self-repression dominant when concentrations are high. In steady state the optimal self-activation is never strong enough to induce bistability, although there is a limit in which the optimal parameters are very close to the critical point.},
author = {Tkacik, Gasper and Walczak, Aleksandra and Bialek, William},
journal = {Physical Review E: Statistical, Nonlinear, and Soft Matter Physics},
number = {4},
publisher = {American Physical Society},
title = {{Optimizing information flow in small genetic networks. III. A self-interacting gene}},
doi = {10.1103/PhysRevE.85.041903},
volume = {85},
year = {2012},
}