@article{3274,
abstract = {A boundary element model of a tunnel running through horizontally layered soil with anisotropic material properties is presented. Since there is no analytical fundamental solution for wave propagation inside a layered orthotropic medium in 3D, the fundamental displacements and stresses have to be calculated numerically. In our model this is done in the Fourier domain with respect to space and time. The assumption of a straight tunnel with infinite extension in the x direction makes it possible to decouple the system for every wave number kx, leading to a 2.5D-problem, which is suited for parallel computation. The special form of the fundamental solution, resulting from our Fourier ansatz, and the fact, that the calculation of the boundary integral equation is performed in the Fourier domain, enhances the stability and efficiency of the numerical calculations.},
author = {Rieckh, Georg and Kreuzer, Wolfgang and Waubke, Holger and Balazs, Peter},
journal = {Engineering Analysis with Boundary Elements},
number = {6},
pages = {960 -- 967},
publisher = {Elsevier},
title = {{A 2.5D-Fourier-BEM model for vibrations in a tunnel running through layered anisotropic soil}},
doi = {10.1016/j.enganabound.2011.12.014},
volume = {36},
year = {2012},
}
@incollection{3277,
abstract = {The problem of the origin of metazoa is becoming more urgent in the context of astrobiology. By now it is clear that clues to the understanding of this crucial transition in the evolution of life can arise in a fourth pathway besides the three possibilities in the quest for simplicity outlined by Bonner in his classical book. In other words, solar system exploration seems to be one way in the long-term to elucidate the simplicity of evolutionary development. We place these ideas in the context of different inheritance systems, namely the genotypic and phenotypic replicators with limited or unlimited heredity, and ask which of these can support multicellular development, and to which degree of complexity. However, the quest for evidence on the evolution of biotas from planets around other stars does not seem to be feasible with present technology with direct visualization of living organisms on exoplanets. But this may be attempted on the Galilean moons of Jupiter where there is a possibility of detecting reliable biomarkers in the next decade with the Europa Jupiter System Mission, in view of recent progress by landing micropenetrators on planetary, or satellite surfaces. Mars is a second possibility in the inner Solar System, in spite of the multiple difficulties faced by the fleet of past, present and future missions. We discuss a series of preliminary ideas for elucidating the origin of metazoan analogues with available instrumentation in potential payloads of feasible space missions to the Galilean moons.},
author = {de Vladar, Harold and Chela Flores, Julian},
booktitle = {Life on Earth and other planetary bodies},
pages = {387 -- 405},
publisher = {Springer},
title = {{Can the evolution of multicellularity be anticipated in the exploration of the solar system?}},
doi = {10.1007/978-94-007-4966-5_22},
volume = {24},
year = {2012},
}
@inproceedings{3279,
abstract = {We show a hardness-preserving construction of a PRF from any length doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound q on the number of queries to the PRF. Our construction requires only O(logq) invocations to the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG {0,1} n → {0,1} 2n, we get a PRF which is exponentially secure if queried at most q = exp(√n)times and where each invocation of the PRF requires Θ(√n) queries to the underlying PRG. This is much less than the Θ(n) required by known constructions.
},
author = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris},
location = {Taormina, Sicily, Italy},
pages = {369 -- 382},
publisher = {Springer},
title = {{Hardness preserving constructions of pseudorandom functions}},
doi = {10.1007/978-3-642-28914-9_21},
volume = {7194},
year = {2012},
}
@inproceedings{3280,
abstract = {The (decisional) learning with errors problem (LWE) asks to distinguish "noisy" inner products of a secret vector with random vectors from uniform. The learning parities with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography. In this paper we introduce a (seemingly) much stronger adaptive assumption, called "subspace LWE" (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial.) This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA secure encryption is even secure against a very powerful class of related key attacks. },
author = {Pietrzak, Krzysztof Z},
location = {Taormina, Sicily, Italy},
pages = {548 -- 563},
publisher = {Springer},
title = {{Subspace LWE}},
doi = {10.1007/978-3-642-28914-9_31},
volume = {7194},
year = {2012},
}
@inproceedings{3281,
abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1} m → {0,1}* amplifies relative lossiness from ℓ/n to L/m if for every function f:{0,1} n → {0,1} n it holds that 1 If f is injective then so is C f. 2 If f has image size of at most 2 n-ℓ, then C f has image size at most 2 m-L. The question is whether such C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(logn)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
location = {Taormina, Sicily, Italy},
pages = {458 -- 475},
publisher = {Springer},
title = {{Lossy functions do not amplify well}},
doi = {10.1007/978-3-642-28914-9_26},
volume = {7194},
year = {2012},
}
@inproceedings{3282,
abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma ) alone does not imply security if the adversary can additionally make verification queries (uf-cmva ). We give an efficient generic transformation from any uf-cma secure MAC which is "message-hiding" into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; By using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF. © 2012 International Association for Cryptologic Research.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel},
location = {Cambridge, UK},
pages = {355 -- 374},
publisher = {Springer},
title = {{Message authentication, revisited}},
doi = {10.1007/978-3-642-29011-4_22},
volume = {7237},
year = {2012},
}
@inproceedings{3284,
abstract = {We study the complexity of valued constraint satisfaction problems (VCSP). A problem from VCSP is characterised by a constraint language, a fixed set of cost functions over a finite domain. An instance of the problem is specified by a sum of cost functions from the language and the goal is to minimise the sum. Under the unique games conjecture, the approximability of finite-valued VCSPs is well-understood, see Raghavendra [FOCS’08]. However, there is no characterisation of finite-valued VCSPs, let alone general-valued VCSPs, that can be solved exactly in polynomial time, thus giving insights from a combinatorial optimisation perspective.
We consider the case of languages containing all possible unary cost functions. In the case of languages consisting of only {0, ∞}-valued cost functions (i.e. relations), such languages have been called conservative and studied by Bulatov [LICS’03] and recently by Barto [LICS’11]. Since we study valued languages, we call a language conservative if it contains all finite-valued unary cost functions. The computational complexity of conservative valued languages has been studied by Cohen et al. [AIJ’06] for languages over Boolean domains, by Deineko et al. [JACM’08] for {0,1}-valued languages (a.k.a Max-CSP), and by Takhanov [STACS’10] for {0,∞}-valued languages containing all finite- valued unary cost functions (a.k.a. Min-Cost-Hom).
We prove a Schaefer-like dichotomy theorem for conservative valued languages: if all cost functions in the language satisfy a certain condition (specified by a complementary combination of STP and MJN multimorphisms), then any instance can be solved in polynomial time (via a new algorithm developed in this paper), otherwise the language is NP-hard. This is the first complete complexity classification of general-valued constraint languages over non-Boolean domains. It is a common phenomenon that complexity classifications of problems over non-Boolean domains is significantly harder than the Boolean case. The polynomial-time algorithm we present for the tractable cases is a generalisation of the submodular minimisation problem and a result of Cohen et al. [TCS’08].
Our results generalise previous results by Takhanov [STACS’10] and (a subset of results) by Cohen et al. [AIJ’06] and Deineko et al. [JACM’08]. Moreover, our results do not rely on any computer-assisted search as in Deineko et al. [JACM’08], and provide a powerful tool for proving hardness of finite-valued and general-valued languages.},
author = {Kolmogorov, Vladimir and Živný, Stanislav},
pages = {750 -- 759},
publisher = {SIAM},
title = {{The complexity of conservative valued CSPs}},
year = {2012},
}
@article{3289,
abstract = {Viral manipulation of transduction pathways associated with key cellular functions such as survival, response to microbial infection, and cytoskeleton reorganization can provide the supportive milieu for a productive infection. Here, we demonstrate that vaccinia virus (VACV) infection leads to activation of the stress-activated protein kinase (SAPK)/extracellular signal-regulated kinase (ERK) 4/7 (MKK4/7)-c-Jun N-terminal protein kinase 1/2 (JNK1/2) pathway; further, the stimulation of this pathway requires postpenetration, prereplicative events in the viral replication cycle. Although the formation of intracellular mature virus (IMV) was not affected in MKK4/7- or JNK1/2-knockout (KO) cells, we did note an accentuated deregulation of microtubule and actin network organization in infected JNK1/2-KO cells. This was followed by deregulated viral trafficking to the periphery and enhanced enveloped particle release. Furthermore, VACV infection induced alterations in the cell contractility and morphology, and cell migration was reduced in the JNK-KO cells. In addition, phosphorylation of proteins implicated with early cell contractility and cell migration, such as microtubule-associated protein 1B and paxillin, respectively, was not detected in the VACV-infected KO cells. In sum, our findings uncover a regulatory role played by the MKK4/7-JNK1/2 pathway in cytoskeleton reorganization during VACV infection.
},
author = {Pereira, Anna and Leite, Flávia and Brasil, Bruno and Soares Martins, Jamaria and Torres, Alice and Pimenta, Paulo and Souto Padrón, Thais and Traktman, Paula and Ferreira, Paulo and Kroon, Erna and Bonjardim, Cláudio},
journal = {Journal of Virology},
number = {1},
pages = {172 -- 184},
publisher = {ASM},
title = {{A vaccinia virus-driven interplay between the MKK4/7-JNK1/2 pathway and cytoskeleton reorganization}},
doi = {10.1128/JVI.05638-11},
volume = {86},
year = {2012},
}
@article{330,
abstract = {A procedure for the continuous production of Cu$_2$ZnSnS$_4$ (CZTS) nanoparticles with controlled composition is presented. CZTS nanoparticles were prepared through the reaction of the metals' amino complexes with elemental sulfur in a continuous-flow reactor at moderate temperatures (300-330 °C). High-resolution transmission electron microscopy and X-ray diffraction analysis showed the nanocrystals to have a crystallographic structure compatible with that of the kesterite. Chemical characterization of the materials showed the presence of the four elements in each individual nanocrystal. Composition control was achieved by adjusting the solution flow rate through the reactor and the proper choice of the nominal precursor concentration within the flowing solution. Single-particle analysis revealed a composition distribution within each sample, which was optimized at the highest synthesis temperatures used. },
author = {Shavel, Alexey and Cadavid, Doris and Ibáñez, Maria and Carrete, Alex and Cabot, Andreu},
journal = {Journal of the American Chemical Society},
number = {3},
pages = {1438 -- 1441},
publisher = {ACS},
title = {{Continuous production of Cu$_2$ZnSnS$_4$ nanocrystals in a flow reactor}},
doi = {10.1021/ja209688a},
volume = {134},
year = {2012},
}
@article{3310,
abstract = {The theory of persistent homology opens up the possibility to reason about topological features of a space or a function quantitatively and in combinatorial terms. We refer to this new angle at a classical subject within algebraic topology as a point calculus, which we present for the family of interlevel sets of a real-valued function. Our account of the subject is expository, devoid of proofs, and written for non-experts in algebraic topology.},
author = {Bendich, Paul and Cabello, Sergio and Edelsbrunner, Herbert},
journal = {Pattern Recognition Letters},
number = {11},
pages = {1436 -- 1444},
publisher = {Elsevier},
title = {{A point calculus for interlevel set homology}},
doi = {10.1016/j.patrec.2011.10.007},
volume = {33},
year = {2012},
}
@article{3314,
abstract = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted or mean-payoff game and the lower level game is a (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value. © 2012 World Scientific Publishing Company.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
journal = {International Journal of Foundations of Computer Science},
number = {3},
pages = {609 -- 625},
publisher = {World Scientific Publishing},
title = {{Discounting and averaging in games across time scales}},
doi = {10.1142/S0129054112400308},
volume = {23},
year = {2012},
}
@inproceedings{3251,
abstract = {Many infinite state systems can be seen as well-structured transition systems (WSTS), i.e., systems equipped with a well-quasi-ordering on states that is also a simulation relation. WSTS are an attractive target for formal analysis because there exist generic algorithms that decide interesting verification problems for this class. Among the most popular algorithms are acceleration-based forward analyses for computing the covering set. Termination of these algorithms can only be guaranteed for flattable WSTS. Yet, many WSTS of practical interest are not flattable and the question whether any given WSTS is flattable is itself undecidable. We therefore propose an analysis that computes the covering set and captures the essence of acceleration-based algorithms, but sacrifices precision for guaranteed termination. Our analysis is an abstract interpretation whose abstract domain builds on the ideal completion of the well-quasi-ordered state space, and a widening operator that mimics acceleration and controls the loss of precision of the analysis. We present instances of our framework for various classes of WSTS. Our experience with a prototype implementation indicates that, despite the inherent precision loss, our analysis often computes the precise covering set of the analyzed system.},
author = {Zufferey, Damien and Wies, Thomas and Henzinger, Thomas A},
location = {Philadelphia, PA, USA},
pages = {445 -- 460},
publisher = {Springer},
title = {{Ideal abstractions for well structured transition systems}},
doi = {10.1007/978-3-642-27940-9_29},
volume = {7148},
year = {2012},
}
@inproceedings{3252,
abstract = {We study the automatic synthesis of fair non-repudiation protocols, a class of fair exchange protocols, used for digital contract signing. First, we show how to specify the objectives of the participating agents, the trusted third party (TTP) and the protocols as path formulas in Linear Temporal Logic (LTL) and prove that the satisfaction of the objectives of the agents and the TTP imply satisfaction of the protocol objectives. We then show that weak (co-operative) co-synthesis and classical (strictly competitive) co-synthesis fail in synthesizing these protocols, whereas assume-guarantee synthesis (AGS) succeeds. We demonstrate the success of assume-guarantee synthesis as follows: (a) any solution of assume-guarantee synthesis is attack-free; no subset of participants can violate the objectives of the other participants without violating their own objectives; (b) the Asokan-Shoup-Waidner (ASW) certified mail protocol that has known vulnerabilities is not a solution of AGS; and (c) the Kremer-Markowitch (KM) non-repudiation protocol is a solution of AGS. To our knowledge this is the first application of synthesis to fair non-repudiation protocols, and our results show how synthesis can generate correct protocols and automatically discover vulnerabilities. The solution to assume-guarantee synthesis can be computed efficiently as the secure equilibrium solution of three-player graph games. © 2012 Springer-Verlag.},
author = {Chatterjee, Krishnendu and Raman, Vishwanath},
location = {Philadelphia, PA, USA},
pages = {152 -- 168},
publisher = {Springer},
title = {{Synthesizing protocols for digital contract signing}},
doi = {10.1007/978-3-642-27940-9_11},
volume = {7148},
year = {2012},
}
@inproceedings{3253,
abstract = {We describe a framework for reasoning about programs with lists carrying integer numerical data. We use abstract domains to describe and manipulate complex constraints on configurations of these programs mixing constraints on the shape of the heap, sizes of the lists, on the multisets of data stored in these lists, and on the data at their different positions. Moreover, we provide powerful techniques for automatic validation of Hoare-triples and invariant checking, as well as for automatic synthesis of invariants and procedure summaries using modular inter-procedural analysis. The approach has been implemented in a tool called Celia and experimented successfully on a large benchmark of programs.},
author = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela},
location = {Philadelphia, PA, USA},
pages = {1 -- 22},
publisher = {Springer},
title = {{Abstract domains for automated reasoning about list manipulating programs with infinite data}},
doi = {10.1007/978-3-642-27940-9_1},
volume = {7148},
year = {2012},
}
@article{3254,
abstract = {The theory of graph games with ω-regular winning conditions is the foundation for modeling and synthesizing reactive processes. In the case of stochastic reactive processes, the corresponding stochastic graph games have three players, two of them (System and Environment) behaving adversarially, and the third (Uncertainty) behaving probabilistically. We consider two problems for stochastic graph games: the qualitative problem asks for the set of states from which a player can win with probability 1 (almost-sure winning); and the quantitative problem asks for the maximal probability of winning (optimal winning) from each state. We consider ω-regular winning conditions formalized as Müller winning conditions. We present optimal memory bounds for pure (deterministic) almost-sure winning and optimal winning strategies in stochastic graph games with Müller winning conditions. We also study the complexity of stochastic Müller games and show that both the qualitative and quantitative analysis problems are PSPACE-complete. Our results are relevant in synthesis of stochastic reactive processes.},
author = {Chatterjee, Krishnendu},
journal = {Information and Computation},
pages = {29 -- 48},
publisher = {Elsevier},
title = {{The complexity of stochastic Müller games}},
doi = {10.1016/j.ic.2011.11.004},
volume = {211},
year = {2012},
}
@inproceedings{3255,
abstract = {In this paper we survey results of two-player games on graphs and Markov decision processes with parity, mean-payoff and energy objectives, and the combination of mean-payoff and energy objectives with parity objectives. These problems have applications in verification and synthesis of reactive systems in resource-constrained environments.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Lednice, Czech Republic},
pages = {37 -- 46},
publisher = {Springer},
title = {{Games and Markov decision processes with mean payoff parity and energy parity objectives}},
doi = {10.1007/978-3-642-25929-6_3},
volume = {7119},
year = {2012},
}
@article{3256,
abstract = {We use a distortion to define the dual complex of a cubical subdivision of $\mathbb{R}^n$ as an n-dimensional subcomplex of the nerve of the set of n-cubes. Motivated by the topological analysis of high-dimensional digital image data, we consider such subdivisions defined by generalizations of quad- and oct-trees to n dimensions. Assuming the subdivision is balanced, we show that mapping each vertex to the center of the corresponding n-cube gives a geometric realization of the dual complex in $\mathbb{R}^n$.},
author = {Edelsbrunner, Herbert and Kerber, Michael},
journal = {Discrete & Computational Geometry},
number = {2},
pages = {393 -- 414},
publisher = {Springer},
title = {{Dual complexes of cubical subdivisions of $\mathbb{R}^n$}},
doi = {10.1007/s00454-011-9382-4},
volume = {47},
year = {2012},
}
@article{3258,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. It is generally thought that proximal synapses from the mossy fibers activate these neurons most efficiently, whereas distal inputs from the perforant path have a weaker modulatory influence. We used confocally targeted patch-clamp recording from dendrites and axons to map the activation of rat CA3 pyramidal neurons at the subcellular level. Our results reveal two distinct dendritic domains. In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+ channel–mediated dendritic spikes are efficiently initiated by waveforms mimicking synaptic events. CA3 pyramidal neuron dendrites showed a high Na+-to-K+ conductance density ratio, providing ideal conditions for active backpropagation and dendritic spike initiation. Dendritic spikes may enhance the computational power of CA3 pyramidal neurons in the hippocampal network.},
author = {Kim, Sooyun and Guzmán, José and Hu, Hua and Jonas, Peter M},
journal = {Nature Neuroscience},
number = {4},
pages = {600 -- 606},
publisher = {Nature Publishing Group},
title = {{Active dendrites support efficient initiation of dendritic spikes in hippocampal CA3 pyramidal neurons}},
doi = {10.1038/nn.3060},
volume = {15},
year = {2012},
}
@phdthesis{2964,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. These neurons receive multiple excitatory inputs from numerous sources. Therefore, the rules of spatiotemporal integration of multiple synaptic inputs and propagation of action potentials are important to understand how CA3 neurons contribute to higher brain functions at cellular level. By using confocally targeted patch-clamp recording techniques, we investigated the biophysical properties of rat CA3 pyramidal neuron dendrites. We found two distinct dendritic domains critical for action potential initiation and propagation: In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+-channel mediated dendritic spikes are efficiently evoked by local dendritic depolarization or waveforms mimicking synaptic events. These findings can be explained by a high Na+-to-K+ conductance density ratio of CA3 pyramidal neuron dendrites. The results challenge the prevailing view that proximal mossy fiber inputs activate CA3 pyramidal neurons more efficiently than distal perforant inputs by showing that the distal synapses trigger a different form of activity represented by dendritic spikes. The high probability of dendritic spike initiation in the distal area may enhance the computational power of CA3 pyramidal neurons in the hippocampal network. },
author = {Kim, Sooyun},
pages = {65},
publisher = {IST Austria},
title = {{Active properties of hippocampal CA3 pyramidal neuron dendrites}},
year = {2012},
}
@article{3257,
abstract = {Consider a convex relaxation f̂ of a pseudo-Boolean function f. We say that the relaxation is totally half-integral if f̂(x) is a polyhedral function with half-integral extreme points x, and this property is preserved after adding an arbitrary combination of constraints of the form x i=x j, x i=1-x j, and x i=γ where γ∈{0,1,1/2} is a constant. A well-known example is the roof duality relaxation for quadratic pseudo-Boolean functions f. We argue that total half-integrality is a natural requirement for generalizations of roof duality to arbitrary pseudo-Boolean functions. Our contributions are as follows. First, we provide a complete characterization of totally half-integral relaxations f̂ by establishing a one-to-one correspondence with bisubmodular functions. Second, we give a new characterization of bisubmodular functions. Finally, we show some relationships between general totally half-integral relaxations and relaxations based on the roof duality. On the conceptual level, our results show that bisubmodular functions provide a natural generalization of the roof duality approach to higher-order terms. This can be viewed as a non-submodular analogue of the fact that submodular functions generalize the s-t minimum cut problem with non-negative weights to higher-order terms.},
author = {Kolmogorov, Vladimir},
journal = {Discrete Applied Mathematics},
number = {4-5},
pages = {416 -- 426},
publisher = {Elsevier},
title = {{Generalized roof duality and bisubmodular functions}},
doi = {10.1016/j.dam.2011.10.026},
volume = {160},
year = {2012},
}
@article{3331,
abstract = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in R2. We prove that, for a polynomial of degree n with integer coefficients bounded by 2ρ, the topology of the induced curve can be computed with bit operations ( indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Journal of Symbolic Computation},
number = {3},
pages = {239 -- 258},
publisher = {Elsevier},
title = {{A worst case bound for topology computation of algebraic curves}},
doi = {10.1016/j.jsc.2011.11.001},
volume = {47},
year = {2012},
}
@article{3317,
abstract = {The physical distance between presynaptic Ca2+ channels and the Ca2+ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca2+ channels that are coupled to Ca2+ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca2+ channels, Ca2+ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.},
author = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M},
journal = {Nature Reviews Neuroscience},
number = {1},
pages = {7 -- 21},
publisher = {Nature Publishing Group},
title = {{Nanodomain coupling between Ca$^{2+}$ channels and sensors of exocytosis at fast mammalian synapses}},
doi = {10.1038/nrn3125},
volume = {13},
year = {2012},
}
@article{3115,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. A variant of the algorithm, which we have implemented using the CGAL library, is based on rational arithmetic and answers the same deconstruction problem up to an uncertainty parameter δ; its running time additionally depends on δ. If the input shape is found to be approximable, this algorithm also computes an approximate solution for the problem. It also allows us to solve parameter-optimization problems induced by the offset-deconstruction problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution P with at most one more vertex than a vertex-minimal one.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
journal = {Discrete \& Computational Geometry},
number = {4},
pages = {964 -- 989},
publisher = {Springer},
title = {{Deconstructing approximate offsets}},
doi = {10.1007/s00454-012-9441-5},
volume = {48},
year = {2012},
}
@article{339,
abstract = {A high-yield and upscalable colloidal synthesis route for the production of quaternary I 2-II-IV-VI 4 nanocrystals, particularly stannite Cu 2+xCd 1-xSnSe 4, with narrow size distribution and precisely controlled composition is presented. It is also shown here how the diversity of valences in the constituent elements allows an effective control of their electrical conductivity through the adjustment of the cation ratios. At the same time, while the crystallographic complexity of quaternary chalcogenides is associated with intrinsically low thermal conductivities, the reduction of the lattice dimensions to the nanoscale further reduces the materials thermal conductivity. In the specific case of the stannite crystal structure, a convenient slab distribution of the valence band maximum states permits a partial decoupling of the p-type electrical conductivity from both the Seebeck coefficient and the thermal conductivity. Combining these features, we demonstrate how an initial optimization of the nanocrystals Cd/Cu ratio allowed us to obtain low-temperature solution-processed materials with ZT values up to 0.71 at 685 K.},
author = {Ibáñez, Maria and Cadavid, Doris and Zamani, Reza and García Castelló, Nuria and Izquierdo Roca, Victor and Li, Wenhua and Fairbrother, Andrew and Prades, Joan and Shavel, Alexey and Arbiol, Jordi and Pérez Rodríguez, Alejandro and Morante, Joan and Cabot, Andreu},
journal = {Chemistry of Materials},
number = {3},
pages = {562 -- 570},
publisher = {American Chemical Society},
title = {{Composition control and thermoelectric properties of quaternary chalcogenide nanocrystals: The case of stannite Cu2CdSnSe4}},
doi = {10.1021/cm2031812},
volume = {24},
year = {2012},
}
@article{340,
abstract = {A procedure for the continuous production of Cu 2ZnSnS 4 (CZTS) nanoparticles with controlled composition is presented. CZTS nanoparticles were prepared through the reaction of the metals' amino complexes with elemental sulfur in a continuous-flow reactor at moderate temperatures (300-330 °C). High-resolution transmission electron microscopy and X-ray diffraction analysis showed the nanocrystals to have a crystallographic structure compatible with that of the kesterite. Chemical characterization of the materials showed the presence of the four elements in each individual nanocrystal. Composition control was achieved by adjusting the solution flow rate through the reactor and the proper choice of the nominal precursor concentration within the flowing solution. Single-particle analysis revealed a composition distribution within each sample, which was optimized at the highest synthesis temperatures used. },
author = {Shavel, Alexey and Cadavid, Doris and Ibáñez, Maria and Carrete, Alex and Cabot, Andreu},
journal = {Journal of the American Chemical Society},
number = {3},
pages = {1438 -- 1441},
publisher = {American Chemical Society},
title = {{Continuous production of Cu2ZnSnS4 nanocrystals in a flow reactor}},
doi = {10.1021/ja209688a},
volume = {134},
year = {2012},
}
@article{345,
abstract = {Nanocomposites are highly promising materials to enhance the efficiency of current thermoelectric devices. A straightforward and at the same time highly versatile and controllable approach to produce nanocomposites is the assembly of solution-processed nanocrystal building blocks. The convenience of this bottom-up approach to produce nanocomposites with homogeneous phase distributions and adjustable composition is demonstrated here by blending Ag2Te and PbTe colloidal nanocrystals to form Ag2Te–PbTe bulk nanocomposites. The thermoelectric properties of these nanocomposites are analyzed in the temperature range from 300 to 700 K. The evolution of their electrical conductivity and Seebeck coefficient is discussed in terms of the blend composition and the characteristics of the constituent materials. },
author = {Cadavid, Doris and Ibáñez, Maria and Gorsse, Stéphane and López, Antonio and Cirera, Albert and Morante, Joan and Cabot, Andreu},
journal = {Journal of Nanoparticle Research},
number = {12},
publisher = {Springer},
title = {{Bottom-up processing of thermoelectric nanocomposites from colloidal nanocrystal building blocks: The case of Ag2Te–PbTe}},
doi = {10.1007/s11051-012-1328-0},
volume = {14},
year = {2012},
}
@article{346,
abstract = {Arrays of vertically aligned ZnO : Cl/TiO2 and ZnO : Cl/ZnxTiOy/TiO2 core–shell nanowires (NWs) were prepared by means of the combination of two solution-growth processes. First, single-crystal ZnO NWs with controlled n-type doping were grown on conducting substrates by a low-cost, high-yield and seed-free electrochemical route. These NWs were covered by a titanium oxide shell of tunable thickness mediating successive adsorption-hydrolysis-condensation steps. Using this atomic-layer growth procedure, titania shells with controlled thickness and the anatase TiO2 phase were obtained after sintering at 450 °C. Higher sintering temperatures resulted in the formation of ZnO : Cl/ZnxTiOy/TiO2 core–shell NWs by the interdiffusion of Zn and Ti ions at the ZnO–TiO2 interface. The performance of ZnO : Cl/TiO2 and ZnO : Cl/ZnxTiOy/TiO2 core–shell NWs towards photoelectrochemical (PEC) water splitting was investigated as a function of the titania shell thickness. Furthermore, the performance of such core–shell NWs as photoelectrodes in dye-sensitized solar cells was also characterized. The TiO2 presence at the ZnO : Cl surface promoted a two-fold increase on the produced photocurrent densities, probing their potential for PEC and optoelectronic applications. Electrochemical impedance spectroscopy was used to corroborate the lower resistance for charge transfer between the NWs and the electrolyte in the presence of the TiO2 shell.},
author = {Fan, Jiandong and Zamani, Reza and Fábrega, Cristina and Shavel, Alexey and Flox, Cristina and Ibáñez, Maria and Andreu, Teresa and López, Antonio and Arbiol, Jordi and Morante, Joan and Cabot, Andreu},
journal = {Journal of Physics D: Applied Physics},
number = {41},
publisher = {IOP Publishing Ltd.},
title = {{Solution-growth and optoelectronic performance of ZnO:Cl/TiO2 and ZnO:Cl/ZnxTiOy/TiO2 core–shell nanowires with tunable shell thickness}},
doi = {10.1088/0022-3727/45/41/415301},
volume = {45},
year = {2012},
}
@article{347,
abstract = {A synthetic route for producing Cu 2ZnGeSe 4 nanocrystals with narrow size distributions and controlled composition is presented. These nanocrystals were used to produce densely packed nanomaterials by hot-pressing. From the characterization of the thermoelectric properties of these nanomaterials, Cu 2ZnGeSe 4 is demonstrated to show excellent thermoelectric properties. A very preliminary adjustment of the nanocrystal composition has already resulted in a figure of merit of up to 0.55 at 450°C. },
author = {Ibáñez, Maria and Zamani, Reza and Lalonde, Aaron and Cadavid, Doris and Li, Wenhua and Shavel, Alexey and Arbiol, Jordi and Morante, Joan and Gorsse, Stéphane and Snyder, G Jeffrey and Cabot, Andreu},
journal = {Journal of the American Chemical Society},
number = {9},
pages = {4060 -- 4063},
publisher = {American Chemical Society},
title = {{Cu2ZnGeSe4 nanocrystals: Synthesis and thermoelectric properties}},
doi = {10.1021/ja211952z},
volume = {134},
year = {2012},
}
@article{338,
abstract = {The ample chemical and structural freedom of quaternary compounds permits engineering materials that fulfill the requirements of a wide variety of applications. In this work, the mechanisms to achieve unprecedented size, shape, and composition control in quaternary nanocrystals are detailed. The described procedure allows obtaining tetrahedral and penta-tetrahedral quaternary nanocrystals with tuned size distributions and controlled compositions from a plethora of I 2-II-IV-VI 4 semiconductors.},
author = {Ibáñez, Maria and Zamani, Reza and Li, Wenhua and Shavel, Alexey and Arbiol, Jordi and Morante, Joan and Cabot, Andreu},
journal = {Crystal Growth \& Design},
number = {3},
pages = {1085 -- 1090},
publisher = {American Chemical Society},
title = {{Extending the nanocrystal synthesis control to quaternary compositions}},
doi = {10.1021/cg201709c},
volume = {12},
year = {2012},
}
@article{377,
abstract = {The potential to control the composition and crystal phase at the nanometer scale enable the production of nanocrystalline materials with enhanced functionalities and new applications. In the present work, we detail a novel colloidal synthesis route to prepare nanoparticles of the ternary semiconductor Cu2GeSe3 (CGSe) with nanometer-scale control over their crystal phases. We also demonstrate the structural effect on the thermoelectric properties of bottom-up-prepared CGSe nanomaterials. By careful adjustment of the nucleation and growth temperatures, pure orthorhombic CGSe nanoparticles with cationic order or polytypic CGSe nanoparticles with disordered cation positions can be produced. In this second type of nanoparticle, a high density of twins can be created to periodically change the atomic plane stacking, forming a hexagonal wurtzite CGSe phase. The high yield of the synthetic routes reported here allows the production of single-phase and multiphase CGSe nanoparticles in the gram scale, which permits characterization of the thermoelectric properties of these materials. Reduced thermal conductivities and a related 2.5-fold increase of the thermoelectric figure of merit for multiphase nanomaterials compared to pure-phase CGSe are systematically obtained. These results are discussed in terms of the density and efficiency of phonon scattering centers in both types of materials.},
author = {Ibáñez, Maria and Zamani, Reza and Li, Wenhua and Cadavid, Doris and Gorsse, Stéphane and Katcho, Nebil and Shavel, Alexey and López, Antonio and Morante, Joan and Arbiol, Jordi and Cabot, Andreu},
journal = {Chemistry of Materials},
number = {23},
pages = {4615 -- 4622},
publisher = {American Chemical Society},
title = {{Crystallographic control at the nanoscale to enhance functionality: Polytypic Cu2GeSe3 nanoparticles as thermoelectric materials}},
doi = {10.1021/cm303252q},
volume = {24},
year = {2012},
}
@article{3836,
abstract = {Hierarchical Timing Language (HTL) is a coordination language for distributed, hard real-time applications. HTL is a hierarchical extension of Giotto and, like its predecessor, based on the logical execution time (LET) paradigm of real-time programming. Giotto is compiled into code for a virtual machine, called the Embedded Machine (or E machine). If HTL is targeted to the E machine, then the hierarchical program structure needs to be flattened; the flattening makes separate compilation difficult, and may result in E machine code of exponential size. In this paper, we propose a generalization of the E machine, which supports a hierarchical program structure at runtime through real-time trigger mechanisms that are arranged in a tree. We present the generalized E machine, and a modular compiler for HTL that generates code of linear size. The compiler may generate code for any part of a given HTL program separately in any order.},
author = {Ghosal, Arkadeb and Iercan, Daniel and Kirsch, Christoph and Henzinger, Thomas A and Sangiovanni Vincentelli, Alberto},
journal = {Science of Computer Programming},
number = {2},
pages = {96 -- 112},
publisher = {Elsevier},
title = {{Separate compilation of hierarchical real-time programs into linear-bounded embedded machine code}},
doi = {10.1016/j.scico.2010.06.004},
volume = {77},
year = {2012},
}
@article{3168,
abstract = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a mathematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the combinatorial complexity by quotienting the reachable set of molecular species into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently, we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper, we prove that this quotienting yields a sufficient condition for weak lumpability (that is to say that the quotient system is still Markovian for a given set of initial distributions) and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system (which means that the conditional probability of being in a given state in the original system knowing that we are in its equivalence class is an invariant of the system). We illustrate the framework on a case study of the epidermal growth factor (EGF)/insulin receptor crosstalk.},
author = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana},
journal = {Theoretical Computer Science},
pages = {137 -- 164},
publisher = {Elsevier},
title = {{Lumpability abstractions of rule-based systems}},
doi = {10.1016/j.tcs.2011.12.059},
volume = {431},
year = {2012},
}
@article{3846,
abstract = {We summarize classical and recent results about two-player games played on graphs with ω-regular objectives. These games have applications in the verification and synthesis of reactive systems. Important distinctions are whether a graph game is turn-based or concurrent; deterministic or stochastic; zero-sum or not. We cluster known results and open problems according to these classifications.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A},
journal = {Journal of Computer and System Sciences},
number = {2},
pages = {394 -- 413},
publisher = {Elsevier},
title = {{A survey of stochastic ω-regular games}},
doi = {10.1016/j.jcss.2011.05.002},
volume = {78},
year = {2012},
}
@article{2972,
abstract = {Energy parity games are infinite two-player turn-based games played on weighted graphs. The objective of the game combines a (qualitative) parity condition with the (quantitative) requirement that the sum of the weights (i.e., the level of energy in the game) must remain positive. Beside their own interest in the design and synthesis of resource-constrained omega-regular specifications, energy parity games provide one of the simplest model of games with combined qualitative and quantitative objectives. Our main results are as follows: (a) exponential memory is sufficient and may be necessary for winning strategies in energy parity games; (b) the problem of deciding the winner in energy parity games can be solved in NP ∩ coNP; and (c) we give an algorithm to solve energy parity by reduction to energy games. We also show that the problem of deciding the winner in energy parity games is logspace-equivalent to the problem of deciding the winner in mean-payoff parity games, which can thus be solved in NP ∩ coNP. As a consequence we also obtain a conceptually simple algorithm to solve mean-payoff parity games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
journal = {Theoretical Computer Science},
pages = {49 -- 60},
publisher = {Elsevier},
title = {{Energy parity games}},
doi = {10.1016/j.tcs.2012.07.038},
volume = {458},
year = {2012},
}
@article{387,
abstract = {In this Letter we present detailed study of the density of states near defects in Bi 2Se 3. In particular, we present data on the commonly found triangular defects in this system. While we do not find any measurable quasiparticle scattering interference effects, we do find localized resonances, which can be well fitted by theory once the potential is taken to be extended to properly account for the observed defects. The data together with the fits confirm that while the local density of states around the Dirac point of the electronic spectrum at the surface is significantly disrupted near the impurity by the creation of low-energy resonance state, the Dirac point is not locally destroyed. We discuss our results in terms of the expected protected surface state of topological insulators. © 2012 American Physical Society.},
author = {Alpichshev, Zhanybek and Biswas, Rudro and Balatsky, Alexander and Analytis, James and Chu, Jiunhaw and Fisher, Ian and Kapitulnik, Aharon},
journal = {Physical Review Letters},
number = {20},
publisher = {American Physical Society},
title = {{STM imaging of impurity resonances on Bi2Se3}},
doi = {10.1103/PhysRevLett.108.206402},
volume = {108},
year = {2012},
}
@article{2967,
abstract = {For programs whose data variables range over Boolean or finite domains, program verification is decidable, and this forms the basis of recent tools for software model checking. In this article, we consider algorithmic verification of programs that use Boolean variables, and in addition, access a single read-only array whose length is potentially unbounded, and whose elements range over an unbounded data domain. We show that the reachability problem, while undecidable in general, is (1) PSPACE-complete for programs in which the array-accessing for-loops are not nested, (2) decidable for a restricted class of programs with doubly nested loops. The second result establishes connections to automata and logics defining languages over data words.},
author = {Alur, Rajeev and Cerny, Pavol and Weinstein, Scott},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {3},
publisher = {ACM},
title = {{Algorithmic analysis of array-accessing programs}},
doi = {10.1145/2287718.2287727},
volume = {13},
year = {2012},
}
@article{492,
abstract = {Background: Characterizing root system architecture (RSA) is essential to understanding the development and function of vascular plants. Identifying RSA-associated genes also represents an underexplored opportunity for crop improvement. Software tools are needed to accelerate the pace at which quantitative traits of RSA are estimated from images of root networks. Results: We have developed GiA Roots (General Image Analysis of Roots), a semi-automated software tool designed specifically for the high-throughput analysis of root system images. GiA Roots includes user-assisted algorithms to distinguish root from background and a fully automated pipeline that extracts dozens of root system phenotypes. Quantitative information on each phenotype, along with intermediate steps for full reproducibility, is returned to the end-user for downstream analysis. GiA Roots has a GUI front end and a command-line interface for interweaving the software into large-scale workflows. GiA Roots can also be extended to estimate novel phenotypes specified by the end-user. Conclusions: We demonstrate the use of GiA Roots on a set of 2393 images of rice roots representing 12 genotypes from the species Oryza sativa. We validate trait measurements against prior analyses of this image set that demonstrated that RSA traits are likely heritable and associated with genotypic differences. Moreover, we demonstrate that GiA Roots is extensible and an end-user can add functionality so that GiA Roots can estimate novel RSA traits. In summary, we show that the software can function as an efficient tool as part of a workflow to move from large numbers of root images to downstream analysis.},
author = {Galkovskyi, Taras and Mileyko, Yuriy and Bucksch, Alexander and Moore, Brad and Symonova, Olga and Price, Charles and Topp, Christopher and Iyer Pascuzzi, Anjali and Zurek, Paul and Fang, Suqin and Harer, John and Benfey, Philip and Weitz, Joshua},
journal = {BMC Plant Biology},
publisher = {BioMed Central},
title = {{GiA Roots: Software for the high-throughput analysis of plant root system architecture}},
doi = {10.1186/1471-2229-12-116},
volume = {12},
year = {2012},
}
@article{493,
abstract = {The BCI competition IV stands in the tradition of prior BCI competitions that aim to provide high quality neuroscientific data for open access to the scientific community. As experienced already in prior competitions not only scientists from the narrow field of BCI compete, but scholars with a broad variety of backgrounds and nationalities. They include high specialists as well as students. The goals of all BCI competitions have always been to challenge with respect to novel paradigms and complex data. We report on the following challenges: (1) asynchronous data, (2) synthetic, (3) multi-class continuous data, (4) session-to-session transfer, (5) directionally modulated MEG, (6) finger movements recorded by ECoG. As after past competitions, our hope is that winning entries may enhance the analysis methods of future BCIs.},
author = {Tangermann, Michael and Müller, Klaus and Aertsen, Ad and Birbaumer, Niels and Braun, Christoph and Brunner, Clemens and Leeb, Robert and Mehring, Carsten and Miller, Kai and Müller Putz, Gernot and Nolte, Guido and Pfurtscheller, Gert and Preissl, Hubert and Schalk, Gerwin and Schlögl, Alois and Vidaurre, Carmen and Waldert, Stephan and Blankertz, Benjamin},
journal = {Frontiers in Neuroscience},
publisher = {Frontiers Research Foundation},
title = {{Review of the BCI competition IV}},
doi = {10.3389/fnins.2012.00055},
volume = {6},
year = {2012},
}
@article{494,
abstract = {We solve the longstanding open problems of the blow-up involved in the translations, when possible, of a nondeterministic Büchi word automaton (NBW) to a nondeterministic co-Büchi word automaton (NCW) and to a deterministic co-Büchi word automaton (DCW). For the NBW to NCW translation, the currently known upper bound is 2^{O(n log n)} and the lower bound is 1.5^n. We improve the upper bound to n2^n and describe a matching lower bound of 2^{Ω(n)}. For the NBW to DCW translation, the currently known upper bound is 2^{O(n log n)}. We improve it to 2^{O(n)}, which is asymptotically tight. Both of our upper-bound constructions are based on a simple subset construction, do not involve intermediate automata with richer acceptance conditions, and can be implemented symbolically. We continue and solve the open problems of translating nondeterministic Streett, Rabin, Muller, and parity word automata to NCW and to DCW. Going via an intermediate NBW is not optimal and we describe direct, simple, and asymptotically tight constructions, involving a 2^{O(n)} blow-up. The constructions are variants of the subset construction, providing a unified approach for translating all common classes of automata to NCW and DCW. Beyond the theoretical importance of the results, we point to numerous applications of the new constructions. In particular, they imply a simple subset-construction based translation, when possible, of LTL to deterministic Büchi word automata.},
author = {Boker, Udi and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Translating to Co-Büchi made tight, unified, and useful}},
doi = {10.1145/2362355.2362357},
volume = {13},
year = {2012},
}
@inproceedings{495,
abstract = {An automaton with advice is a finite state automaton which has access to an additional fixed infinite string called an advice tape. We refine the Myhill-Nerode theorem to characterize the languages of finite strings that are accepted by automata with advice. We do the same for tree automata with advice.},
author = {Kruckman, Alex and Rubin, Sasha and Sheridan, John and Zax, Ben},
booktitle = {Proceedings GandALF 2012},
location = {Napoli, Italy},
pages = {238 -- 246},
publisher = {Open Publishing Association},
title = {{A Myhill Nerode theorem for automata with advice}},
doi = {10.4204/EPTCS.96.18},
volume = {96},
year = {2012},
}
@inproceedings{496,
abstract = {We study the expressive power of logical interpretations on the class of scattered trees, namely those with countably many infinite branches. Scattered trees can be thought of as the tree analogue of scattered linear orders. Every scattered tree has an ordinal rank that reflects the structure of its infinite branches. We prove, roughly, that trees and orders of large rank cannot be interpreted in scattered trees of small rank. We consider a quite general notion of interpretation: each element of the interpreted structure is represented by a set of tuples of subsets of the interpreting tree. Our trees are countable, not necessarily finitely branching, and may have finitely many unary predicates as labellings. We also show how to replace injective set-interpretations in (not necessarily scattered) trees by 'finitary' set-interpretations.},
author = {Rabinovich, Alexander and Rubin, Sasha},
booktitle = {Proceedings of the 27th Annual IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Interpretations in trees with countably many branches}},
doi = {10.1109/LICS.2012.65},
year = {2012},
}
@article{506,
author = {Sixt, Michael K},
journal = {Journal of Cell Biology},
number = {3},
pages = {347 -- 349},
publisher = {Rockefeller University Press},
title = {{Cell migration: Fibroblasts find a new way to get ahead}},
doi = {10.1083/jcb.201204039},
volume = {197},
year = {2012},
}
@article{498,
abstract = {Understanding patterns and correlates of local adaptation in heterogeneous landscapes can provide important information in the selection of appropriate seed sources for restoration. We assessed the extent of local adaptation of fitness components in 12 population pairs of the perennial herb Rutidosis leptorrhynchoides (Asteraceae) and examined whether spatial scale (0.7-600 km), environmental distance, quantitative (QST) and neutral (FST) genetic differentiation, and size of the local and foreign populations could predict patterns of adaptive differentiation. Local adaptation varied among populations and fitness components. Including all population pairs, local adaptation was observed for seedling survival, but not for biomass, while foreign genotype advantage was observed for reproduction (number of inflorescences). Among population pairs, local adaptation increased with QST and local population size for biomass. QST was associated with environmental distance, suggesting ecological selection for phenotypic divergence. However, low FST and variation in population structure in small populations demonstrates the interaction of gene flow and drift in constraining local adaptation in R. leptorrhynchoides. Our study indicates that for species in heterogeneous landscapes, collecting seed from large populations from similar environments to candidate sites is likely to provide the most appropriate seed sources for restoration.},
author = {Pickup, Melinda and Field, David and Rowell, David and Young, Andrew},
journal = {Evolutionary Applications},
number = {8},
pages = {913 -- 924},
publisher = {Wiley-Blackwell},
title = {{Predicting local adaptation in fragmented plant populations: Implications for restoration genetics}},
doi = {10.1111/j.1752-4571.2012.00284.x},
volume = {5},
year = {2012},
}
@inproceedings{497,
abstract = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation is allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires O(n 3·m) time as compared to the previous known O(n 6)-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires O(m2)-time as compared to the previous known O((n·m)2)-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm. © Krishnendu Chatterjee, Siddhesh Chaubal, and Pritish Kamath.},
author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
booktitle = {Computer Science Logic (CSL'12) - 26th International Workshop/21st Annual Conference of the EACSL},
location = {Fontainebleau, France},
pages = {167 -- 182},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Faster algorithms for alternating refinement relations}},
doi = {10.4230/LIPIcs.CSL.2012.167},
volume = {16},
year = {2012},
}
@inproceedings{2956,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and parity objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two-player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP-hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two-player pushdown games. Finally we also show that all the problems have the same computational complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Mean payoff pushdown games}},
doi = {10.1109/LICS.2012.30},
year = {2012},
}
@inproceedings{3165,
abstract = {Computing the winning set for Büchi objectives in alternating games on graphs is a central problem in computer aided verification with a large number of applications. The long standing best known upper bound for solving the problem is Õ(n·m), where n is the number of vertices and m is the number of edges in the graph. We are the first to break the Õ(n·m) boundary by presenting a new technique that reduces the running time to O(n^2). This bound also leads to O(n^2) time algorithms for computing the set of almost-sure winning vertices for Büchi objectives (1) in alternating games with probabilistic transitions (improving an earlier bound of Õ(n·m)), (2) in concurrent graph games with constant actions (improving an earlier bound of O(n^3)), and (3) in Markov decision processes (improving for m > n^{4/3} an earlier bound of O(min(m^{1.5}, m·n^{2/3}))). We also show that the same technique can be used to compute the maximal end-component decomposition of a graph in time O(n^2), which is an improvement over earlier bounds for m > n^{4/3}. Finally, we show how to maintain the winning set for Büchi objectives in alternating games under a sequence of edge insertions or a sequence of edge deletions in O(n) amortized time per operation. This is the first dynamic algorithm for this problem.},
author = {Chatterjee, Krishnendu and Henzinger, Monika},
booktitle = {Proceedings of the Annual ACM-SIAM Symposium on Discrete Algorithms},
location = {Kyoto, Japan},
pages = {1386--1399},
publisher = {SIAM},
title = {{An $O(n^2)$ time algorithm for alternating Büchi games}},
doi = {10.1137/1.9781611973099.109},
year = {2012},
}
@misc{5377,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and ω-regular objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean-payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two-player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP-hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two-player pushdown games. Finally we also show that all the problems have the same complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{Mean-payoff pushdown games}},
doi = {10.15479/AT:IST-2012-0002},
year = {2012},
}
@misc{5378,
abstract = {One central issue in the formal design and analysis of reactive systems is the notion of refinement that asks whether all behaviors of the implementation are allowed by the specification. The local interpretation of behavior leads to the notion of simulation. Alternating transition systems (ATSs) provide a general model for composite reactive systems, and the simulation relation for ATSs is known as alternating simulation. The simulation relation for fair transition systems is called fair simulation. In this work our main contributions are as follows: (1) We present an improved algorithm for fair simulation with Büchi fairness constraints; our algorithm requires O(n^3 · m) time as compared to the previous known O(n^6)-time algorithm, where n is the number of states and m is the number of transitions. (2) We present a game based algorithm for alternating simulation that requires O(m^2)-time as compared to the previous known O((n · m)^2)-time algorithm, where n is the number of states and m is the size of transition relation. (3) We present an iterative algorithm for alternating simulation that matches the time complexity of the game based algorithm, but is more space efficient than the game based algorithm.},
author = {Chatterjee, Krishnendu and Chaubal, Siddhesh and Kamath, Pritish},
issn = {2664-1690},
pages = {21},
publisher = {IST Austria},
title = {{Faster algorithms for alternating refinement relations}},
doi = {10.15479/AT:IST-2012-0001},
year = {2012},
}
@inproceedings{2955,
abstract = {We consider two-player stochastic games played on finite graphs with reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1), or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, the players (a) may not be allowed to use randomization (pure strategies), or (b) may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) may use full randomization. Our main results for pure strategies are as follows. (1) For one-sided games with player 1 having partial observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almostsure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete. (2) For one-sided games with player 2 having partial observation we show that non-elementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. (3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least non-elementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. 
Our equivalence result exhibits serious flaws in previous results of the literature: we show a non-elementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1109/LICS.2012.28},
year = {2012},
}
@inproceedings{3341,
abstract = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with $\omega$-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally we show that the value continuity property breaks without the structurally equivalent assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
author = {Chatterjee, Krishnendu},
booktitle = {Foundations of Software Science and Computational Structures},
location = {Tallinn, Estonia},
pages = {270--285},
publisher = {Springer},
title = {{Robustness of structurally equivalent concurrent parity games}},
doi = {10.1007/978-3-642-28729-9_18},
volume = {7213},
year = {2012},
}
@inproceedings{2957,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by parity conditions. We consider three qualitative decision problems: (i) the positive decision problem asks whether there is a word that is accepted with positive probability; (ii) the almost decision problem asks whether there is a word that is accepted with probability 1; and (iii) the limit decision problem asks whether words are accepted with probability arbitrarily close to 1. We unify and generalize several decidability results for probabilistic automata over infinite words, and identify a robust (closed under union and intersection) subclass of probabilistic automata for which all the qualitative decision problems are decidable for parity conditions. We also show that if the input words are restricted to lasso shape (regular) words, then the positive and almost problems are decidable for all probabilistic automata with parity conditions. For most decidable problems we show an optimal PSPACE-complete complexity bound.},
author = {Chatterjee, Krishnendu and Tracol, Mathieu},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Decidable problems for probabilistic automata on infinite words}},
doi = {10.1109/LICS.2012.29},
year = {2012},
}
@article{3249,
abstract = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of "fit" or "desirability". We extend the simulation preorder to the quantitative setting by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
journal = {Theoretical Computer Science},
number = {1},
pages = {21--35},
publisher = {Elsevier},
title = {{Simulation distances}},
doi = {10.1016/j.tcs.2011.08.002},
volume = {413},
year = {2012},
}
@misc{5396,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pair-wise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
issn = {2664-1690},
pages = {13},
publisher = {IST Austria},
title = {{Approximating marginals using discrete energy minimization}},
doi = {10.15479/AT:IST-2012-0003},
year = {2012},
}
@techreport{5398,
abstract = {This document is created as a part of the project “Repository for Research Data on IST Austria”. It summarises the actual state of research data at IST Austria, based on survey results. It supports the choice of appropriate software, which would best fit the requirements of their users, the researchers.},
author = {Porsche, Jana},
publisher = {IST Austria},
title = {{Actual state of research data @ IST Austria}},
year = {2012},
}
@inproceedings{3124,
abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.
},
author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph},
location = {Edinburgh, Scotland},
publisher = {ICML},
title = {{Approximating marginals using discrete energy minimization}},
year = {2012},
}
@inbook{5745,
author = {Gupta, Ashutosh},
booktitle = {Automated Technology for Verification and Analysis},
isbn = {9783642333859},
issn = {0302-9743},
location = {Thiruvananthapuram, Kerala, India},
pages = {107--121},
publisher = {Springer Berlin Heidelberg},
title = {{Improved Single Pass Algorithms for Resolution Proof Reduction}},
doi = {10.1007/978-3-642-33386-6_10},
volume = {7561},
year = {2012},
}
@article{5839,
abstract = {Canny's edge detection algorithm is a classical and robust method for edge detection in gray-scale images. The two
significant features of this method are introduction of NMS (Non-Maximum Suppression) and double thresholding of
the gradient image. Due to poor illumination, the region boundaries in an image may become vague, creating
uncertainties in the gradient image. In this paper, we have proposed an algorithm based on the concept of type-2 fuzzy sets to handle uncertainties that automatically selects the threshold values needed to segment the gradient image using classical Canny's edge detection algorithm. The results show that our algorithm works significantly well on different benchmark images as well as medical images (hand radiography images).},
author = {Biswas, Ranita and Sil, Jaya},
issn = {2212-0173},
journal = {Procedia Technology},
pages = {820--824},
publisher = {Elsevier},
title = {{An Improved Canny Edge Detection Algorithm Based on Type-2 Fuzzy Sets}},
doi = {10.1016/j.protcy.2012.05.134},
volume = {4},
year = {2012},
}
@article{596,
abstract = {The human Mediator complex controls RNA polymerase II (pol II) function in ways that remain incompletely understood. Activator-Mediator binding alters Mediator structure, and these activator-induced structural shifts appear to play key roles in regulating transcription. A recent cryo-electron microscopy (EM) analysis revealed that pol II adopted a stable orientation within a Mediator-pol II-TFIIF assembly in which Mediator was bound to the activation domain of viral protein 16 (VP16). Whereas TFIIF was shown to be important for orienting pol II within this assembly, the potential role of the activator was not assessed. To determine how activator binding might affect pol II orientation, we isolated human Mediator-pol II-TFIIF complexes in which Mediator was not bound to an activator. Cryo-EM analysis of this assembly, coupled with pol II crystal structure docking, revealed that pol II binds Mediator at the same general location; however, in contrast to VP16-bound Mediator, pol II does not appear to stably orient in the absence of an activator. Variability in pol II orientation might be important mechanistically, perhaps to enable sense and antisense transcription at human promoters. Because Mediator interacts extensively with pol II, these results suggest that Mediator structural shifts induced by activator binding help stably orient pol II prior to transcription initiation.},
author = {Bernecky, Carrie A and Taatjes, Dylan},
journal = {Journal of Molecular Biology},
number = {5},
pages = {387--394},
publisher = {Elsevier},
title = {{Activator-mediator binding stabilizes RNA polymerase II orientation within the human mediator-RNA polymerase II-TFIIF assembly}},
doi = {10.1016/j.jmb.2012.02.014},
volume = {417},
year = {2012},
}
@article{6136,
abstract = {Tonic receptors convey stimulus duration and intensity and are implicated in homeostatic control. However, how tonic homeostatic signals are generated and how they reconfigure neural circuits and modify animal behavior is poorly understood. Here we show that Caenorhabditis elegans O2-sensing neurons are tonic receptors that continuously signal ambient [O2] to set the animal's behavioral state. Sustained signaling relied on a Ca2+ relay involving L-type voltage-gated Ca2+ channels, the ryanodine and the inositol-1,4,5-trisphosphate receptors. Tonic activity evoked continuous neuropeptide release, which helps elicit the enduring behavioral state associated with high [O2]. Sustained O2 receptor signaling was propagated to downstream neural circuits, including the hub interneuron RMG. O2 receptors evoked similar locomotory states at particular O2 concentrations, regardless of previous d[O2]/dt. However, a phasic component of the URX receptors' response to high d[O2]/dt, as well as tonic-to-phasic transformations in downstream interneurons, enabled transient reorientation movements shaped by d[O2]/dt. Our results highlight how tonic homeostatic signals can generate both transient and enduring behavioral change.},
author = {Busch, Karl Emanuel and Laurent, Patrick and Soltesz, Zoltan and Murphy, Robin Joseph and Faivre, Olivier and Hedwig, Berthold and Thomas, Martin and Smith, Heather L and de Bono, Mario},
issn = {1097-6256},
journal = {Nature Neuroscience},
number = {4},
pages = {581--591},
publisher = {Springer Nature},
title = {{Tonic signaling from O2 sensors sets neural circuit activity and behavioral state}},
doi = {10.1038/nn.3061},
volume = {15},
year = {2012},
}
@article{6588,
abstract = {First we note that the best polynomial approximation to $|x|$ on the set, which consists of an interval on the positive half-axis and a point on the negative half-axis, can be given by means of the classical Chebyshev polynomials. Then we explore the cases when a solution of the related problem on two intervals can be given in elementary functions.},
author = {Pausinger, Florian},
issn = {1812-9471},
journal = {Journal of Mathematical Physics, Analysis, Geometry},
number = {1},
pages = {63--78},
publisher = {B. Verkin Institute for Low Temperature Physics and Engineering},
title = {{Elementary solutions of the Bernstein problem on two intervals}},
volume = {8},
year = {2012},
}
@inproceedings{6746,
abstract = {This paper proposes a novel cooperative approach for two-hop amplify-and-forward (A&F) relaying that exploits both the signal forwarded by the relay and the one directly transmitted by the source in impulse-radio ultra-wideband (IR-UWB) systems. Specifically, we focus on a non-coherent setup employing a double-differential encoding scheme at the source node and a single differential demodulation at the relay and destination. The log-likelihood ratio based decision rule is derived at the destination node. A semi-analytical power allocation strategy is presented by evaluating a closed-form expression for the effective signal to noise ratio (SNR) at the destination, which is maximized by exhaustive search. Numerical simulations show that the proposed system outperforms both the direct transmission with single differential encoding and the non-cooperative multi-hop approach in different scenarios.},
author = {Mondelli, Marco and Zhou, Qi and Ma, Xiaoli and Lottici, Vincenzo},
booktitle = {2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
issn = {1520-6149},
location = {Kyoto, Japan},
pages = {2905--2908},
publisher = {IEEE},
title = {{A cooperative approach for amplify-and-forward differential transmitted reference IR-UWB relay systems}},
doi = {10.1109/icassp.2012.6288524},
year = {2012},
}
@article{7074,
abstract = {The Seebeck coefficients, electrical resistivities, total thermal conductivities, and magnetization are reported for temperatures between 5 and 350 K for n-type Bi0.88Sb0.12 nano-composite alloys made by Ho-doping at the 0, 1, and 3 % atomic levels. The alloys were prepared using a dc hot-pressing method, and are shown to be single phase for both Ho contents with grain sizes on the average of 900 nm. We find the parent compound has a maximum of ZT = 0.28 at 231 K, while doping 1 % Ho increases the maximum ZT to 0.31 at 221 K and the 3 % doped sample suppresses the maximum ZT = 0.24 at a temperature of 260 K.},
author = {Lukas, K. C. and Joshi, G. and Modic, Kimberly A and Ren, Z. F. and Opeil, C. P.},
issn = {1573-4803},
journal = {Journal of Materials Science},
number = {15},
pages = {5729--5734},
publisher = {Springer Nature},
title = {{Thermoelectric properties of Ho-doped Bi0.88Sb0.12}},
doi = {10.1007/s10853-012-6463-6},
volume = {47},
year = {2012},
}
@article{7308,
abstract = {Carbon has been used widely as the basis of porous cathodes for nonaqueous Li–O2 cells. However, the stability of carbon and the effect of carbon on electrolyte decomposition in such cells are complex and depend on the hydrophobicity/hydrophilicity of the carbon surface. Analyzing carbon cathodes, cycled in Li–O2 cells between 2 and 4 V, using acid treatment and Fenton’s reagent, and combined with differential electrochemical mass spectrometry and FTIR, demonstrates the following: Carbon is relatively stable below 3.5 V (vs Li/Li+) on discharge or charge, especially so for hydrophobic carbon, but is unstable on charging above 3.5 V (in the presence of Li2O2), oxidatively decomposing to form Li2CO3. Direct chemical reaction with Li2O2 accounts for only a small proportion of the total carbon decomposition on cycling. Carbon promotes electrolyte decomposition during discharge and charge in a Li–O2 cell, giving rise to Li2CO3 and Li carboxylates (DMSO and tetraglyme electrolytes). The Li2CO3 and Li carboxylates present at the end of discharge and those that form on charge result in polarization on the subsequent charge. Li2CO3 (derived from carbon and from the electrolyte) as well as the Li carboxylates (derived from the electrolyte) decompose and form on charging. Oxidation of Li2CO3 on charging to ∼4 V is incomplete; Li2CO3 accumulates on cycling resulting in electrode passivation and capacity fading. Hydrophilic carbon is less stable and more catalytically active toward electrolyte decomposition than carbon with a hydrophobic surface. If the Li–O2 cell could be charged at or below 3.5 V, then carbon may be relatively stable, however, its ability to promote electrolyte decomposition, presenting problems for its use in a practical Li–O2 battery. 
The results emphasize that stable cycling of Li2O2 at the cathode in a Li–O2 cell depends on the synergy between electrolyte and electrode; the stability of the electrode and the electrolyte cannot be considered in isolation.},
author = {Ottakam Thotiyl, Muhammed M. and Freunberger, Stefan Alexander and Peng, Zhangquan and Bruce, Peter G.},
issn = {0002-7863},
journal = {Journal of the American Chemical Society},
number = {1},
pages = {494--500},
publisher = {ACS},
title = {{The carbon electrode in nonaqueous Li–O2 cells}},
doi = {10.1021/ja310258x},
volume = {135},
year = {2012},
}
@article{7309,
abstract = {Energy-storage technologies, including electrical double-layer capacitors and rechargeable batteries, have attracted significant attention for applications in portable electronic devices, electric vehicles, bulk electricity storage at power stations, and “load leveling” of renewable sources, such as solar energy and wind power. Transforming lithium batteries and electric double-layer capacitors requires a step change in the science underpinning these devices, including the discovery of new materials, new electrochemistry, and an increased understanding of the processes on which the devices depend. The Review will consider some of the current scientific issues underpinning lithium batteries and electric double-layer capacitors.},
author = {Choi, Nam-Soon and Chen, Zonghai and Freunberger, Stefan Alexander and Ji, Xiulei and Sun, Yang-Kook and Amine, Khalil and Yushin, Gleb and Nazar, Linda F. and Cho, Jaephil and Bruce, Peter G.},
issn = {1433-7851},
journal = {Angewandte Chemie International Edition},
number = {40},
pages = {9994--10024},
publisher = {Wiley},
title = {{Challenges facing Lithium batteries and electrical double-layer capacitors}},
doi = {10.1002/anie.201201429},
volume = {51},
year = {2012},
}
@article{7310,
abstract = {The rechargeable nonaqueous lithium-air (Li-O2) battery is receiving a great deal of interest because, theoretically, its specific energy far exceeds the best that can be achieved with lithium-ion cells. Operation of the rechargeable Li-O2 battery depends critically on repeated and highly reversible formation/decomposition of lithium peroxide (Li2O2) at the cathode upon cycling. Here, we show that this process is possible with the use of a dimethyl sulfoxide electrolyte and a porous gold electrode (95% capacity retention from cycles 1 to 100), whereas previously only partial Li2O2 formation/decomposition and limited cycling could occur. Furthermore, we present data indicating that the kinetics of Li2O2 oxidation on charge is approximately 10 times faster than on carbon electrodes.},
author = {Peng, Z. and Freunberger, Stefan Alexander and Chen, Y. and Bruce, P. G.},
issn = {0036-8075},
journal = {Science},
number = {6094},
pages = {563--566},
publisher = {AAAS},
title = {{A reversible and higher-rate Li-O2 battery}},
doi = {10.1126/science.1223985},
volume = {337},
year = {2012},
}
@article{7311,
abstract = {Stability of the electrolyte toward reduced oxygen species generated at the cathode is a crucial challenge for the rechargeable nonaqueous Li–O2 battery. Here, we investigate dimethylformamide as the basis of an electrolyte. Although reactions at the O2 cathode on the first discharge–charge cycle are dominated by reversible Li2O2 formation/decomposition, there is also electrolyte decomposition, which increases on cycling. The products of decomposition at the cathode on discharge are Li2O2, Li2CO3, HCO2Li, CH3CO2Li, NO, H2O, and CO2. Li2CO3 accumulates in the electrode with cycling. The stability of dimethylformamide toward reduced oxygen species is insufficient for its use in the rechargeable nonaqueous Li–O2 battery.},
author = {Chen, Yuhui and Freunberger, Stefan Alexander and Peng, Zhangquan and Bardé, Fanny and Bruce, Peter G.},
issn = {0002-7863},
journal = {Journal of the American Chemical Society},
number = {18},
pages = {7952--7957},
publisher = {American Chemical Society},
title = {{Li–O2 battery with a dimethylformamide electrolyte}},
doi = {10.1021/ja302178w},
volume = {134},
year = {2012},
}
@inproceedings{766,
abstract = {Asynchronous task allocation is a fundamental problem in distributed computing in which p asynchronous processes must execute a set of m tasks. Also known as write-all or do-all, this problem has been studied extensively, both independently and as a key building block for various distributed algorithms. In this paper, we break new ground on this classic problem: we introduce the To-Do Tree concurrent data structure, which improves on the best known randomized and deterministic upper bounds. In the presence of an adaptive adversary, the randomized To-Do Tree algorithm has O(m + p log p log2 m) work complexity. We then show that there exists a deterministic variant of the To-Do Tree algorithm with work complexity O(m + p log5 m log2 max(m, p)). For all values of m and p, our algorithms are within log factors of the Ω(m + p log p) lower bound for this problem. The key technical ingredient in our results is a new approach for analyzing concurrent executions against a strong adaptive scheduler. This technique allows us to handle the complex dependencies between the processes' coin flips and their scheduling, and to tightly bound the work needed to perform subsets of the tasks.},
author = {Alistarh, Dan-Adrian and Bender, Michael and Gilbert, Seth and Guerraoui, Rachid},
booktitle = {2012 IEEE 53rd Annual Symposium on Foundations of Computer Science},
pages = {331--340},
publisher = {IEEE},
title = {{How to allocate tasks asynchronously}},
doi = {10.1109/FOCS.2012.41},
year = {2012},
}
@article{767,
abstract = {Synchronous distributed algorithms are easier to design and prove correct than algorithms that tolerate asynchrony. Yet, in the real world, networks experience asynchrony and other timing anomalies. In this paper, we address the question of how to efficiently transform an algorithm that relies on synchronous timing into an algorithm that tolerates asynchronous executions. We introduce a transformation technique from synchronous algorithms to indulgent algorithms (Guerraoui, in PODC, pp. 289-297, 2000), which induces only a constant overhead in terms of time complexity in well-behaved executions. Our technique is based on a new abstraction we call an asynchrony detector, which the participating processes implement collectively. The resulting transformation works for the class of colorless distributed tasks, including consensus and set agreement. Interestingly, we also show that our technique is relevant for colored tasks, by applying it to the renaming problem, to obtain the first indulgent renaming algorithm.},
author = {Alistarh, Dan-Adrian and Gilbert, Seth and Guerraoui, Rachid and Travers, Corentin},
journal = {Theory of Computing Systems},
number = {4},
pages = {404--424},
publisher = {Springer},
title = {{Generating Fast Indulgent Algorithms}},
doi = {10.1007/s00224-012-9407-2},
volume = {51},
year = {2012},
}
@article{7748,
abstract = {Female mate choice acts as an important evolutionary force, yet the influence of the environment on both its expression and the selective pressures acting upon it remains unknown. We found consistent heritable differences between females in their choice of mate based on ornament size during a 25‐year study of a population of collared flycatchers. However, the fitness consequences of mate choice were dependent on environmental conditions experienced whilst breeding. Females breeding with highly ornamented males experienced high relative fitness during dry summer conditions, but low relative fitness during wetter years. Our results imply that sexual selection within a population can be highly variable and dependent upon the prevailing weather conditions experienced by individuals.},
author = {Robinson, Matthew Richard and van Doorn, G. Sander and Gustafsson, Lars and Qvarnström, Anna},
issn = {1461-023X},
journal = {Ecology Letters},
number = {6},
pages = {611--618},
publisher = {Wiley},
title = {{Environment-dependent selection on mate choice in a natural population of birds}},
doi = {10.1111/j.1461-0248.2012.01780.x},
volume = {15},
year = {2012},
}
@article{7749,
abstract = {Although studies on laboratory species and natural populations of vertebrates have shown reproduction to impair later performance, little is known of the age‐specific associations between reproduction and survival, and how such findings apply to the ageing of large, long‐lived species. Herein we develop a framework to examine population‐level patterns of reproduction and survival across lifespan in long‐lived organisms, and decompose those changes into individual‐level effects, and the effects of age‐specific trade‐offs between fitness components. We apply this to an extensive longitudinal dataset on female semi‐captive Asian timber elephants (Elephas maximus) and report the first evidence of age‐specific fitness declines that are driven by age‐specific associations between fitness components in a long‐lived mammal. Associations between reproduction and survival are positive in early life, but negative in later life with up to 71% of later‐life survival declines associated with investing in the production of offspring within this population of this critically endangered species.},
author = {Robinson, Matthew Richard and Mar, Khyne U and Lummaa, Virpi},
issn = {1461-023X},
journal = {Ecology Letters},
number = {3},
pages = {260--266},
publisher = {Wiley},
title = {{Senescence and age-specific trade-offs between reproduction and survival in female Asian elephants}},
doi = {10.1111/j.1461-0248.2011.01735.x},
volume = {15},
year = {2012},
}
@article{7776,
abstract = {We present an analysis of finite-size effects in jammed packings of N soft, frictionless spheres at zero temperature. There is a 1/N correction to the discrete jump in the contact number at the transition so that jammed packings exist only above isostaticity. As a result, the canonical power-law scalings of the contact number and elastic moduli break down at low pressure. These quantities exhibit scaling collapse with a nontrivial scaling function, demonstrating that the jamming transition can be considered a phase transition. Scaling is achieved as a function of N in both two and three dimensions, indicating an upper critical dimension of 2.},
author = {Goodrich, Carl Peter and Liu, Andrea J. and Nagel, Sidney R.},
issn = {0031-9007},
journal = {Physical Review Letters},
number = {9},
pages = {095704},
publisher = {American Physical Society},
title = {{Finite-size scaling at the jamming transition}},
doi = {10.1103/physrevlett.109.095704},
volume = {109},
year = {2012},
}
@article{808,
abstract = {Using correlated live-cell imaging and electron tomography we found that actin branch junctions in protruding and treadmilling lamellipodia are not concentrated at the front as previously supposed, but link actin filament subsets in which there is a continuum of distances from a junction to the filament plus ends, for up to at least 1 mm. When branch sites were observed closely spaced on the same filament their separation was commonly a multiple of the actin helical repeat of 36 nm. Image averaging of branch junctions in the tomograms yielded a model for the in vivo branch at 2.9 nm resolution, which was comparable with that derived for the in vitro actin- Arp2/3 complex. Lamellipodium initiation was monitored in an intracellular wound-healing model and was found to involve branching from the sides of actin filaments oriented parallel to the plasmalemma. Many filament plus ends, presumably capped, terminated behind the lamellipodium tip and localized on the dorsal and ventral surfaces of the actin network. These findings reveal how branching events initiate and maintain a network of actin filaments of variable length, and provide the first structural model of the branch junction in vivo. A possible role of filament capping in generating the lamellipodium leaflet is discussed and a mathematical model of protrusion is also presented.},
author = {Vinzenz, Marlene and Nemethova, Maria and Schur, Florian and Mueller, Jan and Narita, Akihiro and Urban, Edit and Winkler, Christoph and Schmeiser, Christian and Koestler, Stefan and Rottner, Klemens and Resch, Guenter and Maéda, Yuichiro and Small, John},
journal = {Journal of Cell Science},
number = {11},
pages = {2775--2785},
publisher = {Company of Biologists},
title = {{Actin branching in the initiation and maintenance of lamellipodia}},
doi = {10.1242/jcs.107623},
volume = {125},
year = {2012},
}
@article{8246,
abstract = {The Staphylococcus aureus cell wall stress stimulon (CWSS) is activated by cell envelope-targeting antibiotics or depletion of essential cell wall biosynthesis enzymes. The functionally uncharacterized S. aureus LytR-CpsA-Psr (LCP) proteins, MsrR, SA0908 and SA2103, all belong to the CWSS. Although not essential, deletion of all three LCP proteins severely impairs cell division. We show here that VraSR-dependent CWSS expression was up to 250-fold higher in single, double and triple LCP mutants than in wild type S. aureus in the absence of external stress. The LCP triple mutant was virtually depleted of wall teichoic acids (WTA), which could be restored to different degrees by any of the single LCP proteins. Subinhibitory concentrations of tunicamycin, which inhibits the first WTA synthesis enzyme TarO (TagO), could partially complement the severe growth defect of the LCP triple mutant. Both of the latter findings support a role for S. aureus LCP proteins in late WTA synthesis, as in Bacillus subtilis where LCP proteins were recently proposed to transfer WTA from lipid carriers to the cell wall peptidoglycan. Intrinsic activation of the CWSS upon LCP deletion and the fact that LCP proteins were essential for WTA-loading of the cell wall, highlight their important role(s) in S. aureus cell envelope biogenesis.},
author = {Dengler, Vanina and Meier, Patricia Stutzmann and Heusser, Ronald and Kupferschmied, Peter and Fazekas, Judit and Friebe, Sarah and Staufer, Sibylle Burger and Majcherczyk, Paul A. and Moreillon, Philippe and Berger-Bächi, Brigitte and McCallum, Nadine},
issn = {0378-1097},
journal = {FEMS Microbiology Letters},
number = {2},
pages = {109--120},
publisher = {Oxford University Press},
title = {{Deletion of hypothetical wall teichoic acid ligases in Staphylococcus aureus activates the cell wall stress response}},
doi = {10.1111/j.1574-6968.2012.02603.x},
volume = {333},
year = {2012},
}
@article{846,
abstract = {Whether or not evolutionary change is inherently irreversible remains a controversial topic. Some examples of evolutionary irreversibility are known; however, this question has not been comprehensively addressed at the molecular level. Here, we use data from 221 human genes with known pathogenic mutations to estimate the rate of irreversibility in protein evolution. For these genes, we reconstruct ancestral amino acid sequences along the mammalian phylogeny and identify ancestral amino acid states that match known pathogenic mutations. Such cases represent inherent evolutionary irreversibility because, at the present moment, reversals to these ancestral amino acid states are impossible for the human lineage. We estimate that approximately 10% of all amino acid substitutions along the mammalian phylogeny are irreversible, such that a return to the ancestral amino acid state would lead to a pathogenic phenotype. For a subset of 51 genes with high rates of irreversibility, as much as 40% of all amino acid evolution was estimated to be irreversible. Because pathogenic phenotypes do not resemble ancestral phenotypes, the molecular nature of the high rate of irreversibility in proteins is best explained by evolution with a high prevalence of compensatory, epistatic interactions between amino acid sites. Under such mode of protein evolution, once an amino acid substitution is fixed, the probability of its reversal declines as the protein sequence accumulates changes that affect the phenotypic manifestation of the ancestral state. The prevalence of epistasis in evolution indicates that the observed high rate of irreversibility in protein evolution is an inherent property of protein structure and function.},
author = {Soylemez, Onuralp and Kondrashov, Fyodor},
journal = {Genome Biology and Evolution},
number = {12},
pages = {1213--1222},
publisher = {Oxford University Press},
title = {{Estimating the rate of irreversibility in protein evolution}},
doi = {10.1093/gbe/evs096},
volume = {4},
year = {2012},
}
@article{8463,
abstract = {The 1H dipolar network, which is the major obstacle for applying proton detection in the solid-state, can be reduced by deuteration, employing the RAP (Reduced Adjoining Protonation) labeling scheme, which yields random protonation at non-exchangeable sites. We present here a systematic study on the optimal degree of random sidechain protonation in RAP samples as a function of the MAS (magic angle spinning) frequency. In particular, we compare 1H sensitivity and linewidth of a microcrystalline protein, the SH3 domain of chicken α-spectrin, for samples, prepared with 5–25 % H2O in the E. coli growth medium, in the MAS frequency range of 20–60 kHz. At an external field of 19.96 T (850 MHz), we find that using a proton concentration between 15 and 25 % in the M9 medium yields the best compromise in terms of sensitivity and resolution, with an achievable average 1H linewidth on the order of 40–50 Hz. Comparing sensitivities at a MAS frequency of 60 versus 20 kHz, a gain in sensitivity by a factor of 4–4.5 is observed in INEPT-based 1H detected 1D 1H,13C correlation experiments. In total, we find that spectra recorded with a 1.3 mm rotor at 60 kHz have almost the same sensitivity as spectra recorded with a fully packed 3.2 mm rotor at 20 kHz, even though ~20× less material is employed. The improved sensitivity is attributed to 1H line narrowing due to fast MAS and to the increased efficiency of the 1.3 mm coil.},
author = {Asami, Sam and Szekely, Kathrin and Schanda, Paul and Meier, Beat H. and Reif, Bernd},
issn = {0925-2738},
journal = {Journal of Biomolecular NMR},
number = {2},
pages = {155--168},
publisher = {Springer Nature},
title = {{Optimal degree of protonation for 1H detection of aliphatic sites in randomly deuterated proteins as a function of the MAS frequency}},
doi = {10.1007/s10858-012-9659-9},
volume = {54},
year = {2012},
}
@article{8465,
abstract = {We demonstrate that conformational exchange processes in proteins on microsecond-to-millisecond time scales can be detected and quantified by solid-state NMR spectroscopy. We show two independent approaches that measure the effect of conformational exchange on transverse relaxation parameters, namely Carr–Purcell–Meiboom–Gill relaxation-dispersion experiments and measurement of differential multiple-quantum coherence decay. Long coherence lifetimes, as required for these experiments, are achieved by the use of highly deuterated samples and fast magic-angle spinning. The usefulness of the approaches is demonstrated by application to microcrystalline ubiquitin. We detect a conformational exchange process in a region of the protein for which dynamics have also been observed in solution. Interestingly, quantitative analysis of the data reveals that the exchange process is more than 1 order of magnitude slower than in solution, and this points to the impact of the crystalline environment on free energy barriers.},
author = {Tollinger, Martin and Sivertsen, Astrid C. and Meier, Beat H. and Ernst, Matthias and Schanda, Paul},
issn = {0002-7863},
journal = {Journal of the American Chemical Society},
number = {36},
pages = {14800--14807},
publisher = {American Chemical Society},
title = {{Site-resolved measurement of microsecond-to-millisecond conformational-exchange processes in proteins by solid-state NMR spectroscopy}},
doi = {10.1021/ja303591y},
volume = {134},
year = {2012},
}
@article{8466,
abstract = {Recent advances in NMR spectroscopy and the availability of high magnetic field strengths now offer the possibility to record real-time 3D NMR spectra of short-lived protein states, e.g., states that become transiently populated during protein folding. Here we present a strategy for obtaining sequential NMR assignments as well as atom-resolved information on structural and dynamic features within a folding intermediate of the amyloidogenic protein β2-microglobulin that has a half-lifetime of only 20 min.},
author = {Rennella, Enrico and Cutuil, Thomas and Schanda, Paul and Ayala, Isabel and Forge, Vincent and Brutscher, Bernhard},
issn = {0002-7863},
journal = {Journal of the American Chemical Society},
number = {19},
pages = {8066--8069},
publisher = {American Chemical Society},
title = {{Real-time NMR characterization of structure and dynamics in a transiently populated protein folding intermediate}},
doi = {10.1021/ja302598j},
volume = {134},
year = {2012},
}
@article{8467,
abstract = {Partial deuteration is a powerful tool to increase coherence life times and spectral resolution in proton solid-state NMR. The J coupling to deuterium needs, however, to be decoupled to maintain the good resolution in the (usually indirect) 13C dimension(s). We present a simple and reversible way to expand a commercial 1.3 mm HCN MAS probe with a 2H channel with sufficient field strength for J-decoupling of deuterium, namely 2–3 kHz. The coil is placed at the outside of the stator and requires no significant modifications to the probe. The performance and the realizable gains in sensitivity and resolution are demonstrated using perdeuterated ubiquitin, with selectively CHD2-labeled methyl groups.},
author = {Huber, Matthias and With, Oliver and Schanda, Paul and Verel, René and Ernst, Matthias and Meier, Beat H.},
issn = {1090-7807},
journal = {Journal of Magnetic Resonance},
pages = {76--80},
publisher = {Elsevier},
title = {{A supplementary coil for 2H decoupling with commercial HCN MAS probes}},
doi = {10.1016/j.jmr.2011.10.010},
volume = {214},
year = {2012},
}
@article{8502,
abstract = {The famous ergodic hypothesis suggests that for a typical Hamiltonian on a typical energy surface nearly all trajectories are dense. KAM theory disproves it. Ehrenfest (The Conceptual Foundations of the Statistical Approach in Mechanics. Ithaca, NY: Cornell University Press, 1959) and Birkhoff (Collected Math Papers. Vol 2, New York: Dover, pp 462–465, 1968) stated the quasi-ergodic hypothesis claiming that a typical Hamiltonian on a typical energy surface has a dense orbit. This question is wide open. Herman (Proceedings of the International Congress of Mathematicians, Vol II (Berlin, 1998). Doc Math 1998, Extra Vol II, Berlin: Int Math Union, pp 797–808, 1998) proposed to look for an example of a Hamiltonian near H0(I)=⟨I,I⟩2 with a dense orbit on the unit energy surface. In this paper we construct a Hamiltonian H0(I)+εH1(θ,I,ε) which has an orbit dense in a set of maximal Hausdorff dimension equal to 5 on the unit energy surface.},
author = {Kaloshin, Vadim and Saprykina, Maria},
issn = {0010-3616},
journal = {Communications in Mathematical Physics},
keywords = {Mathematical Physics, Statistical and Nonlinear Physics},
number = {3},
pages = {643--697},
publisher = {Springer Nature},
title = {{An example of a nearly integrable Hamiltonian system with a trajectory dense in a set of maximal Hausdorff dimension}},
doi = {10.1007/s00220-012-1532-x},
volume = {315},
year = {2012},
}
@article{8503,
abstract = {We prove there are finitely many isometry classes of planar central configurations (also called relative equilibria) in the Newtonian 5-body problem, except perhaps if the 5-tuple of positive masses belongs to a given codimension 2 subvariety of the mass space.},
author = {Albouy, Alain and Kaloshin, Vadim},
issn = {0003-486X},
journal = {Annals of Mathematics},
number = {1},
pages = {535--588},
publisher = {Princeton University Press},
title = {{Finiteness of central configurations of five bodies in the plane}},
doi = {10.4007/annals.2012.176.1.10},
volume = {176},
year = {2012},
}
@article{8504,
abstract = {In this paper we present a surprising example of a Cr unimodal map of an interval f:I→I whose number of periodic points Pn(f)=∣{x∈I:fnx=x}∣ grows faster than any ahead given sequence along a subsequence nk=3k. This example also shows that ‘non-flatness’ of critical points is necessary for the Martens–de Melo–van Strien theorem [M. Martens, W. de Melo and S. van Strien. Julia–Fatou–Sullivan theory for real one-dimensional dynamics. Acta Math.168(3–4) (1992), 273–318] to hold.},
author = {Kaloshin, Vadim and Kozlovski, O. S.},
issn = {0143-3857},
journal = {Ergodic Theory and Dynamical Systems},
keywords = {Applied Mathematics, General Mathematics},
number = {1},
pages = {159--165},
publisher = {Cambridge University Press},
title = {{A Cr unimodal map with an arbitrary fast growth of the number of periodic points}},
doi = {10.1017/s0143385710000817},
volume = {32},
year = {2012},
}
@article{858,
abstract = {Background: The evolution and genomic stop codon frequencies have not been rigorously studied with the exception of coding of non-canonical amino acids. Here we study the rate of evolution and frequency distribution of stop codons in bacterial genomes. Results: We show that in bacteria stop codons evolve slower than synonymous sites, suggesting the action of weak negative selection. However, the frequency of stop codons relative to genomic nucleotide content indicated that this selection regime is not straightforward. The frequency of TAA and TGA stop codons is GC-content dependent, with TAA decreasing and TGA increasing with GC-content, while TAG frequency is independent of GC-content. Applying a formal, analytical model to these data we found that the relationship between stop codon frequencies and nucleotide content cannot be explained by mutational biases or selection on nucleotide content. However, with weak nucleotide content-dependent selection on TAG, -0.5 < Nes < 1.5, the model fits all of the data and recapitulates the relationship between TAG and nucleotide content. For biologically plausible rates of mutations we show that, in bacteria, TAG stop codon is universally associated with lower fitness, with TAA being the optimal for G-content < 16% while for G-content > 16% TGA has a higher fitness than TAG. Conclusions: Our data indicate that TAG codon is universally suboptimal in the bacterial lineage, such that TAA is likely to be the preferred stop codon for low GC content while the TGA is the preferred stop codon for high GC content. The optimization of stop codon usage may therefore be useful in genome engineering or gene expression optimization applications. Reviewers: This article was reviewed by Michail Gelfand, Arcady Mushegian and Shamil Sunyaev. For the full reviews, please go to the Reviewers' Comments section.},
author = {Povolotskaya, Inna and Kondrashov, Fyodor and Ledda, Alice and Vlasov, Peter K},
journal = {Biology Direct},
publisher = {BioMed Central},
title = {{Stop codons in bacteria are not selectively equivalent}},
doi = {10.1186/1745-6150-7-30},
volume = {7},
year = {2012},
}
@article{900,
abstract = {The main forces directing long-term molecular evolution remain obscure. A sizable fraction of amino-acid substitutions seem to be fixed by positive selection, but it is unclear to what degree long-term protein evolution is constrained by epistasis, that is, instances when substitutions that are accepted in one genotype are deleterious in another. Here we obtain a quantitative estimate of the prevalence of epistasis in long-term protein evolution by relating data on amino-acid usage in 14 organelle proteins and 2 nuclear-encoded proteins to their rates of short-term evolution. We studied multiple alignments of at least 1,000 orthologues for each of these 16 proteins from species from a diverse phylogenetic background and found that an average site contained approximately eight different amino acids. Thus, without epistasis an average site should accept two-fifths of all possible amino acids, and the average rate of amino-acid substitutions should therefore be about three-fifths lower than the rate of neutral evolution. However, we found that the measured rate of amino-acid substitution in recent evolution is 20 times lower than the rate of neutral evolution and an order of magnitude lower than that expected in the absence of epistasis. These data indicate that epistasis is pervasive throughout protein evolution: about 90 per cent of all amino-acid substitutions have a neutral or beneficial impact only in the genetic backgrounds in which they occur, and must therefore be deleterious in a different background of other species. Our findings show that most amino-acid substitutions have different fitness effects in different species and that epistasis provides the primary conceptual framework to describe the tempo and mode of long-term protein evolution.},
author = {Breen, Michael S and Kemena, Carsten and Vlasov, Peter K and Notredame, Cédric and Kondrashov, Fyodor},
journal = {Nature},
number = {7421},
pages = {535--538},
publisher = {Nature Publishing Group},
title = {{Epistasis as the primary factor in molecular evolution}},
doi = {10.1038/nature11510},
volume = {490},
year = {2012},
}
@article{3246,
abstract = {Visualizing and analyzing shape changes at various scales, ranging from single molecules to whole organisms, are essential for understanding complex morphogenetic processes, such as early embryonic development. Embryo morphogenesis relies on the interplay between different tissues, the properties of which are again determined by the interaction between their constituent cells. Cell interactions, on the other hand, are controlled by various molecules, such as signaling and adhesion molecules, which in order to exert their functions need to be spatiotemporally organized within and between the interacting cells. In this review, we will focus on the role of cell adhesion functioning at different scales to organize cell, tissue and embryo morphogenesis. We will specifically ask how the subcellular distribution of adhesion molecules controls the formation of cell-cell contacts, how cell-cell contacts determine tissue shape, and how tissue interactions regulate embryo morphogenesis.},
author = {Barone, Vanessa and Heisenberg, Carl-Philipp J},
journal = {Current Opinion in Cell Biology},
number = {1},
pages = {148--153},
publisher = {Elsevier},
title = {{Cell adhesion in embryo morphogenesis}},
doi = {10.1016/j.ceb.2011.11.006},
volume = {24},
year = {2012},
}
@article{966,
abstract = {Motivated by recent experiments on Ba3NiSb2O 9, we investigate possible quantum spin liquid ground states for spin S=1 Heisenberg models on the triangular lattice. We use variational Monte Carlo techniques to calculate the energies of microscopic spin liquid wave functions where spin is represented by three flavors of fermionic spinon operators. These energies are compared with the energies of various competing three-sublattice ordered states. Our approach shows that the antiferromagnetic Heisenberg model with biquadratic term and single-ion anisotropy does not have a low-temperature spin liquid phase. However, for an SU(3)-invariant model with sufficiently strong ring-exchange terms, we find a paired chiral quantum spin liquid with a Fermi surface of deconfined spinons that is stable against all types of ordering patterns we considered. We discuss the physics of this exotic spin liquid state in relation to the recent experiment and suggest new ways to test this scenario.},
author = {Bieri, Samuel and Serbyn, Maksym and Senthil, Todadri S and Lee, Patrick},
journal = {Physical Review B - Condensed Matter and Materials Physics},
number = {22},
pages = {224409},
publisher = {American Physical Society},
title = {{Paired chiral spin liquid with a Fermi surface in S=1 model on the triangular lattice}},
doi = {10.1103/PhysRevB.86.224409},
volume = {86},
year = {2012},
}
@inproceedings{762,
abstract = {Decades of research in distributed computing have led to a variety of perspectives on what it means for a concurrent algorithm to be efficient, depending on model assumptions, progress guarantees, and complexity metrics. It is therefore natural to ask whether one could compose algorithms that perform efficiently under different conditions, so that the composition preserves the performance of the original components when their conditions are met. In this paper, we evaluate the cost of composing shared-memory algorithms. First, we formally define the notion of safely composable algorithms and we show that every sequential type has a safely composable implementation, as long as enough state is transferred between modules. Since such generic implementations are inherently expensive, we present a more general light-weight specification that allows the designer to transfer very little state between modules, by taking advantage of the semantics of the implemented object. Using this framework, we implement a composed longlived test-and-set object, with the property that each of its modules is asymptotically optimal with respect to the progress condition it ensures, while the entire implementation only uses objects with consensus number at most two. Thus, we show that the overhead of composition can be negligible in the case of some important shared-memory abstractions.},
author = {Alistarh, Dan-Adrian and Guerraoui, Rachid and Kuznetsov, Petr and Losa, Giuliano},
booktitle = {Proceedings of the 24th Annual ACM Symposium on Parallelism in Algorithms and Architectures},
pages = {298--307},
publisher = {ACM},
title = {{On the cost of composing shared-memory algorithms}},
doi = {10.1145/2312005.2312057},
year = {2012},
}
@inproceedings{763,
abstract = {Renaming is a fundamental problem in distributed computing, in which a set of n processes need to pick unique names from a namespace of limited size. In this paper, we present the first early-deciding upper bounds for synchronous renaming, in which the running time adapts to the actual number of failures f in the execution. We show that, surprisingly, renaming can be solved in constant time if the number of failures f is limited to O(√n), while for general f ≤ n - 1 renaming can always be solved in O(log f) communication rounds. In the wait-free case, i.e. for f = n - 1, our upper bounds match the Ω(log n) lower bound of Chaudhuri et al. [13].},
author = {Alistarh, Dan-Adrian and Attiya, Hagit and Guerraoui, Rachid and Travers, Corentin},
pages = {195--206},
publisher = {Springer},
title = {{Early deciding synchronous renaming in O(log f) rounds or less}},
doi = {10.1007/978-3-642-31104-8_17},
series = {Lecture Notes in Computer Science},
volume = {7355},
year = {2012},
}
@article{764,
abstract = {Set agreement is a fundamental problem in distributed computing in which processes collectively choose a small subset of values from a larger set of proposals. The impossibility of fault-tolerant set agreement in asynchronous networks is one of the seminal results in distributed computing. In synchronous networks, too, the complexity of set agreement has been a significant research challenge that has now been resolved. Real systems, however, are neither purely synchronous nor purely asynchronous. Rather, they tend to alternate between periods of synchrony and periods of asynchrony. Nothing specific is known about the complexity of set agreement in such a "partially synchronous" setting. In this paper, we address this challenge, presenting the first (asymptotically) tight bound on the complexity of set agreement in such systems. We introduce a novel technique for simulating, in a fault-prone asynchronous shared memory, executions of an asynchronous and failure-prone message-passing system in which some fragments appear synchronous to some processes. We use this simulation technique to derive a lower bound on the round complexity of set agreement in a partially synchronous system by a reduction from asynchronous wait-free set agreement. Specifically, we show that every set agreement protocol requires at least $\lfloor\frac t k \rfloor + 2$ synchronous rounds to decide. We present an (asymptotically) matching algorithm that relies on a distributed asynchrony detection mechanism to decide as soon as possible during periods of synchrony. From these two results, we derive the size of the minimal window of synchrony needed to solve set agreement. By relating synchronous, asynchronous and partially synchronous environments, our simulation technique is of independent interest. In particular, it allows us to obtain a new lower bound on the complexity of early deciding k-set agreement complementary to that of Gafni et al. (in SIAM J. Comput. 40(1):63-78, 2011), and to re-derive the combinatorial topology lower bound of Guerraoui et al. (in Theor. Comput. Sci. 410(6-7):570-580, 2009) in an algorithmic way.},
author = {Alistarh, Dan-Adrian and Gilbert, Seth and Guerraoui, Rachid and Travers, Corentin},
journal = {Algorithmica},
number = {1-2},
pages = {595--629},
publisher = {Springer},
title = {{Of choices, failures and asynchrony: the many faces of set agreement}},
doi = {10.1007/s00453-011-9581-7},
volume = {62},
year = {2012},
}
@article{8024,
abstract = {In dynamical models of cortical networks, the recurrent connectivity can amplify the input given to the network in two distinct ways. One is induced by the presence of near-critical eigenvalues in the connectivity matrix W, producing large but slow activity fluctuations along the corresponding eigenvectors (dynamical slowing). The other relies on W not being normal, which allows the network activity to make large but fast excursions along specific directions. Here we investigate the trade-off between non-normal amplification and dynamical slowing in the spontaneous activity of large random neuronal networks composed of excitatory and inhibitory neurons. We use a Schur decomposition of W to separate the two amplification mechanisms. Assuming linear stochastic dynamics, we derive an exact expression for the expected amount of purely non-normal amplification. We find that amplification is very limited if dynamical slowing must be kept weak. We conclude that, to achieve strong transient amplification with little slowing, the connectivity must be structured. We show that unidirectional connections between neurons of the same type together with reciprocal connections between neurons of different types, allow for amplification already in the fast dynamical regime. Finally, our results also shed light on the differences between balanced networks in which inhibition exactly cancels excitation and those where inhibition dominates.},
author = {Hennequin, Guillaume and Vogels, Tim P and Gerstner, Wulfram},
issn = {1539-3755},
journal = {Physical Review E},
number = {1},
pages = {011909},
publisher = {American Physical Society},
title = {{Non-normal amplification in random balanced neuronal networks}},
doi = {10.1103/PhysRevE.86.011909},
volume = {86},
year = {2012},
}
@article{801,
abstract = {Fungal cell walls frequently contain a polymer of mannose and galactose called galactomannan. In the pathogenic filamentous fungus Aspergillus fumigatus, this polysaccharide is made of a linear mannan backbone with side chains of galactofuran and is anchored to the plasma membrane via a glycosylphosphatidylinositol or is covalently linked to the cell wall. To date, the biosynthesis and significance of this polysaccharide are unknown. The present data demonstrate that deletion of the Golgi UDP-galactofuranose transporter GlfB or the GDP-mannose transporter GmtA leads to the absence of galactofuran or galactomannan, respectively. This indicates that the biosynthesis of galactomannan probably occurs in the lumen of the Golgi apparatus and thus contrasts with the biosynthesis of other fungal cell wall polysaccharides studied to date that takes place at the plasma membrane. Transglycosylation of galactomannan from the membrane to the cell wall is hypothesized because both the cell wall-bound and membrane-bound polysaccharide forms are affected in the generated mutants. Considering the severe growth defect of the A. fumigatus GmtA-deficient mutant, proving this paradigm might provide new targets for antifungal therapy.},
author = {Engel, Jakob and Schmalhorst, Philipp and Routier, Françoise H},
journal = {Journal of Biological Chemistry},
number = {53},
pages = {44418--44424},
publisher = {American Society for Biochemistry and Molecular Biology},
title = {{Biosynthesis of the fungal cell wall polysaccharide galactomannan requires intraluminal GDP-mannose}},
doi = {10.1074/jbc.M112.398321},
volume = {287},
year = {2012},
}
@article{826,
abstract = {Plants exhibit a unique developmental flexibility to ever-changing environmental conditions. To achieve their profound adaptability, plants are able to maintain permanent stem cell populations and form new organs during the entire plant life cycle. Signaling substances, called plant hormones, such as auxin, cytokinin, abscisic acid, brassinosteroid, ethylene, gibberellin, jasmonic acid, and strigolactone, govern and coordinate these developmental processes. Physiological and genetic studies have dissected the molecular components of signal perception and transduction of the individual hormonal pathways. However, over recent years it has become evident that hormones do not act only in a linear pathway. Hormonal pathways are interconnected by a complex network of interactions and feedback circuits that determines the final outcome of the individual hormone actions. This raises questions about the molecular mechanisms underlying hormonal cross talk and about how these hormonal networks are established, maintained, and modulated throughout plant development.},
author = {Vanstraelen, Marleen and Benková, Eva},
journal = {Annual Review of Cell and Developmental Biology},
pages = {463--487},
publisher = {Annual Reviews},
title = {{Hormonal interactions in the regulation of plant development}},
doi = {10.1146/annurev-cellbio-101011-155741},
volume = {28},
year = {2012},
}
@article{829,
abstract = {The architecture of a plant's root system, established postembryonically, results from both coordinated root growth and lateral root branching. The plant hormones auxin and cytokinin are central endogenous signaling molecules that regulate lateral root organogenesis positively and negatively, respectively. Tight control and mutual balance of their antagonistic activities are particularly important during the early phases of lateral root organogenesis to ensure continuous lateral root initiation (LRI) and proper development of lateral root primordia (LRP). Here, we show that the early phases of lateral root organogenesis, including priming and initiation, take place in root zones with a repressed cytokinin response. Accordingly, ectopic overproduction of cytokinin in the root basal meristem most efficiently inhibits LRI. Enhanced cytokinin responses in pericycle cells between existing LRP might restrict LRI near existing LRP and, when compromised, ectopic LRI occurs. Furthermore, our results demonstrate that young LRP are more sensitive to perturbations in the cytokinin activity than are developmentally more advanced primordia. We hypothesize that the effect of cytokinin on the development of primordia possibly depends on the robustness and stability of the auxin gradient.},
author = {Bielach, Agnieszka and Podlesakova, Katerina and Marhavy, Peter and Duclercq, Jérôme and Cuesta, Candela and Muller, Bruno and Grunewald, Wim and Tarkowski, Petr and Benková, Eva},
journal = {The Plant Cell},
number = {10},
pages = {3967--3981},
publisher = {American Society of Plant Biologists},
title = {{Spatiotemporal regulation of lateral root organogenesis in Arabidopsis by cytokinin}},
doi = {10.1105/tpc.112.103044},
volume = {24},
year = {2012},
}
@article{887,
abstract = {A subject of extensive study in evolutionary theory has been the issue of how neutral, redundant copies can be maintained in the genome for long periods of time. Concurrently, examples of adaptive gene duplications to various environmental conditions in different species have been described. At this point, it is too early to tell whether or not a substantial fraction of gene copies have initially achieved fixation by positive selection for increased dosage. Nevertheless, enough examples have accumulated in the literature that such a possibility should be considered. Here, I review the recent examples of adaptive gene duplications and make an attempt to draw generalizations on what types of genes may be particularly prone to be selected for under certain environmental conditions. The identification of copy-number variation in ecological field studies of species adapting to stressful or novel environmental conditions may improve our understanding of gene duplications as a mechanism of adaptation and its relevance to the long-term persistence of gene duplications.},
author = {Kondrashov, Fyodor},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1749},
pages = {5048--5057},
publisher = {The Royal Society},
title = {{Gene duplication as a mechanism of genomic adaptation to a changing environment}},
doi = {10.1098/rspb.2012.1108},
volume = {279},
year = {2012},
}
@article{922,
abstract = {We study theoretically the morphologies of biological tubes affected by various pathologies. When epithelial cells grow, the negative tension produced by their division provokes a buckling instability. Several shapes are investigated: varicose, dilated, sinuous, or sausagelike. They are all found in pathologies of tracheal, renal tubes, or arteries. The final shape depends crucially on the mechanical parameters of the tissues: Young's modulus, wall-to-lumen ratio, homeostatic pressure. We argue that since tissues must be in quasistatic mechanical equilibrium, abnormal shapes convey information as to what causes the pathology. We calculate a phase diagram of tubular instabilities which could be a helpful guide for investigating the underlying genetic regulation.},
author = {Hannezo, Edouard B and Prost, Jacques and Joanny, Jean},
journal = {Physical Review Letters},
number = {1},
pages = {018101},
publisher = {American Physical Society},
title = {{Mechanical instabilities of biological tubes}},
doi = {10.1103/PhysRevLett.109.018101},
volume = {109},
year = {2012},
}
@article{91,
abstract = {We demonstrate how to appropriately estimate the zero-frequency (static) hyperpolarizability of an organic molecule from its charge distribution, and we explore applications of these estimates for identifying and evaluating new organic nonlinear optical (NLO) materials. First, we calculate hyperpolarizabilities from Hartree-Fock-derived charge distributions and find order-of-magnitude agreement with experimental values. We show that these simple arithmetic calculations will enable systematic searches for new organic NLO molecules. Second, we derive hyperpolarizabilities from crystallographic data using a multipolar charge-density analysis and find good agreement with empirical calculations. This demonstrates an experimental determination of the full static hyperpolarizability tensor in a solid-state sample.},
author = {Higginbotham, Andrew P and Cole, Jacqueline and Blood-Forsythe, Martin and Hickstein, Daniel},
journal = {Journal of Applied Physics},
number = {3},
publisher = {American Institute of Physics},
title = {{Identifying and evaluating organic nonlinear optical materials via molecular moments}},
doi = {10.1063/1.3678593},
volume = {111},
year = {2012},
}