@article{2009,
abstract = {Traditional statistical methods for confidentiality protection of statistical databases do not scale well to deal with GWAS databases especially in terms of guarantees regarding protection from linkage to external information. The more recent concept of differential privacy, introduced by the cryptographic community, is an approach which provides a rigorous definition of privacy with meaningful privacy guarantees in the presence of arbitrary external information, although the guarantees may come at a serious price in terms of data utility. Building on such notions, we propose new methods to release aggregate GWAS data without compromising an individual’s privacy. We present methods for releasing differentially private minor allele frequencies, chi-square statistics and p-values. We compare these approaches on simulated data and on a GWAS study of canine hair length involving 685 dogs. We also propose a privacy-preserving method for finding genome-wide associations based on a differentially-private approach to penalized logistic regression.},
author = {Uhler, Caroline and Slavkovic, Aleksandra and Fienberg, Stephen},
journal = {Journal of Privacy and Confidentiality},
number = {1},
pages = {137 -- 166},
publisher = {Carnegie Mellon University},
title = {{Privacy-preserving data sharing for genome-wide association studies}},
doi = {10.29012/jpc.v5i1.629},
volume = {5},
year = {2013},
}
@article{2010,
abstract = {Many algorithms for inferring causality rely heavily on the faithfulness assumption. The main justification for imposing this assumption is that the set of unfaithful distributions has Lebesgue measure zero, since it can be seen as a collection of hypersurfaces in a hypercube. However, due to sampling error the faithfulness condition alone is not sufficient for statistical estimation, and strong-faithfulness has been proposed and assumed to achieve uniform or high-dimensional consistency. In contrast to the plain faithfulness assumption, the set of distributions that is not strong-faithful has nonzero Lebesgue measure and in fact, can be surprisingly large as we show in this paper. We study the strong-faithfulness condition from a geometric and combinatorial point of view and give upper and lower bounds on the Lebesgue measure of strong-faithful distributions for various classes of directed acyclic graphs. Our results imply fundamental limitations for the PC-algorithm and potentially also for other algorithms based on partial correlation testing in the Gaussian case.},
author = {Uhler, Caroline and Raskutti, Garvesh and Bühlmann, Peter and Yu, Bin},
journal = {The Annals of Statistics},
number = {2},
pages = {436 -- 463},
publisher = {Institute of Mathematical Statistics},
title = {{Geometry of the faithfulness assumption in causal inference}},
doi = {10.1214/12-AOS1080},
volume = {41},
year = {2013},
}
@inproceedings{2181,
abstract = {There is a trade-off between performance and correctness in implementing concurrent data structures. Better performance may be achieved at the expense of relaxing correctness, by redefining the semantics of data structures. We address such a redefinition of data structure semantics and present a systematic and formal framework for obtaining new data structures by quantitatively relaxing existing ones. We view a data structure as a sequential specification S containing all "legal" sequences over an alphabet of method calls. Relaxing the data structure corresponds to defining a distance from any sequence over the alphabet to the sequential specification: the k-relaxed sequential specification contains all sequences over the alphabet within distance k from the original specification. In contrast to other existing work, our relaxations are semantic (distance in terms of data structure states). As an instantiation of our framework, we present two simple yet generic relaxation schemes, called out-of-order and stuttering relaxation, along with several ways of computing distances. We show that the out-of-order relaxation, when further instantiated to stacks, queues, and priority queues, amounts to tolerating bounded out-of-order behavior, which cannot be captured by a purely syntactic relaxation (distance in terms of sequence manipulation, e.g. edit distance). We give concurrent implementations of relaxed data structures and demonstrate that bounded relaxations provide the means for trading correctness for performance in a controlled way. The relaxations are monotonic which further highlights the trade-off: increasing k increases the number of permitted sequences, which as we demonstrate can lead to better performance. Finally, since a relaxed stack or queue also implements a pool, we actually have new concurrent pool implementations that outperform the state-of-the-art ones.},
author = {Henzinger, Thomas A and Kirsch, Christoph and Payer, Hannes and Sezgin, Ali and Sokolova, Ana},
booktitle = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
isbn = {978-1-4503-1832-7},
location = {Rome, Italy},
pages = {317 -- 328},
publisher = {ACM},
title = {{Quantitative relaxation of concurrent data structures}},
doi = {10.1145/2429069.2429109},
year = {2013},
}
@inproceedings{2182,
abstract = {We propose a general framework for abstraction with respect to quantitative properties, such as worst-case execution time, or power consumption. Our framework provides a systematic way for counter-example guided abstraction refinement for quantitative properties. The salient aspect of the framework is that it allows anytime verification, that is, verification algorithms that can be stopped at any time (for example, due to exhaustion of memory), and report approximations that improve monotonically when the algorithms are given more time. We instantiate the framework with a number of quantitative abstractions and refinement schemes, which differ in terms of how much quantitative information they keep from the original system. We introduce both state-based and trace-based quantitative abstractions, and we describe conditions that define classes of quantitative properties for which the abstractions provide over-approximations. We give algorithms for evaluating the quantitative properties on the abstract systems. We present algorithms for counter-example based refinements for quantitative properties for both state-based and segment-based abstractions. We perform a case study on worst-case execution time of executables to evaluate the anytime verification aspect and the quantitative abstractions we proposed.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Proceedings of the 40th annual ACM SIGPLAN-SIGACT symposium on Principles of programming languages},
location = {Rome, Italy},
pages = {115 -- 128},
publisher = {ACM},
title = {{Quantitative abstraction refinement}},
doi = {10.1145/2429069.2429085},
year = {2013},
}
@inproceedings{2209,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon or planar straight-line graph. In this paper, we ask the reverse question: Given the straight skeleton (in form of a planar straight-line graph, with some rays to infinity), can we reconstruct a planar straight-line graph for which this was the straight skeleton? We show how to reduce this problem to the problem of finding a line that intersects a set of convex polygons. We can find these convex polygons and all such lines in $O(n \log n)$ time in the Real RAM computer model, where $n$ denotes the number of edges of the input graph. We also explain how our approach can be used for recognizing Voronoi diagrams of points, thereby completing a partial solution provided by Ash and Bolker in 1985.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
booktitle = {10th International Symposium on Voronoi Diagrams in Science and Engineering (ISVD)},
location = {St. Petersburg, Russia},
pages = {37 -- 46},
publisher = {IEEE},
title = {{Recognizing straight skeletons and Voronoi diagrams and reconstructing their input}},
doi = {10.1109/ISVD.2013.11},
year = {2013},
}
@inproceedings{2210,
abstract = {A straight skeleton is a well-known geometric structure, and several algorithms exist to construct the straight skeleton for a given polygon. In this paper, we ask the reverse question: Given the straight skeleton (in form of a tree with a drawing in the plane, but with the exact position of the leaves unspecified), can we reconstruct the polygon? We show that in most cases there exists at most one polygon; in the remaining case there is an infinite number of polygons determined by one angle that can range in an interval. We can find this (set of) polygon(s) in linear time in the Real RAM computer model.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan},
booktitle = {29th European Workshop on Computational Geometry},
location = {Braunschweig, Germany},
pages = {95 -- 98},
publisher = {TU Braunschweig},
title = {{Reconstructing polygons from embedded straight skeletons}},
year = {2013},
}
@inproceedings{2237,
abstract = {We describe new extensions of the Vampire theorem prover for computing tree interpolants. These extensions generalize Craig interpolation in Vampire, and can also be used to derive sequence interpolants. We evaluated our implementation on a large number of examples over the theory of linear integer arithmetic and integer-indexed arrays, with and without quantifiers. When compared to other methods, our experiments show that some examples could only be solved by our implementation.},
author = {Blanc, Régis and Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-19)},
location = {Stellenbosch, South Africa},
pages = {173 -- 181},
publisher = {Springer},
title = {{Tree interpolation in Vampire}},
doi = {10.1007/978-3-642-45221-5_13},
volume = {8312},
year = {2013},
}
@inproceedings{2238,
abstract = {We study the problem of achieving a given value in Markov decision processes (MDPs) with several independent discounted reward objectives. We consider a generalised version of discounted reward objectives, in which the amount of discounting depends on the states visited and on the objective. This definition extends the usual definition of discounted reward, and allows to capture the systems in which the value of different commodities diminish at different and variable rates.
We establish results for two prominent subclasses of the problem, namely state-discount models where the discount factors are only dependent on the state of the MDP (and independent of the objective), and reward-discount models where they are only dependent on the objective (but not on the state of the MDP). For the state-discount models we use a straightforward reduction to expected total reward and show that the problem whether a value is achievable can be solved in polynomial time. For the reward-discount model we show that memory and randomisation of the strategies are required, but nevertheless that the problem is decidable and it is sufficient to consider strategies which after a certain number of steps behave in a memoryless way.
For the general case, we show that when restricted to graphs (i.e. MDPs with no randomisation), pure strategies and discount factors of the form 1/n where n is an integer, the problem is in PSPACE and finite memory suffices for achieving a given value. We also show that when the discount factors are not of the form 1/n, the memory required by a strategy can be infinite.
},
author = {Chatterjee, Krishnendu and Forejt, Vojtěch and Wojtczak, Dominik},
booktitle = {Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-19)},
location = {Stellenbosch, South Africa},
pages = {228 -- 242},
publisher = {Springer},
title = {{Multi-objective discounted reward verification in graphs and MDPs}},
doi = {10.1007/978-3-642-45221-5_17},
volume = {8312},
year = {2013},
}
@inproceedings{2243,
abstract = {We show that modal logic over universally first-order definable classes of transitive frames is decidable. More precisely, let K be an arbitrary class of transitive Kripke frames definable by a universal first-order sentence. We show that the global and finite global satisfiability problems of modal logic over K are decidable in NP, regardless of choice of K. We also show that the local satisfiability and the finite local satisfiability problems of modal logic over K are decidable in NEXPTIME.},
author = {Michaliszyn, Jakub and Otop, Jan},
booktitle = {Computer Science Logic (CSL 2013)},
location = {Torino, Italy},
pages = {563 -- 577},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Elementary modal logics over transitive structures}},
doi = {10.4230/LIPIcs.CSL.2013.563},
volume = {23},
year = {2013},
}
@inproceedings{2244,
abstract = {We consider two systems (α1,...,αm) and (β1,...,βn) of curves drawn on a compact two-dimensional surface ℳ with boundary. Each αi and each βj is either an arc meeting the boundary of ℳ at its two endpoints, or a closed curve. The αi are pairwise disjoint except for possibly sharing endpoints, and similarly for the βj. We want to "untangle" the βj from the αi by a self-homeomorphism of ℳ; more precisely, we seek an homeomorphism φ: ℳ → ℳ fixing the boundary of ℳ pointwise such that the total number of crossings of the αi with the φ(βj) is as small as possible. This problem is motivated by an application in the algorithmic theory of embeddings and 3-manifolds. We prove that if ℳ is planar, i.e., a sphere with h ≥ 0 boundary components ("holes"), then O(mn) crossings can be achieved (independently of h), which is asymptotically tight, as an easy lower bound shows. In general, for an arbitrary (orientable or nonorientable) surface ℳ with h holes and of (orientable or nonorientable) genus g ≥ 0, we obtain an O((m + n)4) upper bound, again independent of h and g. },
author = {Matoušek, Jiří and Sedgwick, Eric and Tancer, Martin and Wagner, Uli},
booktitle = {Graph Drawing (GD 2013)},
location = {Bordeaux, France},
pages = {472 -- 483},
publisher = {Springer},
title = {{Untangling two systems of noncrossing curves}},
doi = {10.1007/978-3-319-03841-4_41},
volume = {8242},
year = {2013},
}
@article{2247,
abstract = {Cooperative behavior, where one individual incurs a cost to help another, is a wide spread phenomenon. Here we study direct reciprocity in the context of the alternating Prisoner's Dilemma. We consider all strategies that can be implemented by one and two-state automata. We calculate the payoff matrix of all pairwise encounters in the presence of noise. We explore deterministic selection dynamics with and without mutation. Using different error rates and payoff values, we observe convergence to a small number of distinct equilibria. Two of them are uncooperative strict Nash equilibria representing always-defect (ALLD) and Grim. The third equilibrium is mixed and represents a cooperative alliance of several strategies, dominated by a strategy which we call Forgiver. Forgiver cooperates whenever the opponent has cooperated; it defects once when the opponent has defected, but subsequently Forgiver attempts to re-establish cooperation even if the opponent has defected again. Forgiver is not an evolutionarily stable strategy, but the alliance, which it rules, is asymptotically stable. For a wide range of parameter values the most commonly observed outcome is convergence to the mixed equilibrium, dominated by Forgiver. Our results show that although forgiving might incur a short-term loss it can lead to a long-term gain. Forgiveness facilitates stable cooperation in the presence of exploitation and noise.},
author = {Zagorsky, Benjamin and Reiter, Johannes and Chatterjee, Krishnendu and Nowak, Martin},
journal = {PLoS One},
number = {12},
publisher = {Public Library of Science},
title = {{Forgiver triumphs in alternating prisoner's dilemma}},
doi = {10.1371/journal.pone.0080814},
volume = {8},
year = {2013},
}
@article{2256,
abstract = {Linked (Open) Data - bibliographic data on the Semantic Web. Report of the Working Group on Linked Data to the plenary assembly of the Austrian Library Network (translation of the title). Linked Data stands for a certain approach to publishing data on the Web. The underlying idea is to harmonise heterogeneous data sources of different origin in order to improve their accessibility and interoperability, effectively making them queryable as a big distributed database. This report summarises relevant developments in Europe as well as the Linked Data Working Group‘s strategic and technical considerations regarding the publishing of the Austrian Library Network’s (OBV’s) bibliographic datasets. It concludes with the mutual agreement that the implementation of Linked Data principles within the OBV can only be taken into consideration accompanied by a discussion about the provision of the datasets under a free license.},
author = {Danowski, Patrick and Goldfarb, Doron and Schaffner, Verena and Seidler, Wolfram},
journal = {VÖB Mitteilungen},
number = {3/4},
pages = {559 -- 587},
publisher = {Verein Österreichischer Bibliothekarinnen und Bibliothekare},
title = {{Linked (Open) Data - Bibliographische Daten im Semantic Web}},
volume = {66},
year = {2013},
}
@inproceedings{2258,
abstract = {In a digital signature scheme with message recovery, rather than transmitting the message m and its signature σ, a single enhanced signature τ is transmitted. The verifier is able to recover m from τ and at the same time verify its authenticity. The two most important parameters of such a scheme are its security and overhead |τ| − |m|. A simple argument shows that for any scheme with “n bits security” |τ| − |m| ≥ n, i.e., the overhead is lower bounded by the security parameter n. Currently, the best known constructions in the random oracle model are far from this lower bound requiring an overhead of n + logq h , where q h is the number of queries to the random oracle. In this paper we give a construction which basically matches the n bit lower bound. We propose a simple digital signature scheme with n + o(logq h ) bits overhead, where q h denotes the number of random oracle queries.
Our construction works in two steps. First, we propose a signature scheme with message recovery having optimal overhead in a new ideal model, the random invertible function model. Second, we show that a four-round Feistel network with random oracles as round functions is tightly “public-indifferentiable” from a random invertible function. At the core of our indifferentiability proof is an almost tight upper bound for the expected number of edges of the densest “small” subgraph of a random Cayley graph, which may be of independent interest.
},
author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Szegedy, Mario},
booktitle = {Advances in Cryptology -- CRYPTO 2013},
location = {Santa Barbara, CA, United States},
pages = {571 -- 588},
publisher = {Springer},
title = {{Digital signatures with minimal overhead from indifferentiable random invertible functions}},
doi = {10.1007/978-3-642-40041-4_31},
volume = {8042},
year = {2013},
}
@inproceedings{2259,
abstract = {The learning with rounding (LWR) problem, introduced by Banerjee, Peikert and Rosen at EUROCRYPT ’12, is a variant of learning with errors (LWE), where one replaces random errors with deterministic rounding. The LWR problem was shown to be as hard as LWE for a setting of parameters where the modulus and modulus-to-error ratio are super-polynomial. In this work we resolve the main open problem and give a new reduction that works for a larger range of parameters, allowing for a polynomial modulus and modulus-to-error ratio. In particular, a smaller modulus gives us greater efficiency, and a smaller modulus-to-error ratio gives us greater security, which now follows from the worst-case hardness of GapSVP with polynomial (rather than super-polynomial) approximation factors.
As a tool in the reduction, we show that there is a “lossy mode” for the LWR problem, in which LWR samples only reveal partial information about the secret. This property gives us several interesting new applications, including a proof that LWR remains secure with weakly random secrets of sufficient min-entropy, and very simple constructions of deterministic encryption, lossy trapdoor functions and reusable extractors.
Our approach is inspired by a technique of Goldwasser et al. from ICS ’10, which implicitly showed the existence of a “lossy mode” for LWE. By refining this technique, we also improve on the parameters of that work to only requiring a polynomial (instead of super-polynomial) modulus and modulus-to-error ratio.
},
author = {Alwen, Joel F and Krenn, Stephan and Pietrzak, Krzysztof Z and Wichs, Daniel},
booktitle = {Advances in Cryptology -- CRYPTO 2013},
location = {Santa Barbara, CA, United States},
number = {1},
pages = {57 -- 74},
publisher = {Springer},
title = {{Learning with rounding, revisited: New reduction properties and applications}},
doi = {10.1007/978-3-642-40041-4_4},
volume = {8042},
year = {2013},
}
@article{476,
abstract = {Maternal exposure to infection occurring mid-gestation produces a three-fold increase in the risk of schizophrenia in the offspring. The critical initiating factor appears to be the maternal immune activation (MIA) that follows infection. This process can be induced in rodents by exposure of pregnant dams to the viral mimic Poly I:C, which triggers an immune response that results in structural, functional, behavioral, and electrophysiological phenotypes in the adult offspring that model those seen in schizophrenia. We used this model to explore the role of synchronization in brain neural networks, a process thought to be dysfunctional in schizophrenia and previously associated with positive, negative, and cognitive symptoms of schizophrenia. Exposure of pregnant dams to Poly I:C on GD15 produced an impairment in long-range neural synchrony in adult offspring between two regions implicated in schizophrenia pathology; the hippocampus and the medial prefrontal cortex (mPFC). This reduction in synchrony was ameliorated by acute doses of the antipsychotic clozapine. MIA animals have previously been shown to have impaired pre-pulse inhibition (PPI), a gold-standard measure of schizophrenia-like deficits in animal models. Our data showed that deficits in synchrony were positively correlated with the impairments in PPI. Subsequent analysis of LFP activity during the PPI response also showed that reduced coupling between the mPFC and the hippocampus following processing of the pre-pulse was associated with reduced PPI. The ability of the MIA intervention to model neurodevelopmental aspects of schizophrenia pathology provides a useful platform from which to investigate the ontogeny of aberrant synchronous processes. Further, the way in which the model expresses translatable deficits such as aberrant synchrony and reduced PPI will allow researchers to explore novel intervention strategies targeted to these changes. },
author = {Dickerson, Desiree and Bilkey, David},
journal = {Frontiers in Behavioral Neuroscience},
number = {DEC},
publisher = {Frontiers Research Foundation},
title = {{Aberrant neural synchrony in the maternal immune activation model: Using translatable measures to explore targeted interventions}},
doi = {10.3389/fnbeh.2013.00217},
volume = {7},
year = {2013},
}
@article{499,
abstract = {Exposure of an isogenic bacterial population to a cidal antibiotic typically fails to eliminate a small fraction of refractory cells. Historically, fractional killing has been attributed to infrequently dividing or nondividing "persisters." Using microfluidic cultures and time-lapse microscopy, we found that Mycobacterium smegmatis persists by dividing in the presence of the drug isoniazid (INH). Although persistence in these studies was characterized by stable numbers of cells, this apparent stability was actually a dynamic state of balanced division and death. Single cells expressed catalase-peroxidase (KatG), which activates INH, in stochastic pulses that were negatively correlated with cell survival. These behaviors may reflect epigenetic effects, because KatG pulsing and death were correlated between sibling cells. Selection of lineages characterized by infrequent KatG pulsing could allow nonresponsive adaptation during prolonged drug exposure.},
author = {Wakamoto, Yuichi and Dhar, Neeraj and Chait, Remy P and Schneider, Katrin and Signorino-Gelo, François and Leibler, Stanislas and McKinney, John},
journal = {Science},
number = {6115},
pages = {91 -- 95},
publisher = {American Association for the Advancement of Science},
title = {{Dynamic persistence of antibiotic-stressed mycobacteria}},
doi = {10.1126/science.1229858},
volume = {339},
year = {2013},
}
@article{500,
abstract = {Background: Reassortment between the RNA segments encoding haemagglutinin (HA) and neuraminidase (NA), the major antigenic influenza proteins, produces viruses with novel HA and NA subtype combinations and has preceded the emergence of pandemic strains. It has been suggested that productive viral infection requires a balance in the level of functional activity of HA and NA, arising from their closely interacting roles in the viral life cycle, and that this functional balance could be mediated by genetic changes in the HA and NA. Here, we investigate how the selective pressure varies for H7 avian influenza HA on different NA subtype backgrounds. Results: By extending Bayesian stochastic mutational mapping methods to calculate the ratio of the rate of non-synonymous change to the rate of synonymous change (d N/d S), we found the average d N/d S across the avian influenza H7 HA1 region to be significantly greater on an N2 NA subtype background than on an N1, N3 or N7 background. Observed differences in evolutionary rates of H7 HA on different NA subtype backgrounds could not be attributed to underlying differences between avian host species or virus pathogenicity. Examination of d N/d S values for each subtype on a site-by-site basis indicated that the elevated d N/d S on the N2 NA background was a result of increased selection, rather than a relaxation of selective constraint. Conclusions: Our results are consistent with the hypothesis that reassortment exposes influenza HA to significant changes in selective pressure through genetic interactions with NA. Such epistatic effects might be explicitly accounted for in future models of influenza evolution.},
author = {Ward, Melissa and Lycett, Samantha and Avila, Dorita and Bollback, Jonathan P and Leigh Brown, Andrew},
journal = {BMC Evolutionary Biology},
number = {1},
publisher = {BioMed Central},
title = {{Evolutionary interactions between haemagglutinin and neuraminidase in avian influenza}},
doi = {10.1186/1471-2148-13-222},
volume = {13},
year = {2013},
}
@article{501,
abstract = {All known species of extant tapirs are allopatric: 1 in southeastern Asia and 3 in Central and South America. The fossil record for tapirs, however, is much wider in geographical range, including Europe, Asia, and North and South America, going back to the late Oligocene, making the present distribution a relict of the original one. We here describe a new species of living Tapirus from the Amazon rain forest, the 1st since T. bairdii Gill, 1865, and the 1st new Perissodactyla in more than 100 years, from both morphological and molecular characters. It is shorter in stature than T. terrestris (Linnaeus, 1758) and has distinctive skull morphology, and it is basal to the clade formed by T. terrestris and T. pinchaque (Roulin, 1829). This highlights the unrecognized biodiversity in western Amazonia, where the biota faces increasing threats. Local peoples have long recognized our new species, suggesting a key role for traditional knowledge in understanding the biodiversity of the region.},
author = {Cozzuol, Mario and Clozato, Camila and Holanda, Elizete and Rodrigues, Flávio and Nienow, Samuel and De Thoisy, Benoit and Fernandes Redondo, Rodrigo A and Santos, Fabrício},
journal = {Journal of Mammalogy},
number = {6},
pages = {1331 -- 1345},
publisher = {Oxford University Press},
title = {{A new species of tapir from the Amazon}},
doi = {10.1644/12-MAMM-A-169.1},
volume = {94},
year = {2013},
}
@article{502,
abstract = {Blind signatures allow users to obtain signatures on messages hidden from the signer; moreover, the signer cannot link the resulting message/signature pair to the signing session. This paper presents blind signature schemes, in which the number of interactions between the user and the signer is minimal and whose blind signatures are short. Our schemes are defined over bilinear groups and are proved secure in the common-reference-string model without random oracles and under standard assumptions: CDH and the decision-linear assumption. (We also give variants over asymmetric groups based on similar assumptions.) The blind signatures are Waters signatures, which consist of 2 group elements. Moreover, we instantiate partially blind signatures, where the message consists of a part hidden from the signer and a commonly known public part, and schemes achieving perfect blindness. We propose new variants of blind signatures, such as signer-friendly partially blind signatures, where the public part can be chosen by the signer without prior agreement, 3-party blind signatures, as well as blind signatures on multiple aggregated messages provided by independent sources. We also extend Waters signatures to non-binary alphabets by proving a new result on the underlying hash function. },
author = {Blazy, Olivier and Fuchsbauer, Georg and Pointcheval, David and Vergnaud, Damien},
journal = {Journal of Computer Security},
number = {5},
pages = {627 -- 661},
publisher = {IOS Press},
title = {{Short blind signatures}},
doi = {10.3233/JCS-130477},
volume = {21},
year = {2013},
}
@article{505,
abstract = {Alkyd resins are polyesters containing unsaturated fatty acids that are used as binding agents in paints and coatings. Chemical drying of these polyesters is based on heavy metal catalyzed cross-linking of the unsaturated fatty acid moieties. Among the heavy-metal catalysts, cobalt complexes are the most effective, yet they have been proven to be carcinogenic. Therefore, strategies to replace the cobalt-based catalyst by environmentally friendlier and less toxic alternatives are under development. Here, we demonstrate for the first time that a laccase-mediator system can effectively replace the heavy-metal catalyst and cross-link alkyd resins. Interestingly, the biocatalytic reaction does not only work in aqueous media, but also in a solid film, where enzyme diffusion is limited. Within the catalytic cycle, the mediator oxidizes the alkyd resin and is regenerated by the laccase, which is uniformly distributed within the drying film as evidenced by confocal laser scanning microscopy. During gradual build-up of molecular weight, there is a concomitant decrease of the oxygen content in the film. A new optical sensor to follow oxygen consumption during the cross-linking reaction was developed and validated with state of the art techniques. A remarkable feature is the low sample amount required, which allows faster screening of new catalysts.},
author = {Greimel, Katrin and Perz, Veronika and Koren, Klaus and Feola, Roland and Temel, Armin and Sohar, Christian and Herrero Acero, Enrique and Klimant, Ingo and Guebitz, Georg},
journal = {Green Chemistry},
number = {2},
pages = {381 -- 388},
publisher = {Royal Society of Chemistry},
title = {{Banning toxic heavy-metal catalysts from paints: Enzymatic cross-linking of alkyd resins}},
doi = {10.1039/c2gc36666e},
volume = {15},
year = {2013},
}
@article{507,
abstract = {Fertilization in flowering plants requires the temporal and spatial coordination of many developmental processes, including pollen production, anther dehiscence, ovule production, and pollen tube elongation. However, it remains elusive as to how this coordination occurs during reproduction. Here, we present evidence that endocytosis, involving heterotetrameric adaptor protein complex 2 (AP-2), plays a crucial role in fertilization. An Arabidopsis thaliana mutant ap2m displays multiple defects in pollen production and viability, as well as elongation of staminal filaments and pollen tubes, all of which are pivotal processes needed for fertilization. Of these abnormalities, the defects in elongation of staminal filaments and pollen tubes were partially rescued by exogenous auxin. Moreover, DR5rev:GFP (for green fluorescent protein) expression was greatly reduced in filaments and anthers in ap2m mutant plants. At the cellular level, ap2m mutants displayed defects in both endocytosis of N-(3-triethylammonium-propyl)-4- (4-diethylaminophenylhexatrienyl) pyridinium dibromide, a lypophilic dye used as an endocytosis marker, and polar localization of auxin-efflux carrier PIN FORMED2 (PIN2) in the stamen filaments. Moreover, these defects were phenocopied by treatment with Tyrphostin A23, an inhibitor of endocytosis. Based on these results, we propose that AP-2-dependent endocytosis plays a crucial role in coordinating the multiple developmental aspects of male reproductive organs by modulating cellular auxin level through the regulation of the amount and polarity of PINs.},
author = {Kim, Soo and Xu, Zheng and Song, Kyungyoung and Kim, Dae and Kang, Hyangju and Reichardt, Ilka and Sohn, Eun and Friml, Jiří and Juergens, Gerd and Hwang, Inhwan},
journal = {Plant Cell},
number = {8},
pages = {2970 -- 2985},
publisher = {American Society of Plant Biologists},
title = {{Adaptor protein complex 2-mediated endocytosis is crucial for male reproductive organ development in Arabidopsis}},
doi = {10.1105/tpc.113.114264},
volume = {25},
year = {2013},
}
@article{508,
abstract = {The phagocyte NADPH oxidase catalyzes the reduction of O2 to reactive oxygen species with microbicidal activity. It is composed of two membrane-spanning subunits, gp91-phox and p22-phox (encoded by CYBB and CYBA, respectively), and three cytoplasmic subunits, p40-phox, p47-phox, and p67-phox (encoded by NCF4, NCF1, and NCF2, respectively). Mutations in any of these genes can result in chronic granulomatous disease, a primary immunodeficiency characterized by recurrent infections. Using evolutionary mapping, we determined that episodes of adaptive natural selection have shaped the extracellular portion of gp91-phox during the evolution of mammals, which suggests that this region may have a function in host-pathogen interactions. On the basis of a resequencing analysis of approximately 35 kb of CYBB, CYBA, NCF2, and NCF4 in 102 ethnically diverse individuals (24 of African ancestry, 31 of European ancestry, 24 of Asian/Oceanians, and 23 US Hispanics), we show that the pattern of CYBA diversity is compatible with balancing natural selection, perhaps mediated by catalase-positive pathogens. NCF2 in Asian populations shows a pattern of diversity characterized by a differentiated haplotype structure. Our study provides insight into the role of pathogen-driven natural selection in an innate immune pathway and sheds light on the role of CYBA in endothelial, nonphagocytic NADPH oxidases, which are relevant in the pathogenesis of cardiovascular and other complex diseases.},
author = {Tarazona Santos, Eduardo and Machado, Moara and Magalhães, Wagner and Chen, Renee and Lyon, Fernanda and Burdett, Laurie and Crenshaw, Andrew and Fabbri, Cristina and Pereira, Latife and Pinto, Laelia and Fernandes Redondo, Rodrigo A and Sestanovich, Ben and Yeager, Meredith and Chanock, Stephen},
journal = {Molecular Biology and Evolution},
number = {9},
pages = {2157 -- 2167},
publisher = {Oxford University Press},
title = {{Evolutionary dynamics of the human NADPH oxidase genes CYBB, CYBA, NCF2, and NCF4: Functional implications}},
doi = {10.1093/molbev/mst119},
volume = {30},
year = {2013},
}
@article{509,
abstract = {Clathrin-mediated endocytosis (CME) regulates many aspects of plant development, including hormone signaling and responses to environmental stresses. Despite the importance of this process, the machinery that regulates CME in plants is largely unknown. In mammals, the heterotetrameric ADAPTOR PROTEIN COMPLEX-2 (AP-2) is required for the formation of clathrin-coated vesicles at the plasma membrane (PM). Although the existence of AP-2 has been predicted in Arabidopsis thaliana, the biochemistry and functionality of the complex is still uncharacterized. Here, we identified all the subunits of the Arabidopsis AP-2 by tandem affinity purification and found that one of the large AP-2 subunits, AP2A1, localized at the PM and interacted with clathrin. Furthermore, endocytosis of the leucine-rich repeat receptor kinase, BRASSINOSTEROID INSENSITIVE1 (BRI1), was shown to depend on AP-2. Knockdown of the two Arabidopsis AP2A genes or overexpression of a dominant-negative version of the medium AP-2 subunit, AP2M, impaired BRI1 endocytosis and enhanced the brassinosteroid signaling. Our data reveal that the CME machinery in Arabidopsis is evolutionarily conserved and that AP-2 functions in receptormediated endocytosis. },
author = {Di Rubbo, Simone and Irani, Niloufer and Kim, Soo and Xu, Zheng and Gadeyne, Astrid and Dejonghe, Wim and Vanhoutte, Isabelle and Persiau, Geert and Eeckhout, Dominique and Simon, Sibu and Song, Kyungyoung and Kleine-Vehn, Jürgen and Friml, Jiří and De Jaeger, Geert and Van Damme, Daniël and Hwang, Inhwan and Russinova, Eugenia},
journal = {Plant Cell},
number = {8},
pages = {2986 -- 2997},
publisher = {American Society of Plant Biologists},
title = {{The clathrin adaptor complex AP-2 mediates endocytosis of BRASSINOSTEROID INSENSITIVE1 in Arabidopsis}},
doi = {10.1105/tpc.113.114058},
volume = {25},
year = {2013},
}
@article{511,
abstract = {The native auxin, indole-3-acetic acid (IAA), is a major regulator of plant growth and development. Its nonuniform distribution between cells and tissues underlies the spatiotemporal coordination of many developmental events and responses to environmental stimuli. The regulation of auxin gradients and the formation of auxin maxima/minima most likely involve the regulation of both metabolic and transport processes. In this article, we have demonstrated that 2-oxindole-3-acetic acid (oxIAA) is a major primary IAA catabolite formed in Arabidopsis thaliana root tissues. OxIAA had little biological activity and was formed rapidly and irreversibly in response to increases in auxin levels. We further showed that there is cell type-specific regulation of oxIAA levels in the Arabidopsis root apex. We propose that oxIAA is an important element in the regulation of output from auxin gradients and, therefore, in the regulation of auxin homeostasis and response mechanisms.},
author = {Pěnčík, Aleš and Simonovik, Biljana and Petersson, Sara and Henyková, Eva and Simon, Sibu and Greenham, Kathleen and Zhang, Yi and Kowalczyk, Mariusz and Estelle, Mark and Zažímalová, Eva and Novák, Ondřej and Sandberg, Göran and Ljung, Karin},
journal = {Plant Cell},
number = {10},
pages = {3858 -- 3870},
publisher = {American Society of Plant Biologists},
title = {{Regulation of auxin homeostasis and gradients in Arabidopsis roots through the formation of the indole-3-acetic acid catabolite 2-oxindole-3-acetic acid}},
doi = {10.1105/tpc.113.114421},
volume = {25},
year = {2013},
}
@article{516,
abstract = {In plants, changes in local auxin concentrations can trigger a range of developmental processes as distinct tissues respond differently to the same auxin stimulus. However, little is known about how auxin is interpreted by individual cell types. We performed a transcriptomic analysis of responses to auxin within four distinct tissues of the Arabidopsis thaliana root and demonstrate that different cell types show competence for discrete responses. The majority of auxin-responsive genes displayed a spatial bias in their induction or repression. The novel data set was used to examine how auxin influences tissue-specific transcriptional regulation of cell-identity markers. Additionally, the data were used in combination with spatial expression maps of the root to plot a transcriptomic auxin-response gradient across the apical and basal meristem. The readout revealed a strong correlation for thousands of genes between the relative response to auxin and expression along the longitudinal axis of the root. This data set and comparative analysis provide a transcriptome-level spatial breakdown of the response to auxin within an organ where this hormone mediates many aspects of development.},
author = {Bargmann, Bastiaan and Vanneste, Steffen and Krouk, Gabriel and Nawy, Tal and Efroni, Idan and Shani, Eilon and Choe, Goh and Friml, Jiří and Bergmann, Dominique and Estelle, Mark and Birnbaum, Kenneth},
journal = {Molecular Systems Biology},
number = {1},
publisher = {Nature Publishing Group},
title = {{A map of cell type-specific auxin responses}},
doi = {10.1038/msb.2013.40},
volume = {9},
year = {2013},
}
@article{522,
abstract = {Podoplanin, a mucin-like plasma membrane protein, is expressed by lymphatic endothelial cells and responsible for separation of blood and lymphatic circulation through activation of platelets. Here we show that podoplanin is also expressed by thymic fibroblastic reticular cells (tFRC), a novel thymic medulla stroma cell type associated with thymic conduits, and involved in development of natural regulatory T cells (nTreg). Young mice deficient in podoplanin lack nTreg owing to retardation of CD4+CD25+ thymocytes in the cortex and missing differentiation of Foxp3+ thymocytes in the medulla. This might be due to CCL21 that delocalizes upon deletion of the CCL21-binding podoplanin from medullar tFRC to cortex areas. The animals do not remain devoid of nTreg but generate them delayed within the first month resulting in Th2-biased hypergammaglobulinemia but not in the death-causing autoimmune phenotype of Foxp3-deficient Scurfy mice.},
author = {Fuertbauer, Elke and Zaujec, Jan and Uhrin, Pavel and Raab, Ingrid and Weber, Michele and Schachner, Helga and Bauer, Miroslav and Schütz, Gerhard and Binder, Bernd and Sixt, Michael K and Kerjaschki, Dontscho and Stockinger, Hannes},
journal = {Immunology Letters},
number = {1-2},
pages = {31 -- 41},
publisher = {Elsevier},
title = {{Thymic medullar conduits-associated podoplanin promotes natural regulatory T cells}},
doi = {10.1016/j.imlet.2013.07.007},
volume = {154},
year = {2013},
}
@article{527,
abstract = {The apical-basal axis of the early plant embryo determines the body plan of the adult organism. To establish a polarized embryonic axis, plants evolved a unique mechanism that involves directional, cell-to-cell transport of the growth regulator auxin. Auxin transport relies on PIN auxin transporters [1], whose polar subcellular localization determines the flow directionality. PIN-mediated auxin transport mediates the spatial and temporal activity of the auxin response machinery [2-7] that contributes to embryo patterning processes, including establishment of the apical (shoot) and basal (root) embryo poles [8]. However, little is known of upstream mechanisms guiding the (re)polarization of auxin fluxes during embryogenesis [9]. Here, we developed a model of plant embryogenesis that correctly generates emergent cell polarities and auxin-mediated sequential initiation of apical-basal axis of plant embryo. The model relies on two precisely localized auxin sources and a feedback between auxin and the polar, subcellular PIN transporter localization. Simulations reproduced PIN polarity and auxin distribution, as well as previously unknown polarization events during early embryogenesis. The spectrum of validated model predictions suggests that our model corresponds to a minimal mechanistic framework for initiation and orientation of the apical-basal axis to guide both embryonic and postembryonic plant development.},
author = {Wabnik, Krzysztof T and Robert, Hélène and Smith, Richard and Friml, Jiří},
journal = {Current Biology},
number = {24},
pages = {2513 -- 2518},
publisher = {Cell Press},
title = {{Modeling framework for the establishment of the apical-basal embryonic axis in plants}},
doi = {10.1016/j.cub.2013.10.038},
volume = {23},
year = {2013},
}
@article{528,
abstract = {Establishment of the embryonic axis foreshadows the main body axis of adults both in plants and in animals, but underlying mechanisms are considered distinct. Plants utilize directional, cell-to-cell transport of the growth hormone auxin [1, 2] to generate an asymmetric auxin response that specifies the embryonic apical-basal axis [3-6]. The auxin flow directionality depends on the polarized subcellular localization of PIN-FORMED (PIN) auxin transporters [7, 8]. It remains unknown which mechanisms and spatial cues guide cell polarization and axis orientation in early embryos. Herein, we provide conceptually novel insights into the formation of embryonic axis in Arabidopsis by identifying a crucial role of localized tryptophan-dependent auxin biosynthesis [9-12]. Local auxin production at the base of young embryos and the accompanying PIN7-mediated auxin flow toward the proembryo are required for the apical auxin response maximum and the specification of apical embryonic structures. Later in embryogenesis, the precisely timed onset of localized apical auxin biosynthesis mediates PIN1 polarization, basal auxin response maximum, and specification of the root pole. Thus, the tight spatiotemporal control of distinct local auxin sources provides a necessary, non-cell-autonomous trigger for the coordinated cell polarization and subsequent apical-basal axis orientation during embryogenesis and, presumably, also for other polarization events during postembryonic plant life [13, 14].},
author = {Robert, Hélène and Grones, Peter and Stepanova, Anna and Robles, Linda and Lokerse, Annemarie and Alonso, Jose and Weijers, Dolf and Friml, Jiří},
journal = {Current Biology},
number = {24},
pages = {2506 -- 2512},
publisher = {Cell Press},
title = {{Local auxin sources orient the apical-basal axis in Arabidopsis embryos}},
doi = {10.1016/j.cub.2013.09.039},
volume = {23},
year = {2013},
}
@misc{5399,
abstract = {In this work we present a flexible tool for tumor progression, which simulates the evolutionary dynamics of cancer. Tumor progression implements a multi-type branching process where the key parameters are the fitness landscape, the mutation rate, and the average time of cell division. The fitness of a cancer cell depends on the mutations it has accumulated. The input to our tool could be any fitness landscape, mutation rate, and cell division time, and the tool produces the growth dynamics and all relevant statistics.},
author = {Reiter, Johannes and Bozic, Ivana and Chatterjee, Krishnendu and Nowak, Martin},
issn = {2664-1690},
pages = {17},
publisher = {IST Austria},
title = {{TTP: Tool for Tumor Progression}},
doi = {10.15479/AT:IST-2013-104-v1-1},
year = {2013},
}
@misc{5400,
abstract = {We consider partially observable Markov decision processes (POMDPs) with ω-regular conditions specified as parity objectives. The class of ω-regular languages extends regular languages to infinite strings and provides a robust specification language to express all properties used in verification, and parity objectives are canonical forms to express ω-regular conditions. The qualitative analysis problem given a POMDP and a parity objective asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, we establish decidability (with optimal complexity) of the qualitative analysis problems for POMDPs with all parity objectives under finite-memory strategies. We establish asymptotically optimal (exponential) memory bounds and EXPTIME-completeness of the qualitative analysis problems under finite-memory strategies for POMDPs with parity objectives.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Tracol, Mathieu},
issn = {2664-1690},
pages = {41},
publisher = {IST Austria},
title = {{What is decidable about partially observable Markov decision processes with ω-regular objectives}},
doi = {10.15479/AT:IST-2013-109-v1-1},
year = {2013},
}
@techreport{5401,
abstract = {This document is created as a part of the project “Repository for Research Data at IST Austria”. It summarises the actual initiatives, projects and standards related to the project. It supports the preparation of standards and specifications for the project, which should be considered and followed to ensure interoperability and visibility of the uploaded data.},
author = {Porsche, Jana},
publisher = {IST Austria},
title = {{Initiatives and projects related to RD}},
year = {2013},
}
@misc{5402,
abstract = {Linearizability requires that the outcome of calls by competing threads to a concurrent data structure is the same as some sequential execution where each thread has exclusive access to the data structure. In an ordered data structure, such as a queue or a stack, linearizability is ensured by requiring threads commit in the order dictated by the sequential semantics of the data structure; e.g., in a concurrent queue implementation a dequeue can only remove the oldest element.
In this paper, we investigate the impact of this strict ordering, by comparing what linearizability allows to what existing implementations do. We first give an operational definition for linearizability which allows us to build the most general linearizable implementation as a transition system for any given sequential specification. We then use this operational definition to categorize linearizable implementations based on whether they are bound or free. In a bound implementation, whenever all threads observe the same logical state, the updates to the logical state and the temporal order of commits coincide. All existing queue implementations we know of are bound. We then proceed to present, to the best of our knowledge, the first ever free queue implementation. Our experiments show that free implementations have the potential for better performance by suffering less from contention.},
author = {Henzinger, Thomas A and Sezgin, Ali},
issn = {2664-1690},
pages = {16},
publisher = {IST Austria},
title = {{How free is your linearizable concurrent data structure?}},
doi = {10.15479/AT:IST-2013-123-v1-1},
year = {2013},
}
@misc{5403,
abstract = {We consider concurrent games played by two-players on a finite state graph, where in every round the players simultaneously choose a move, and the current state along with the joint moves determine the successor state. We study the most fundamental objective for concurrent games, namely, mean-payoff or limit-average objective, where a reward is associated to every transition, and the goal of player 1 is to maximize the long-run average of the rewards, and the objective of player 2 is strictly the opposite (i.e., the games are zero-sum). The path constraint for player 1 could be qualitative, i.e., the mean-payoff is the maximal reward, or arbitrarily close to it; or quantitative, i.e., a given threshold between the minimal and maximal reward. We consider the computation of the almost-sure (resp. positive) winning sets, where player 1 can ensure that the path constraint is satisfied with probability 1 (resp. positive probability). Almost-sure winning with qualitative constraint exactly corresponds to the question whether there exists a strategy to ensure that the payoff is the maximal reward of the game. Our main results for qualitative path constraints are as follows: (1) we establish qualitative determinacy results that show for every state either player 1 has a strategy to ensure almost-sure (resp. positive) winning against all player-2 strategies or player 2 has a spoiling strategy to falsify almost-sure (resp. positive) winning against all player-1 strategies; (2) we present optimal strategy complexity results that precisely characterize the classes of strategies required for almost-sure and positive winning for both players; and (3) we present quadratic time algorithms to compute the almost-sure and the positive winning sets, matching the best known bound of the algorithms for much simpler problems (such as reachability objectives). 
For quantitative constraints we show that a polynomial time solution for the almost-sure or the positive winning set would imply a solution to a long-standing open problem (of solving the value problem of mean-payoff games) that is not known to be in polynomial time.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
issn = {2664-1690},
pages = {33},
publisher = {IST Austria},
title = {{Qualitative analysis of concurrent mean-payoff games}},
doi = {10.15479/AT:IST-2013-126-v1-1},
year = {2013},
}
@misc{5404,
abstract = {We study finite-state two-player (zero-sum) concurrent mean-payoff games played on a graph. We focus on the important sub-class of ergodic games where all states are visited infinitely often with probability 1. The algorithmic study of ergodic games was initiated in a seminal work of Hoffman and Karp in 1966, but all basic complexity questions have remained unresolved. Our main results for ergodic games are as follows: We establish (1) an optimal exponential bound on the patience of stationary strategies (where patience of a distribution is the inverse of the smallest positive probability and represents a complexity measure of a stationary strategy); (2) the approximation problem lies in FNP; (3) the approximation problem is at least as hard as the decision problem for simple stochastic games (for which NP and coNP is the long-standing best known bound). We show that the exact value can be expressed in the existential theory of the reals, and also establish square-root sum hardness for a related class of games.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus},
issn = {2664-1690},
pages = {29},
publisher = {IST Austria},
title = {{The complexity of ergodic games}},
doi = {10.15479/AT:IST-2013-127-v1-1},
year = {2013},
}
@misc{5405,
abstract = {The theory of graph games is the foundation for modeling and synthesizing reactive processes. In the synthesis of stochastic processes, we use 2-1/2-player games where some transitions of the game graph are controlled by two adversarial players, the System and the Environment, and the other transitions are determined probabilistically. We consider 2-1/2-player games where the objective of the System is the conjunction of a qualitative objective (specified as a parity condition) and a quantitative objective (specified as a mean-payoff condition). We establish that the problem of deciding whether the System can ensure that the probability to satisfy the mean-payoff parity objective is at least a given threshold is in NP ∩ coNP, matching the best known bound in the special case of 2-player games (where all transitions are deterministic) with only parity objectives, or with only mean-payoff objectives. We present an algorithm running
in time O(d · n^{2d}·MeanGame) to compute the set of almost-sure winning states from which the objective
can be ensured with probability 1, where n is the number of states of the game, d the number of priorities
of the parity objective, and MeanGame is the complexity to compute the set of almost-sure winning states
in 2-1/2-player mean-payoff games. Our results are useful in the synthesis of stochastic reactive systems
with both functional requirement (given as a qualitative objective) and performance requirement (given
as a quantitative objective).},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Gimbert, Hugo and Oualhadj, Youssouf},
issn = {2664-1690},
pages = {22},
publisher = {IST Austria},
title = {{Perfect-information stochastic mean-payoff parity games}},
doi = {10.15479/AT:IST-2013-128-v1-1},
year = {2013},
}
@misc{5406,
abstract = {We consider the distributed synthesis problem for temporal logic specifications. Traditionally, the problem has been studied for LTL, and the previous results show that the problem is decidable iff there is no information fork in the architecture. We consider the problem for fragments of LTL and our main results are as follows: (1) We show that the problem is undecidable for architectures with information forks even for the fragment of LTL with temporal operators restricted to next and eventually. (2) For specifications restricted to globally along with non-nested next operators, we establish decidability (in EXPSPACE) for star architectures where the processes receive disjoint inputs, whereas we establish undecidability for architectures containing an information fork-meet structure. (3) Finally, we consider LTL without the next operator, and establish decidability (NEXPTIME-complete) for all architectures for a fragment that consists of a set of safety assumptions, and a set of guarantees where each guarantee is a safety, reachability, or liveness condition.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {11},
publisher = {IST Austria},
title = {{Distributed synthesis for LTL Fragments}},
doi = {10.15479/AT:IST-2013-130-v1-1},
year = {2013},
}
@techreport{5407,
abstract = {This document is created as a part of the project “Repository for Research Data at IST Austria”. It summarises the mandatory features, which need to be fulfilled to provide an institutional repository as a platform and also a service to the scientists at the institute. It also includes optional features, which would be of strong benefit for the scientists and would increase the usage of the repository, and hence the visibility of research at IST Austria.},
author = {Porsche, Jana},
publisher = {IST Austria},
title = {{Technical requirements and features}},
year = {2013},
}
@misc{5408,
abstract = {We consider two-player partial-observation stochastic games where player 1 has partial observation and player 2 has perfect observation. The winning condition we study are omega-regular conditions specified as parity objectives. The qualitative analysis problem given a partial-observation stochastic game and a parity objective asks whether there is a strategy to ensure that the objective is satisfied with probability 1 (resp. positive probability). While the qualitative analysis problems are known to be undecidable even for very special cases of parity objectives, they were shown to be decidable in 2EXPTIME under finite-memory strategies. We improve the complexity and show that the qualitative analysis problems for partial-observation stochastic parity games under finite-memory strategies are
EXPTIME-complete; and also establish optimal (exponential) memory bounds for finite-memory strategies required for qualitative analysis. },
author = {Chatterjee, Krishnendu and Doyen, Laurent and Nain, Sumit and Vardi, Moshe},
issn = {2664-1690},
pages = {17},
publisher = {IST Austria},
title = {{The complexity of partial-observation stochastic parity games with finite-memory strategies}},
doi = {10.15479/AT:IST-2013-141-v1-1},
year = {2013},
}
@misc{5409,
abstract = {The edit distance between two (untimed) traces is the minimum cost of a sequence of edit operations (insertion, deletion, or substitution) needed to transform one trace to the other. Edit distances have been extensively studied in the untimed setting, and form the basis for approximate matching of sequences in different domains such as coding theory, parsing, and speech recognition.
In this paper, we lift the study of edit distances from untimed languages to the timed setting. We define an edit distance between timed words which incorporates both the edit distance between the untimed words and the absolute difference in timestamps. Our edit distance between two timed words is computable in polynomial time. Further, we show that the edit distance between a timed word and a timed language generated by a timed automaton, defined as the edit distance between the word and the closest word in the language, is PSPACE-complete. While computing the edit distance between two timed automata is undecidable, we show that the approximate version, where we decide if the edit distance between two timed automata is either less than a given parameter or more than delta away from the parameter, for delta>0, can be solved in exponential space and is EXPSPACE-hard. Our definitions and techniques can be generalized to the setting of hybrid systems, and we show analogous decidability results for rectangular automata.},
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Majumdar, Rupak},
issn = {2664-1690},
pages = {12},
publisher = {IST Austria},
title = {{Edit distance for timed automata}},
doi = {10.15479/AT:IST-2013-144-v1-1},
year = {2013},
}
@misc{5410,
abstract = {Board games, like Tic-Tac-Toe and CONNECT-4, play an important role not only in development of mathematical and logical skills, but also in emotional and social development. In this paper, we address the problem of generating targeted starting positions for such games. This can facilitate new approaches for bringing novice players to mastery, and also leads to discovery of interesting game variants.
Our approach generates starting states of varying hardness levels for player 1 in a two-player board game, given rules of the board game, the desired number of steps required for player 1 to win, and the expertise levels of the two players. Our approach leverages symbolic methods and iterative simulation to efficiently search the extremely large state space. We present experimental results that include discovery of states of varying hardness levels for several simple grid-based board games. Also, the presence of such states for standard game variants like Tic-Tac-Toe on board size 4x4 opens up new games to be played that have not been played for ages since the default start state is heavily biased. },
author = {Ahmed, Umair and Chatterjee, Krishnendu and Gulwani, Sumit},
issn = {2664-1690},
pages = {13},
publisher = {IST Austria},
title = {{Automatic generation of alternative starting positions for traditional board games}},
doi = {10.15479/AT:IST-2013-146-v1-1},
year = {2013},
}
@inbook{5747,
author = {Dragoi, Cezara and Gupta, Ashutosh and Henzinger, Thomas A},
booktitle = {Computer Aided Verification},
isbn = {9783642397981},
issn = {0302-9743},
location = {Saint Petersburg, Russia},
pages = {174--190},
publisher = {Springer Berlin Heidelberg},
title = {{Automatic Linearizability Proofs of Concurrent Objects with Cooperating Updates}},
doi = {10.1007/978-3-642-39799-8_11},
volume = {8044},
year = {2013},
}
@misc{6440,
abstract = {In order to guarantee that each method of a data structure updates the logical state exactly once, almost all non-blocking implementations employ Compare-And-Swap (CAS) based synchronization. For FIFO queue implementations this translates into concurrent enqueue or dequeue methods competing among themselves to update the same variable, the tail or the head, respectively, leading to high contention and poor scalability. Recent non-blocking queue implementations try to alleviate high contention by increasing the number of contention points, all the while using CAS-based synchronization. Furthermore, obtaining a wait-free implementation with competition is achieved by additional synchronization which leads to further degradation of performance. In this paper we formalize the notion of competitiveness of a synchronizing statement which can be used as a measure for the scalability of concurrent implementations. We present a new queue implementation, the Speculative Pairing (SP) queue, which, as we show, decreases competitiveness by using Fetch-And-Increment (FAI) instead of CAS. We prove that the SP queue is linearizable and lock-free. We also show that replacing CAS with FAI leads to wait-freedom for dequeue methods without an adverse effect on performance. In fact, our experiments suggest that the SP queue can perform and scale better than the state-of-the-art queue implementations.},
author = {Henzinger, Thomas A and Payer, Hannes and Sezgin, Ali},
issn = {2664-1690},
pages = {23},
publisher = {IST Austria},
title = {{Replacing competition with cooperation to achieve scalable lock-free FIFO queues}},
doi = {10.15479/AT:IST-2013-124-v1-1},
year = {2013},
}
@inproceedings{1374,
abstract = {We study two-player zero-sum games over infinite-state graphs equipped with ωB and finitary conditions. Our first contribution is about the strategy complexity, i.e the memory required for winning strategies: we prove that over general infinite-state graphs, memoryless strategies are sufficient for finitary Büchi, and finite-memory suffices for finitary parity games. We then study pushdown games with boundedness conditions, with two contributions. First we prove a collapse result for pushdown games with ωB-conditions, implying the decidability of solving these games. Second we consider pushdown games with finitary parity along with stack boundedness conditions, and show that solving these games is EXPTIME-complete.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
booktitle = {22nd EACSL Annual Conference on Computer Science Logic},
location = {Torino, Italy},
pages = {181--196},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Infinite-state games with finitary conditions}},
doi = {10.4230/LIPIcs.CSL.2013.181},
volume = {23},
year = {2013},
}
@inproceedings{1376,
abstract = {We consider the distributed synthesis problem for temporal logic specifications. Traditionally, the problem has been studied for LTL, and the previous results show that the problem is decidable iff there is no information fork in the architecture. We consider the problem for fragments of LTL and our main results are as follows: (1) We show that the problem is undecidable for architectures with information forks even for the fragment of LTL with temporal operators restricted to next and eventually. (2) For specifications restricted to globally along with non-nested next operators, we establish decidability (in EXPSPACE) for star architectures where the processes receive disjoint inputs, whereas we establish undecidability for architectures containing an information fork-meet structure. (3) Finally, we consider LTL without the next operator, and establish decidability (NEXPTIME-complete) for all architectures for a fragment that consists of a set of safety assumptions, and a set of guarantees where each guarantee is a safety, reachability, or liveness condition.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Pavlogiannis, Andreas},
booktitle = {13th International Conference on Formal Methods in Computer-Aided Design},
location = {Portland, OR, United States},
pages = {18--25},
publisher = {IEEE},
title = {{Distributed synthesis for LTL fragments}},
doi = {10.1109/FMCAD.2013.6679386},
year = {2013},
}
@inproceedings{1385,
abstract = {It is often difficult to correctly implement a Boolean controller for a complex system, especially when concurrency is involved. Yet, it may be easy to formally specify a controller. For instance, for a pipelined processor it suffices to state that the visible behavior of the pipelined system should be identical to a non-pipelined reference system (Burch-Dill paradigm). We present a novel procedure to efficiently synthesize multiple Boolean control signals from a specification given as a quantified first-order formula (with a specific quantifier structure). Our approach uses uninterpreted functions to abstract details of the design. We construct an unsatisfiable SMT formula from the given specification. Then, from just one proof of unsatisfiability, we use a variant of Craig interpolation to compute multiple coordinated interpolants that implement the Boolean control signals. Our method avoids iterative learning and back-substitution of the control functions. We applied our approach to synthesize a controller for a simple two-stage pipelined processor, and present first experimental results.},
author = {Hofferek, Georg and Gupta, Ashutosh and Könighofer, Bettina and Jiang, Jie and Bloem, Roderick},
booktitle = {2013 Formal Methods in Computer-Aided Design},
location = {Portland, OR, United States},
pages = {77--84},
publisher = {IEEE},
title = {{Synthesizing multiple boolean functions using interpolation on a single proof}},
doi = {10.1109/FMCAD.2013.6679394},
year = {2013},
}
@inproceedings{1387,
abstract = {Choices made by nondeterministic word automata depend on both the past (the prefix of the word read so far) and the future (the suffix yet to be read). In several applications, most notably synthesis, the future is diverse or unknown, leading to algorithms that are based on deterministic automata. Hoping to retain some of the advantages of nondeterministic automata, researchers have studied restricted classes of nondeterministic automata. Three such classes are nondeterministic automata that are good for trees (GFT; i.e., ones that can be expanded to tree automata accepting the derived tree languages, thus whose choices should satisfy diverse futures), good for games (GFG; i.e., ones whose choices depend only on the past), and determinizable by pruning (DBP; i.e., ones that embody equivalent deterministic automata). The theoretical properties and relative merits of the different classes are still open, having vagueness on whether they really differ from deterministic automata. In particular, while DBP ⊆ GFG ⊆ GFT, it is not known whether every GFT automaton is GFG and whether every GFG automaton is DBP. Also open is the possible succinctness of GFG and GFT automata compared to deterministic automata. We study these problems for ω-regular automata with all common acceptance conditions. We show that GFT=GFG⊃DBP, and describe a determinization construction for GFG automata.},
author = {Boker, Udi and Kuperberg, Denis and Kupferman, Orna and Skrzypczak, Michał},
location = {Riga, Latvia},
number = {PART 2},
pages = {89--100},
publisher = {Springer},
title = {{Nondeterminism in the presence of a diverse or unknown future}},
doi = {10.1007/978-3-642-39212-2_11},
volume = {7966},
year = {2013},
}
@phdthesis{1406,
abstract = {Epithelial spreading is a critical part of various developmental and wound repair processes. Here we use zebrafish epiboly as a model system to study the cellular and molecular mechanisms underlying the spreading of epithelial sheets. During zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the embryo to eventually cover the entire yolk cell by the end of gastrulation. The EVL leading edge is anchored through tight junctions to the yolk syncytial layer (YSL), where directly adjacent to the EVL margin a contractile actomyosin ring is formed that is thought to drive EVL epiboly. The prevalent view in the field was that the contractile ring exerts a pulling force on the EVL margin, which pulls the EVL towards the vegetal pole. However, how this force is generated and how it affects EVL morphology still remains elusive. Moreover, the cellular mechanisms mediating the increase in EVL surface area, while maintaining tissue integrity and function are still unclear. Here we show that the YSL actomyosin ring pulls on the EVL margin by two distinct force-generating mechanisms. One mechanism is based on contraction of the ring around its circumference, as previously proposed. The second mechanism is based on actomyosin retrogade flows, generating force through resistance against the substrate. The latter can function at any epiboly stage even in situations where the contraction-based mechanism is unproductive. Additionally, we demonstrate that during epiboly the EVL is subjected to anisotropic tension, which guides the orientation of EVL cell division along the main axis (animal-vegetal) of tension. The influence of tension in cell division orientation involves cell elongation and requires myosin-2 activity for proper spindle alignment. Strikingly, we reveal that tension-oriented cell divisions release anisotropic tension within the EVL and that in the absence of such divisions, EVL cells undergo ectopic fusions. 
We conclude that forces applied to the EVL by the action of the YSL actomyosin ring generate a tension anisotropy in the EVL that orients cell divisions, which in turn limit tissue tension increase thereby facilitating tissue spreading.},
author = {Campinho, Pedro},
pages = {123},
publisher = {IST Austria},
title = {{Mechanics of zebrafish epiboly: Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading}},
year = {2013},
}
@article{9055,
abstract = {Spontaneous formation of colonies of bacteria or flocks of birds are examples of self-organization in active living matter. Here, we demonstrate a form of self-organization from nonequilibrium driving forces in a suspension of synthetic photoactivated colloidal particles. They lead to two-dimensional "living crystals," which form, break, explode, and re-form elsewhere. The dynamic assembly results from a competition between self-propulsion of particles and an attractive interaction induced respectively by osmotic and phoretic effects and activated by light. We measured a transition from normal to giant-number fluctuations. Our experiments are quantitatively described by simple numerical simulations. We show that the existence of the living crystals is intrinsically related to the out-of-equilibrium collisions of the self-propelled particles.},
author = {Palacci, Jérémie A and Sacanna, S. and Steinberg, A. P. and Pine, D. J. and Chaikin, P. M.},
issn = {1095-9203},
journal = {Science},
keywords = {Multidisciplinary},
number = {6122},
pages = {936--940},
publisher = {American Association for the Advancement of Science},
title = {{Living crystals of light-activated colloidal surfers}},
doi = {10.1126/science.1230020},
volume = {339},
year = {2013},
}
@article{450,
abstract = {Understanding the relative importance of heterosis and outbreeding depression over multiple generations is a key question in evolutionary biology and is essential for identifying appropriate genetic sources for population and ecosystem restoration. Here we use 2455 experimental crosses between 12 population pairs of the rare perennial plant Rutidosis leptorrhynchoides (Asteraceae) to investigate the multi-generational (F1, F2, F3) fitness outcomes of inter-population hybridization. We detected no evidence of outbreeding depression, with inter-population hybrids and backcrosses showing either similar fitness or significant heterosis for fitness components across the three generations. Variation in heterosis among population pairs was best explained by characteristics of the foreign source or home population, and was greatest when the source population was large, with high genetic diversity and low inbreeding, and the home population was small and inbred. Our results indicate that the primary consideration for maximizing progeny fitness following population augmentation or restoration is the use of seed from large, genetically diverse populations.},
author = {Pickup, Melinda and Field, David and Rowell, David and Young, Andrew},
journal = {Proceedings of the Royal Society B: Biological Sciences},
number = {1750},
publisher = {The Royal Society},
title = {{Source population characteristics affect heterosis following genetic rescue of fragmented plant populations}},
doi = {10.1098/rspb.2012.2058},
volume = {280},
year = {2013},
}
@article{3116,
abstract = {Multithreaded programs coordinate their interaction through synchronization primitives like mutexes and semaphores, which are managed by an OS-provided resource manager. We propose algorithms for the automatic construction of code-aware resource managers for multithreaded embedded applications. Such managers use knowledge about the structure and resource usage (mutex and semaphore usage) of the threads to guarantee deadlock freedom and progress while managing resources in an efficient way. Our algorithms compute managers as winning strategies in certain infinite games, and produce a compact code description of these strategies. We have implemented the algorithms in the tool Cynthesis. Given a multithreaded program in C, the tool produces C code implementing a code-aware resource manager. We show in experiments that Cynthesis produces compact resource managers within a few minutes on a set of embedded benchmarks with up to 6 threads. © 2012 Springer Science+Business Media, LLC.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Faella, Marco and Majumdar, Ritankar and Raman, Vishwanath},
journal = {Formal Methods in System Design},
number = {2},
pages = {142--174},
publisher = {Springer},
title = {{Code aware resource management}},
doi = {10.1007/s10703-012-0170-4},
volume = {42},
year = {2013},
}
@article{3261,
abstract = {Cells in a developing embryo have no direct way of "measuring" their physical position. Through a variety of processes, however, the expression levels of multiple genes come to be correlated with position, and these expression levels thus form a code for "positional information." We show how to measure this information, in bits, using the gap genes in the Drosophila embryo as an example. Individual genes carry nearly two bits of information, twice as much as expected if the expression patterns consisted only of on/off domains separated by sharp boundaries. Taken together, four gap genes carry enough information to define a cell's location with an error bar of ~1% along the anterior-posterior axis of the embryo. This precision is nearly enough for each cell to have a unique identity, which is the maximum information the system can use, and is nearly constant along the length of the embryo. We argue that this constancy is a signature of optimality in the transmission of information from primary morphogen inputs to the output of the gap gene network.},
author = {Dubuis, Julien and Tkacik, Gasper and Wieschaus, Eric and Gregor, Thomas and Bialek, William},
journal = {Proceedings of the National Academy of Sciences},
number = {41},
pages = {16301--16308},
publisher = {National Academy of Sciences},
title = {{Positional information, in bits}},
doi = {10.1073/pnas.1315642110},
volume = {110},
year = {2013},
}
@incollection{3321,
author = {Quadrianto, Novi and Lampert, Christoph},
booktitle = {Encyclopedia of Systems Biology},
editor = {Dubitzky, Werner and Wolkenhauer, Olaf and Cho, Kwang and Yokota, Hiroki},
pages = {1069},
publisher = {Springer},
title = {{Kernel based learning}},
doi = {10.1007/978-1-4419-9863-7_604},
volume = {3},
year = {2013},
}
@phdthesis{1405,
abstract = {Motivated by the analysis of highly dynamic message-passing systems, i.e. unbounded thread creation, mobility, etc. we present a framework for the analysis of depth-bounded systems. Depth-bounded systems are one of the most expressive known fragment of the π-calculus for which interesting verification problems are still decidable. Even though they are infinite state systems depth-bounded systems are well-structured, thus can be analyzed algorithmically. We give an interpretation of depth-bounded systems as graph-rewriting systems. This gives more flexibility and ease of use to apply depth-bounded systems to other type of systems like shared memory concurrency.
First, we develop an adequate domain of limits for depth-bounded systems, a prerequisite for the effective representation of downward-closed sets. Downward-closed sets are needed by forward saturation-based algorithms to represent potentially infinite sets of states. Then, we present an abstract interpretation framework to compute the covering set of well-structured transition systems. Because, in general, the covering set is not computable, our abstraction over-approximates the actual covering set. Our abstraction captures the essence of acceleration based-algorithms while giving up enough precision to ensure convergence. We have implemented the analysis in the PICASSO tool and show that it is accurate in practice. Finally, we build some further analyses like termination using the covering set as starting point.},
author = {Zufferey, Damien},
pages = {134},
publisher = {IST Austria},
title = {{Analysis of dynamic message passing programs}},
year = {2013},
}
@inproceedings{2847,
abstract = {Depth-Bounded Systems form an expressive class of well-structured transition systems. They can model a wide range of concurrent infinite-state systems including those with dynamic thread creation, dynamically changing communication topology, and complex shared heap structures. We present the first method to automatically prove fair termination of depth-bounded systems. Our method uses a numerical abstraction of the system, which we obtain by systematically augmenting an over-approximation of the system’s reachable states with a finite set of counters. This numerical abstraction can be analyzed with existing termination provers. What makes our approach unique is the way in which it exploits the well-structuredness of the analyzed system. We have implemented our work in a prototype tool and used it to automatically prove liveness properties of complex concurrent systems, including nonblocking algorithms such as Treiber’s stack and several distributed processes. Many of these examples are beyond the scope of termination analyses that are based on traditional counter abstractions.},
author = {Bansal, Kshitij and Koskinen, Eric and Wies, Thomas and Zufferey, Damien},
editor = {Piterman, Nir and Smolka, Scott},
location = {Rome, Italy},
pages = {62--77},
publisher = {Springer},
title = {{Structural Counter Abstraction}},
doi = {10.1007/978-3-642-36742-7_5},
volume = {7795},
year = {2013},
}
@inproceedings{2445,
abstract = {We develop program synthesis techniques that can help programmers fix concurrency-related bugs. We make two new contributions to synthesis for concurrency, the first improving the efficiency of the synthesized code, and the second improving the efficiency of the synthesis procedure itself. The first contribution is to have the synthesis procedure explore a variety of (sequential) semantics-preserving program transformations. Classically, only one such transformation has been considered, namely, the insertion of synchronization primitives (such as locks). Based on common manual bug-fixing techniques used by Linux device-driver developers, we explore additional, more efficient transformations, such as the reordering of independent instructions. The second contribution is to speed up the counterexample-guided removal of concurrency bugs within the synthesis procedure by considering partial-order traces (instead of linear traces) as counterexamples. A partial-order error trace represents a set of linear (interleaved) traces of a concurrent program all of which lead to the same error. By eliminating a partial-order error trace, we eliminate in a single iteration of the synthesis procedure all linearizations of the partial-order trace. We evaluated our techniques on several simplified examples of real concurrency bugs that occurred in Linux device drivers.},
author = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Tarrach, Thorsten},
location = {St. Petersburg, Russia},
pages = {951--967},
publisher = {Springer},
title = {{Efficient synthesis for concurrency by semantics-preserving transformations}},
doi = {10.1007/978-3-642-39799-8_68},
volume = {8044},
year = {2013},
}
@article{2263,
abstract = {Nestin-cre transgenic mice have been widely used to direct recombination to neural stem cells (NSCs) and intermediate neural progenitor cells (NPCs). Here we report that a readily utilized, and the only commercially available, Nestin-cre line is insufficient for directing recombination in early embryonic NSCs and NPCs. Analysis of recombination efficiency in multiple cre-dependent reporters and a genetic mosaic line revealed consistent temporal and spatial patterns of recombination in NSCs and NPCs. For comparison we utilized a knock-in Emx1cre line and found robust recombination in NSCs and NPCs in ventricular and subventricular zones of the cerebral cortices as early as embryonic day 12.5. In addition we found that the rate of Nestin-cre driven recombination only reaches sufficiently high levels in NSCs and NPCs during late embryonic and early postnatal periods. These findings are important when commercially available cre lines are considered for directing recombination to embryonic NSCs and NPCs.},
author = {Liang, Huixuan and Hippenmeyer, Simon and Ghashghaei, H.},
journal = {Biology Open},
number = {12},
pages = {1200--1203},
publisher = {The Company of Biologists},
title = {{A Nestin-cre transgenic mouse is insufficient for recombination in early embryonic neural progenitors}},
doi = {10.1242/bio.20122287},
volume = {1},
year = {2012},
}
@article{2302,
abstract = {We introduce propagation models (PMs), a formalism able to express several kinds of equations that describe the behavior of biochemical reaction networks. Furthermore, we introduce the propagation abstract data type (PADT), which separates concerns regarding different numerical algorithms for the transient analysis of biochemical reaction networks from concerns regarding their implementation, thus allowing for portable and efficient solutions. The state of a propagation abstract data type is given by a vector that assigns mass values to a set of nodes, and its (next) operator propagates mass values through this set of nodes. We propose an approximate implementation of the (next) operator, based on threshold abstraction, which propagates only "significant" mass values and thus achieves a compromise between efficiency and accuracy. Finally, we give three use cases for propagation models: the chemical master equation (CME), the reaction rate equation (RRE), and a hybrid method that combines these two equations. These three applications use propagation models in order to propagate probabilities and/or expected values and variances of the model's variables.},
author = {Henzinger, Thomas A and Mateescu, Maria},
journal = {IEEE/ACM Transactions on Computational Biology and Bioinformatics},
number = {2},
pages = {310--322},
publisher = {IEEE},
title = {{The propagation approach for computing biochemical reaction networks}},
doi = {10.1109/TCBB.2012.91},
volume = {10},
year = {2012},
}
@article{2318,
abstract = {We show that bosons interacting via pair potentials with negative scattering length form bound states for a suitable number of particles. In other words, the absence of many-particle bound states of any kind implies the non-negativity of the scattering length of the interaction potential. },
author = {Seiringer, Robert},
journal = {Journal of Spectral Theory},
number = {3},
pages = {321--328},
publisher = {European Mathematical Society},
title = {{Absence of bound states implies non-negativity of the scattering length}},
doi = {10.4171/JST/31},
volume = {2},
year = {2012},
}
@article{2411,
abstract = {The kingdom of fungi provides model organisms for biotechnology, cell biology, genetics, and life sciences in general. Only when their phylogenetic relationships are stably resolved, can individual results from fungal research be integrated into a holistic picture of biology. However, and despite recent progress, many deep relationships within the fungi remain unclear. Here, we present the first phylogenomic study of an entire eukaryotic kingdom that uses a consistency criterion to strengthen phylogenetic conclusions. We reason that branches (splits) recovered with independent data and different tree reconstruction methods are likely to reflect true evolutionary relationships. Two complementary phylogenomic data sets based on 99 fungal genomes and 109 fungal expressed sequence tag (EST) sets analyzed with four different tree reconstruction methods shed light from different angles on the fungal tree of life. Eleven additional data sets address specifically the phylogenetic position of Blastocladiomycota, Ustilaginomycotina, and Dothideomycetes, respectively. The combined evidence from the resulting trees supports the deep-level stability of the fungal groups toward a comprehensive natural system of the fungi. In addition, our analysis reveals methodologically interesting aspects. Enrichment for EST encoded data-a common practice in phylogenomic analyses-introduces a strong bias toward slowly evolving and functionally correlated genes. Consequently, the generalization of phylogenomic data sets as collections of randomly selected genes cannot be taken for granted. A thorough characterization of the data to assess possible influences on the tree reconstruction should therefore become a standard in phylogenomic analyses.},
author = {Ebersberger, Ingo and De Matos Simoes, Ricardo and Kupczok, Anne and Gube, Matthias and Kothe, Erika and Voigt, Kerstin and Von Haeseler, Arndt},
journal = {Molecular Biology and Evolution},
number = {5},
pages = {1319--1334},
publisher = {Oxford University Press},
title = {{A consistent phylogenetic backbone for the fungi}},
doi = {10.1093/molbev/msr285},
volume = {29},
year = {2012},
}
@inproceedings{2715,
abstract = {We consider Markov decision processes (MDPs) with specifications given as Büchi (liveness) objectives. We consider the problem of computing the set of almost-sure winning vertices from where the objective can be ensured with probability 1. We study for the first time the average case complexity of the classical algorithm for computing the set of almost-sure winning vertices for MDPs with Büchi objectives. Our contributions are as follows: First, we show that for MDPs with constant out-degree the expected number of iterations is at most logarithmic and the average case running time is linear (as compared to the worst case linear number of iterations and quadratic time complexity). Second, for the average case analysis over all MDPs we show that the expected number of iterations is constant and the average case running time is linear (again as compared to the worst case linear number of iterations and quadratic time complexity). Finally we also show that given that all MDPs are equally likely, the probability that the classical algorithm requires more than constant number of iterations is exponentially small.},
author = {Chatterjee, Krishnendu and Joglekar, Manas and Shah, Nisarg},
location = {Hyderabad, India},
pages = {461--473},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Average case analysis of the classical algorithm for Markov decision processes with Büchi objectives}},
doi = {10.4230/LIPIcs.FSTTCS.2012.461},
volume = {18},
year = {2012},
}
@inproceedings{2825,
abstract = {We study the problem of maximum marginal prediction (MMP) in probabilistic graphical models, a task that occurs, for example, as the Bayes optimal decision rule under a Hamming loss. MMP is typically performed as a two-stage procedure: one estimates each variable's marginal probability and then forms a prediction from the states of maximal probability. In this work we propose a simple yet effective technique for accelerating MMP when inference is sampling-based: instead of the above two-stage procedure we directly estimate the posterior probability of each decision variable. This allows us to identify the point of time when we are sufficiently certain about any individual decision. Whenever this is the case, we dynamically prune the variables we are confident about from the underlying factor graph. Consequently, at any time only samples of variables whose decision is still uncertain need to be created. Experiments in two prototypical scenarios, multi-label classification and image inpainting, show that adaptive sampling can drastically accelerate MMP without sacrificing prediction accuracy.},
author = {Lampert, Christoph},
location = {Lake Tahoe, NV, United States},
pages = {82--90},
publisher = {Neural Information Processing Systems},
title = {{Dynamic pruning of factor graphs for maximum marginal prediction}},
volume = {1},
year = {2012},
}
@article{2848,
abstract = {We study evolutionary game theory in a setting where individuals learn from each other. We extend the traditional approach by assuming that a population contains individuals with different learning abilities. In particular, we explore the situation where individuals have different search spaces, when attempting to learn the strategies of others. The search space of an individual specifies the set of strategies learnable by that individual. The search space is genetically given and does not change under social evolutionary dynamics. We introduce a general framework and study a specific example in the context of direct reciprocity. For this example, we obtain the counter intuitive result that cooperation can only evolve for intermediate benefit-to-cost ratios, while small and large benefit-to-cost ratios favor defection. Our paper is a step toward making a connection between computational learning theory and evolutionary game dynamics.},
author = {Chatterjee, Krishnendu and Zufferey, Damien and Nowak, Martin},
journal = {Journal of Theoretical Biology},
pages = {161--173},
publisher = {Elsevier},
title = {{Evolutionary game dynamics in populations with different learners}},
doi = {10.1016/j.jtbi.2012.02.021},
volume = {301},
year = {2012},
}
@article{2849,
author = {Edelsbrunner, Herbert and Strelkova, Nataliya},
journal = {Russian Mathematical Surveys},
number = {6},
pages = {1167--1168},
publisher = {IOP Publishing Ltd.},
title = {{On the configuration space of Steiner minimal trees}},
doi = {10.1070/RM2012v067n06ABEH004820},
volume = {67},
year = {2012},
}
@inproceedings{2888,
abstract = {Formal verification aims to improve the quality of hardware and software by detecting errors before they do harm. At the basis of formal verification lies the logical notion of correctness, which purports to capture whether or not a circuit or program behaves as desired. We suggest that the boolean partition into correct and incorrect systems falls short of the practical need to assess the behavior of hardware and software in a more nuanced fashion against multiple criteria.},
author = {Henzinger, Thomas A},
booktitle = {Conference proceedings MODELS 2012},
location = {Innsbruck, Austria},
pages = {1--2},
publisher = {Springer},
title = {{Quantitative reactive models}},
doi = {10.1007/978-3-642-33666-9_1},
volume = {7590},
year = {2012},
}
@inproceedings{2890,
abstract = {Systems are often specified using multiple requirements on their behavior. In practice, these requirements can be contradictory. The classical approach to specification, verification, and synthesis demands more detailed specifications that resolve any contradictions in the requirements. These detailed specifications are usually large, cumbersome, and hard to maintain or modify. In contrast, quantitative frameworks allow the formalization of the intuitive idea that what is desired is an implementation that comes "closest" to satisfying the mutually incompatible requirements, according to a measure of fit that can be defined by the requirements engineer. One flexible framework for quantifying how "well" an implementation satisfies a specification is offered by simulation distances that are parameterized by an error model. We introduce this framework, study its properties, and provide an algorithmic solution for the following quantitative synthesis question: given two (or more) behavioral requirements specified by possibly incompatible finite-state machines, and an error model, find the finite-state implementation that minimizes the maximal simulation distance to the given requirements. Furthermore, we generalize the framework to handle infinite alphabets (for example, realvalued domains). We also demonstrate how quantitative specifications based on simulation distances might lead to smaller and easier to modify specifications. Finally, we illustrate our approach using case studies on error correcting codes and scheduler synthesis.},
author = {Cerny, Pavol and Gopi, Sivakanth and Henzinger, Thomas A and Radhakrishna, Arjun and Totla, Nishant},
booktitle = {Proceedings of the tenth ACM international conference on Embedded software},
location = {Tampere, Finland},
pages = {53--62},
publisher = {ACM},
title = {{Synthesis from incompatible specifications}},
doi = {10.1145/2380356.2380371},
year = {2012},
}
@inproceedings{2891,
abstract = {Quantitative automata are nondeterministic finite automata with edge weights. They value a run by some function from the sequence of visited weights to the reals, and value a word by its minimal/maximal run. They generalize boolean automata, and have gained much attention in recent years. Unfortunately, important automaton classes, such as sum, discounted-sum, and limit-average automata, cannot be determinized. Yet, the quantitative setting provides the potential of approximate determinization. We define approximate determinization with respect to a distance function, and investigate this potential. We show that sum automata cannot be determinized approximately with respect to any distance function. However, restricting to nonnegative weights allows for approximate determinization with respect to some distance functions. Discounted-sum automata allow for approximate determinization, as the influence of a word’s suffix is decaying. However, the naive approach, of unfolding the automaton computations up to a sufficient level, is shown to be doubly exponential in the discount factor. We provide an alternative construction that is singly exponential in the discount factor, in the precision, and in the number of states. We prove matching lower bounds, showing exponential dependency on each of these three parameters. Average and limit-average automata are shown to prohibit approximate determinization with respect to any distance function, and this is the case even for two weights, 0 and 1.},
author = {Boker, Udi and Henzinger, Thomas A},
booktitle = {Leibniz International Proceedings in Informatics},
location = {Hyderabad, India},
pages = {362--373},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Approximate determinization of quantitative automata}},
doi = {10.4230/LIPIcs.FSTTCS.2012.362},
volume = {18},
year = {2012},
}
@article{2902,
abstract = {We present an algorithm for simplifying linear cartographic objects and results obtained with a computer program implementing this algorithm.},
author = {Edelsbrunner, Herbert and Musin, Oleg and Ukhalov, Alexey and Yakimova, Olga and Alexeev, Vladislav and Bogaevskaya, Victoriya and Gorohov, Andrey and Preobrazhenskaya, Margarita},
journal = {Modeling and Analysis of Information Systems},
number = {6},
pages = {152--160},
publisher = {Technische Universität Darmstadt},
internal-note = {publisher looks incorrect for this journal (expected P.G. Demidov Yaroslavl State University) -- verify},
title = {{Fractal and computational geometry for generalizing cartographic objects}},
volume = {19},
year = {2012},
}
@inproceedings{2903,
abstract = {In order to enjoy a digital version of the Jordan Curve Theorem, it is common to use the closed topology for the foreground and the open topology for the background of a 2-dimensional binary image. In this paper, we introduce a single topology that enjoys this theorem for all thresholds decomposing a real-valued image into foreground and background. This topology is easy to construct and it generalizes to n-dimensional images.},
author = {Edelsbrunner, Herbert and Symonova, Olga},
booktitle = {Proceedings of the 9th International Symposium on Voronoi Diagrams in Science and Engineering},
location = {New Brunswick, NJ, USA},
pages = {41--48},
publisher = {IEEE},
title = {{The adaptive topology of a digital image}},
doi = {10.1109/ISVD.2012.11},
year = {2012},
}
@article{2904,
abstract = {Generalized van der Corput sequences are onedimensional, infinite sequences in the unit interval. They are generated from permutations in integer base b and are the building blocks of the multi-dimensional Halton sequences. Motivated by recent progress of Atanassov on the uniform distribution behavior of Halton sequences, we study, among others, permutations of the form P(i) = ai (mod b) for coprime integers a and b. We show that multipliers a that either divide b - 1 or b + 1 generate van der Corput sequences with weak distribution properties. We give explicit lower bounds for the asymptotic distribution behavior of these sequences and relate them to sequences generated from the identity permutation in smaller bases, which are, due to Faure, the weakest distributed generalized van der Corput sequences.},
author = {Pausinger, Florian},
issn = {2118-8572},
journal = {Journal de Théorie des Nombres de Bordeaux},
number = {3},
pages = {729--749},
publisher = {Université de Bordeaux},
title = {{Weak multipliers for generalized van der Corput sequences}},
doi = {10.5802/jtnb.819},
volume = {24},
year = {2012},
}
@article{2912,
author = {Edelsbrunner, Herbert and Strelkova, Nataliya},
journal = {Uspekhi Matematicheskikh Nauk},
number = {6},
pages = {203--204},
publisher = {Moscow Mathematical Society},
title = {{Configuration space for shortest networks}},
doi = {10.4213/rm9503},
volume = {67},
year = {2012},
}
@inproceedings{2915,
author = {Kroemer, Oliver and Lampert, Christoph and Peters, Jan},
internal-note = {booktitle, location, and pages are missing for this @inproceedings entry -- complete when known},
publisher = {Deutsches Zentrum für Luft und Raumfahrt},
title = {{Multi-modal learning for dynamic tactile sensing}},
year = {2012},
}
@inproceedings{2916,
abstract = {The classical (boolean) notion of refinement for behavioral interfaces of system components is the alternating refinement preorder. In this paper, we define a quantitative measure for interfaces, called interface simulation distance. It makes the alternating refinement preorder quantitative by, intu- itively, tolerating errors (while counting them) in the alternating simulation game. We show that the interface simulation distance satisfies the triangle inequality, that the distance between two interfaces does not increase under parallel composition with a third interface, and that the distance between two interfaces can be bounded from above and below by distances between abstractions of the two interfaces. We illustrate the framework, and the properties of the distances under composition of interfaces, with two case studies.},
author = {Cerny, Pavol and Chmelik, Martin and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Electronic Proceedings in Theoretical Computer Science},
location = {Napoli, Italy},
pages = {29--42},
publisher = {EPTCS},
title = {{Interface Simulation Distances}},
doi = {10.4204/EPTCS.96.3},
volume = {96},
year = {2012},
}
@article{2917,
abstract = {The search for extra-terrestrial intelligence (SETI) has been performed principally as a one-way survey, listening of radio frequencies across the Milky Way and other galaxies. However, scientists have engaged in an active messaging only rarely. This suggests the simple rationale that if other civilizations exist and take a similar approach to ours, namely listening but not broadcasting, the result is a silent universe. A simple game theoretical model, the prisoner's dilemma, explains this situation: each player (civilization) can passively search (defect), or actively search and broadcast (cooperate). In order to maximize the payoff (or, equivalently, minimize the risks) the best strategy is not to broadcast. In fact, the active search has been opposed on the basis that it might be dangerous to expose ourselves. However, most of these ideas have not been based on objective arguments, and ignore accounting of the possible gains and losses. Thus, the question stands: should we perform an active search? I develop a game-theoretical framework where civilizations can be of different types, and explicitly apply it to a situation where societies are either interested in establishing a two-way communication or belligerent and in urge to exploit ours. The framework gives a quantitative solution (a mixed-strategy), which is how frequent we should perform the active SETI. This frequency is roughly proportional to the inverse of the risk, and can be extremely small. However, given the immense amount of stars being scanned, it supports active SETI. The model is compared with simulations, and the possible actions are evaluated through the San Marino scale, measuring the risks of messaging.},
author = {de Vladar, Harold},
journal = {International Journal of Astrobiology},
number = {1},
pages = {53--62},
publisher = {Cambridge University Press},
title = {{The game of active search for extra-terrestrial intelligence: Breaking the Great Silence}},
doi = {10.1017/S1473550412000407},
volume = {12},
year = {2012},
}
@unpublished{2928,
abstract = {This paper addresses the problem of approximate MAP-MRF inference in general graphical models. Following [36], we consider a family of linear programming relaxations of the problem where each relaxation is specified by a set of nested pairs of factors for which the marginalization constraint needs to be enforced. We develop a generalization of the TRW-S algorithm [9] for this problem, where we use a decomposition into junction chains, monotonic w.r.t. some ordering on the nodes. This generalizes the monotonic chains in [9] in a natural way. We also show how to deal with nested factors in an efficient way. Experiments show an improvement over min-sum diffusion, MPLP and subgradient ascent algorithms on a number of computer vision and natural language processing problems.},
author = {Kolmogorov, Vladimir and Schoenemann, Thomas},
booktitle = {arXiv},
note = {arXiv preprint},
pages = {16},
publisher = {ArXiv},
title = {{Generalized sequential tree-reweighted message passing}},
year = {2012},
}
@inproceedings{2930,
abstract = {In this paper we investigate k-submodular functions. This natural family of discrete functions includes submodular and bisubmodular functions as the special cases k = 1 and k = 2 respectively. In particular we generalize the known Min-Max-Theorem for submodular and bisubmodular functions. This theorem asserts that the minimum of the (bi)submodular function can be found by solving a maximization problem over a (bi)submodular polyhedron. We define a k-submodular polyhedron, prove a Min-Max-Theorem for k-submodular functions, and give a greedy algorithm to construct the vertices of the polyhedron.},
author = {Huber, Anna and Kolmogorov, Vladimir},
location = {Athens, Greece},
pages = {451--462},
publisher = {Springer},
title = {{Towards minimizing k-submodular functions}},
doi = {10.1007/978-3-642-32147-4_40},
volume = {7422},
year = {2012},
}
@article{2931,
abstract = {In this paper, we present a new approach for establishing correspondences between sparse image features related by an unknown nonrigid mapping and corrupted by clutter and occlusion, such as points extracted from images of different instances of the same object category. We formulate this matching task as an energy minimization problem by defining an elaborate objective function of the appearance and the spatial arrangement of the features. Optimization of this energy is an instance of graph matching, which is in general an NP-hard problem. We describe a novel graph matching optimization technique, which we refer to as dual decomposition (DD), and demonstrate on a variety of examples that this method outperforms existing graph matching algorithms. In the majority of our examples, DD is able to find the global minimum within a minute. The ability to globally optimize the objective allows us to accurately learn the parameters of our matching model from training examples. We show on several matching tasks that our learned model yields results superior to those of state-of-the-art methods.},
author = {Torresani, Lorenzo and Kolmogorov, Vladimir and Rother, Carsten},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {2},
pages = {259--271},
publisher = {IEEE},
internal-note = {TPAMI volume 35, number 2 appeared in 2013; year 2012 may refer to online-first publication -- verify},
title = {{A dual decomposition approach to feature correspondence}},
doi = {10.1109/TPAMI.2012.105},
volume = {35},
year = {2012},
}
@inproceedings{2936,
abstract = {The notion of delays arises naturally in many computational models, such as, in the design of circuits, control systems, and dataflow languages. In this work, we introduce automata with delay blocks (ADBs), extending finite state automata with variable time delay blocks, for deferring individual transition output symbols, in a discrete-time setting. We show that the ADB languages strictly subsume the regular languages, and are incomparable in expressive power to the context-free languages. We show that ADBs are closed under union, concatenation and Kleene star, and under intersection with regular languages, but not closed under complementation and intersection with other ADB languages. We show that the emptiness and the membership problems are decidable in polynomial time for ADBs, whereas the universality problem is undecidable. Finally we consider the linear-time model checking problem, i.e., whether the language of an ADB is contained in a regular language, and show that the model checking problem is PSPACE-complete. Copyright 2012 ACM.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
booktitle = {Proceedings of the tenth ACM international conference on Embedded software},
location = {Tampere, Finland},
pages = {43--52},
publisher = {ACM},
title = {{Finite automata with time delay blocks}},
doi = {10.1145/2380356.2380370},
year = {2012},
}
@inproceedings{2937,
abstract = {Developers building cryptography into security-sensitive applications face a daunting task. Not only must they understand the security guarantees delivered by the constructions they choose, they must also implement and combine them correctly and efficiently. Cryptographic compilers free developers from this task by turning high-level specifications of security goals into efficient implementations. Yet, trusting such tools is hard as they rely on complex mathematical machinery and claim security properties that are subtle and difficult to verify. In this paper we present ZKCrypt, an optimizing cryptographic compiler achieving an unprecedented level of assurance without sacrificing practicality for a comprehensive class of cryptographic protocols, known as Zero-Knowledge Proofs of Knowledge. The pipeline of ZKCrypt integrates purpose-built verified compilers and verifying compilers producing formal proofs in the CertiCrypt framework. By combining the guarantees delivered by each stage, ZKCrypt provides assurance that the output implementation securely realizes the abstract proof goal given as input. We report on the main characteristics of ZKCrypt, highlight new definitions and concepts at its foundations, and illustrate its applicability through a representative example of an anonymous credential system.},
author = {Almeida, José and Barbosa, Manuel and Bangerter, Endre and Barthe, Gilles and Krenn, Stephan and Béguelin, Santiago},
booktitle = {Proceedings of the 2012 ACM conference on Computer and communications security},
location = {Raleigh, NC, USA},
pages = {488--500},
publisher = {ACM},
title = {{Full proof cryptography: Verifiable compilation of efficient zero-knowledge protocols}},
doi = {10.1145/2382196.2382249},
year = {2012},
}
@article{2938,
abstract = {Social insects have a very high potential to become invasive pest species. Here, we explore how their social lifestyle and their interaction with parasites may contribute to this invasive success. Similar to solitary species, parasite release followed by the evolution of increased competitive ability can promote establishment of introduced social insect hosts in their introduced range. Genetic bottlenecks during introduction of low numbers of founder individuals decrease the genetic diversity at three levels: the population, the colony and the individual, with the colony level being specific to social insects. Reduced genetic diversity can affect both the individual immune system and the collective colony-level disease defences (social immunity). Still, the dual immune system is likely to make social insects more robust to parasite attack. Changes in social structure from small, family-based, territorially aggressive societies in native populations towards huge networks of cooperating nests (unicoloniality) occur in some invasive social insects, for example, most invasive ants and some termites. Unicoloniality is likely to affect disease dynamics in multiple ways. The free exchange of individuals within the population leads to an increased genetic heterogeneity among individuals of a single nest, thereby decreasing disease transmission. However, the multitude of reproductively active queens per colony buffers the effect of individual diseased queens and their offspring, which may result in a higher level of vertical disease transmission in unicolonial societies. Lastly, unicoloniality provides a competitive advantage over native species, allowing them to quickly become the dominant species in the habitat, which in turn selects for parasite adaptation to this common host genotype and thus eventually a high parasite pressure. Overall, invasions by insect societies are characterized by general features applying to all introduced species, as well as idiosyncrasies that emerge from their social lifestyle. It is important to study these effects in concert to be able to develop efficient management and biocontrol strategies. © 2012 British Ecological Society.},
author = {Ugelvig, Line V and Cremer, Sylvia},
journal = {Functional Ecology},
number = {6},
pages = {1300--1312},
publisher = {Wiley-Blackwell},
title = {{Effects of social immunity and unicoloniality on host-parasite interactions in invasive insect societies}},
doi = {10.1111/1365-2435.12013},
volume = {26},
year = {2012},
}
@article{2941,
author = {Dolbilin, Nikolai and Edelsbrunner, Herbert and Musin, Oleg},
journal = {Russian Mathematical Surveys},
number = {4},
pages = {781--783},
publisher = {IOP Publishing},
title = {{On the optimality of functionals over triangulations of Delaunay sets}},
doi = {10.1070/RM2012v067n04ABEH004807},
volume = {67},
year = {2012},
}
@inproceedings{2942,
abstract = {Interface theories provide a formal framework for component-based development of software and hardware which supports the incremental design of systems and the independent implementability of components. These capabilities are ensured through mathematical properties of the parallel composition operator and the refinement relation for components. More recently, a conjunction operation was added to interface theories in order to provide support for handling multiple viewpoints, requirements engineering, and component reuse. Unfortunately, the conjunction operator does not allow independent implementability in general. In this paper, we study conditions that need to be imposed on interface models in order to enforce independent implementability with respect to conjunction. We focus on multiple viewpoint specifications and propose a new compatibility criterion between two interfaces, which we call orthogonality. We show that orthogonal interfaces can be refined separately, while preserving both orthogonality and composability with other interfaces. We illustrate the independent implementability of different viewpoints with a FIFO buffer example.},
author = {Henzinger, Thomas A and Nickovic, Dejan},
booktitle = {Conference proceedings Monterey Workshop 2012},
location = {Oxford, UK},
pages = {380--395},
publisher = {Springer},
title = {{Independent implementability of viewpoints}},
doi = {10.1007/978-3-642-34059-8_20},
volume = {7539},
year = {2012},
}
@article{2943,
abstract = {We examine whether the Escherichia coli chromosome is folded into a self-adherent nucleoprotein complex, or alternately is a confined but otherwise unconstrained self-avoiding polymer. We address this through in vivo visualization, using an inducible GFP fusion to the nucleoid-associated protein Fis to non-specifically decorate the entire chromosome. For a range of different growth conditions, the chromosome is a compact structure that does not fill the volume of the cell, and which moves from the new pole to the cell centre. During rapid growth, chromosome segregation occurs well before cell division, with daughter chromosomes coupled by a thin inter-daughter filament before complete segregation, whereas during slow growth chromosomes stay adjacent until cell division occurs. Image correlation analysis indicates that sub-nucleoid structure is stable on a 1min timescale, comparable to the timescale for redistribution time measured for GFP-Fis after photobleaching. Optical deconvolution and writhe calculation analysis indicate that the nucleoid has a large-scale coiled organization rather than being an amorphous mass. Our observations are consistent with the chromosome having a self-adherent filament organization.},
author = {Hadizadeh Yazdi, Nastaran and Guet, Calin C and Johnson, Reid and Marko, John},
journal = {Molecular Microbiology},
number = {6},
pages = {1318--1333},
publisher = {Wiley-Blackwell},
title = {{Variation of the folding and dynamics of the Escherichia coli chromosome with growth conditions}},
doi = {10.1111/mmi.12071},
volume = {86},
year = {2012},
}
@article{2945,
abstract = {In search of foreign antigens, lymphocytes recirculate from the blood, through lymph nodes, into lymphatics and back to the blood. Dendritic cells also migrate to lymph nodes for optimal interaction with lymphocytes. This continuous trafficking of immune cells into and out of lymph nodes is essential for immune surveillance of foreign invaders. In this article, we review our current understanding of the functions of high endothelial venules (HEVs), stroma and lymphatics in the entry, positioning and exit of immune cells in lymph nodes during homeostasis, and we highlight the unexpected role of dendritic cells in the control of lymphocyte homing through HEVs.},
author = {Girard, Jean and Moussion, Christine and Förster, Reinhold},
journal = {Nature Reviews Immunology},
number = {11},
pages = {762--773},
publisher = {Nature Publishing Group},
title = {{HEVs, lymphatics and homeostatic immune cell trafficking in lymph nodes}},
doi = {10.1038/nri3298},
volume = {12},
year = {2012},
}
@article{2946,
abstract = {MicroRNAs (miRNAs) are small noncoding RNAs that function in literally all cellular processes. miRNAs interact with Argonaute (Ago) proteins and guide them to specific target sites located in the 3′-untranslated region (3′-UTR) of target mRNAs leading to translational repression and deadenylation-induced mRNA degradation. Most miRNAs are processed from hairpin-structured precursors by the consecutive action of the RNase III enzymes Drosha and Dicer. However, processing of miR-451 is Dicer independent and cleavage is mediated by the endonuclease Ago2. Here we have characterized miR-451 sequence and structure requirements for processing as well as sorting of miRNAs into different Ago proteins. Pre-miR-451 appears to be optimized for Ago2 cleavage and changes result in reduced processing. In addition, we show that the mature miR-451 only associates with Ago2 suggesting that mature miRNAs are not exchanged between different members of the Ago protein family. Based on cloning and deep sequencing of endogenous miRNAs associated with Ago1-3, we do not find evidence for miRNA sorting in human cells. However, Ago identity appears to influence the length of some miRNAs, while others remain unaffected.},
author = {Dueck, Anne and Ziegler, Christian and Eichner, Alexander and Berezikov, Eugène and Meister, Gunter},
journal = {Nucleic Acids Research},
number = {19},
pages = {9850--9862},
publisher = {Oxford University Press},
title = {{MicroRNAs associated with the different human Argonaute proteins}},
doi = {10.1093/nar/gks705},
volume = {40},
year = {2012},
}
@inproceedings{2947,
abstract = {We introduce games with probabilistic uncertainty, a model for controller synthesis in which the controller observes the state through imprecise sensors that provide correct information about the current state with a fixed probability. That is, in each step, the sensors return an observed state, and given the observed state, there is a probability distribution (due to the estimation error) over the actual current state. The controller must base its decision on the observed state (rather than the actual current state, which it does not know). On the other hand, we assume that the environment can perfectly observe the current state. We show that controller synthesis for qualitative ω-regular objectives in our model can be reduced in polynomial time to standard partial-observation stochastic games, and vice-versa. As a consequence we establish the precise decidability frontier for the new class of games, and establish optimal complexity results for all the decidable problems.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Majumdar, Ritankar},
internal-note = {verify given name of third author -- the ATVA 2012 paper with this title lists Rupak Majumdar},
location = {Thiruvananthapuram, India},
pages = {385--399},
publisher = {Springer},
title = {{Equivalence of games with probabilistic uncertainty and partial observation games}},
doi = {10.1007/978-3-642-33386-6_30},
volume = {7561},
year = {2012},
}
@article{2949,
author = {Dupret, David and Csicsvari, Jozsef L},
journal = {Nature Neuroscience},
number = {11},
pages = {1471--1472},
publisher = {Nature Publishing Group},
title = {{The medial entorhinal cortex keeps Up}},
doi = {10.1038/nn.3245},
volume = {15},
year = {2012},
}
@article{2950,
abstract = {Contractile actomyosin rings drive various fundamental morphogenetic processes ranging from cytokinesis to wound healing. Actomyosin rings are generally thought to function by circumferential contraction. Here, we show that the spreading of the enveloping cell layer (EVL) over the yolk cell during zebrafish gastrulation is driven by a contractile actomyosin ring. In contrast to previous suggestions, we find that this ring functions not only by circumferential contraction but also by a flow-friction mechanism. This generates a pulling force through resistance against retrograde actomyosin flow. EVL spreading proceeds normally in situations where circumferential contraction is unproductive, indicating that the flow-friction mechanism is sufficient. Thus, actomyosin rings can function in epithelial morphogenesis through a combination of cable-constriction and flow-friction mechanisms.},
author = {Behrndt, Martin and Salbreux, Guillaume and Campinho, Pedro and Hauschild, Robert and Oswald, Felix and Roensch, Julia and Grill, Stephan and Heisenberg, Carl-Philipp J},
journal = {Science},
number = {6104},
pages = {257--260},
publisher = {American Association for the Advancement of Science},
title = {{Forces driving epithelial spreading in zebrafish gastrulation}},
doi = {10.1126/science.1224143},
volume = {338},
year = {2012},
}
@article{2951,
abstract = {Differential cell adhesion and cortex tension are thought to drive cell sorting by controlling cell-cell contact formation. Here, we show that cell adhesion and cortex tension have different mechanical functions in controlling progenitor cell-cell contact formation and sorting during zebrafish gastrulation. Cortex tension controls cell-cell contact expansion by modulating interfacial tension at the contact. By contrast, adhesion has little direct function in contact expansion, but instead is needed to mechanically couple the cortices of adhering cells at their contacts, allowing cortex tension to control contact expansion. The coupling function of adhesion is mediated by E-cadherin and limited by the mechanical anchoring of E-cadherin to the cortex. Thus, cell adhesion provides the mechanical scaffold for cell cortex tension to drive cell sorting during gastrulation.},
author = {Maître, Jean-Léon and Berthoumieux, Hélène and Krens, Gabriel and Salbreux, Guillaume and Julicher, Frank and Paluch, Ewa and Heisenberg, Carl-Philipp J},
journal = {Science},
number = {6104},
pages = {253--256},
publisher = {American Association for the Advancement of Science},
title = {{Adhesion functions in cell sorting by mechanically coupling the cortices of adhering cells}},
doi = {10.1126/science.1225399},
volume = {338},
year = {2012},
}
@article{2952,
abstract = {Body axis elongation represents a common and fundamental morphogenetic process in development. A key mechanism triggering body axis elongation without additional growth is convergent extension (CE), whereby a tissue undergoes simultaneous narrowing and extension. Both collective cell migration and cell intercalation are thought to drive CE and are used to different degrees in various species as they elongate their body axis. Here, we provide an overview of CE as a general strategy for body axis elongation and discuss conserved and divergent mechanisms underlying CE among different species.},
author = {Tada, Masazumi and Heisenberg, Carl-Philipp J},
journal = {Development},
number = {21},
pages = {3897--3904},
publisher = {Company of Biologists},
title = {{Convergent extension: Using collective cell migration and cell intercalation to shape embryos}},
doi = {10.1242/dev.073007},
volume = {139},
year = {2012},
}
@article{2953,
author = {Heisenberg, Carl-Philipp J and Fässler, Reinhard},
journal = {Current Opinion in Cell Biology},
number = {5},
pages = {559--561},
publisher = {Elsevier},
title = {{Cell-cell adhesion and extracellular matrix: diversity counts}},
doi = {10.1016/j.ceb.2012.09.002},
volume = {24},
year = {2012},
}
@article{2954,
abstract = {Spontaneous postsynaptic currents (PSCs) provide key information about the mechanisms of synaptic transmission and the activity modes of neuronal networks. However, detecting spontaneous PSCs in vitro and in vivo has been challenging, because of the small amplitude, the variable kinetics, and the undefined time of generation of these events. Here, we describe a, to our knowledge, new method for detecting spontaneous synaptic events by deconvolution, using a template that approximates the average time course of spontaneous PSCs. A recorded PSC trace is deconvolved from the template, resulting in a series of delta-like functions. The maxima of these delta-like events are reliably detected, revealing the precise onset times of the spontaneous PSCs. Among all detection methods, the deconvolution-based method has a unique temporal resolution, allowing the detection of individual events in high-frequency bursts. Furthermore, the deconvolution-based method has a high amplitude resolution, because deconvolution can substantially increase the signal/noise ratio. When tested against previously published methods using experimental data, the deconvolution-based method was superior for spontaneous PSCs recorded in vivo. Using the high-resolution deconvolution-based detection algorithm, we show that the frequency of spontaneous excitatory postsynaptic currents in dentate gyrus granule cells is 4.5 times higher in vivo than in vitro.},
author = {Pernía-Andrade, Alejandro and Goswami, Sarit and Stickler, Yvonne and Fröbe, Ulrich and Schlögl, Alois and Jonas, Peter M},
journal = {Biophysical Journal},
number = {7},
pages = {1429 -- 1439},
publisher = {Biophysical Society},
title = {{A deconvolution-based method with high sensitivity and temporal resolution for detection of spontaneous synaptic currents in vitro and in vivo}},
doi = {10.1016/j.bpj.2012.08.039},
volume = {103},
year = {2012},
}
@inproceedings{2955,
abstract = {We consider two-player stochastic games played on finite graphs with reachability objectives where the first player tries to ensure a target state to be visited almost-surely (i.e., with probability 1), or positively (i.e., with positive probability), no matter the strategy of the second player. We classify such games according to the information and the power of randomization available to the players. On the basis of information, the game can be one-sided with either (a) player 1, or (b) player 2 having partial observation (and the other player has perfect observation), or two-sided with (c) both players having partial observation. On the basis of randomization, the players (a) may not be allowed to use randomization (pure strategies), or (b) may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) may use full randomization. Our main results for pure strategies are as follows. (1) For one-sided games with player 1 having partial observation we show that (in contrast to full randomized strategies) belief-based (subset-construction based) strategies are not sufficient, and we present an exponential upper bound on memory both for almostsure and positive winning strategies; we show that the problem of deciding the existence of almost-sure and positive winning strategies for player 1 is EXPTIME-complete. (2) For one-sided games with player 2 having partial observation we show that non-elementary memory is both necessary and sufficient for both almost-sure and positive winning strategies. (3) We show that for the general (two-sided) case finite-memory strategies are sufficient for both positive and almost-sure winning, and at least non-elementary memory is required. We establish the equivalence of the almost-sure winning problems for pure strategies and for randomized strategies with actions invisible. 
Our equivalence result exhibits serious flaws in previous results of the literature: we show a non-elementary memory lower bound for almost-sure winning whereas an exponential upper bound was previously claimed.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Partial-observation stochastic games: How to win when belief fails}},
doi = {10.1109/LICS.2012.28},
year = {2012},
}
@inproceedings{2956,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis such as synthesis and verification of open systems. In this work we consider solving recursive game graphs (or pushdown game graphs) that can model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives, such as reachability and parity objectives, in this work we study for the first time such games with the most well-studied quantitative objective, namely, mean payoff objectives. In pushdown games two types of strategies are relevant: (1) global strategies, that depend on the entire global history; and (2) modular strategies, that have only local memory and thus do not depend on the context of invocation, but only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two-player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP-hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games; and memoryless modular strategies are sufficient in two-player pushdown games. Finally we also show that all the problems have the same computational complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Mean payoff pushdown games}},
doi = {10.1109/LICS.2012.30},
year = {2012},
}
@inproceedings{2957,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by parity conditions. We consider three qualitative decision problems: (i) the positive decision problem asks whether there is a word that is accepted with positive probability; (ii) the almost decision problem asks whether there is a word that is accepted with probability 1; and (iii) the limit decision problem asks whether words are accepted with probability arbitrarily close to 1. We unify and generalize several decidability results for probabilistic automata over infinite words, and identify a robust (closed under union and intersection) subclass of probabilistic automata for which all the qualitative decision problems are decidable for parity conditions. We also show that if the input words are restricted to lasso shape (regular) words, then the positive and almost problems are decidable for all probabilistic automata with parity conditions. For most decidable problems we show an optimal PSPACE-complete complexity bound.},
author = {Chatterjee, Krishnendu and Tracol, Mathieu},
booktitle = {Proceedings of the 2012 27th Annual ACM/IEEE Symposium on Logic in Computer Science},
location = {Dubrovnik, Croatia},
publisher = {IEEE},
title = {{Decidable problems for probabilistic automata on infinite words}},
doi = {10.1109/LICS.2012.29},
year = {2012},
}
@article{2958,
abstract = {The activity of hippocampal pyramidal cells reflects both the current position of the animal and information related to its current behavior. Here we investigated whether single hippocampal neurons can encode several independent features defining trials during a memory task. We also tested whether task-related information is represented by partial remapping of the place cell population or, instead, via firing rate modulation of spatially stable place cells. To address these two questions, the activity of hippocampal neurons was recorded in rats performing a conditional discrimination task on a modified T-maze in which the identity of a food reward guided behavior. When the rat was on the central arm of the maze, the firing rate of pyramidal cells changed depending on two independent factors: (1) the identity of the food reward given to the animal and (2) the previous location of the animal on the maze. Importantly, some pyramidal cells encoded information relative to both factors. This trial-type specific and retrospective coding did not interfere with the spatial representation of the maze: hippocampal cells had stable place fields and their theta-phase precession profiles were unaltered during the task, indicating that trial-related information was encoded via rate remapping. During error trials, encoding of both trial-related information and spatial location was impaired. Finally, we found that pyramidal cells also encode trial-related information via rate remapping during the continuous version of the rewarded alternation task without delays. These results suggest that hippocampal neurons can encode several task-related cognitive aspects via rate remapping.},
author = {Allen, Kevin and Rawlins, J Nick and Bannerman, David and Csicsvari, Jozsef L},
journal = {Journal of Neuroscience},
number = {42},
pages = {14752 -- 14766},
publisher = {Society for Neuroscience},
title = {{Hippocampal place cells can encode multiple trial-dependent features through rate remapping}},
doi = {10.1523/JNEUROSCI.6175-11.2012},
volume = {32},
year = {2012},
}
@article{2959,
abstract = {We study maximum likelihood estimation in Gaussian graphical models from a geometric point of view. An algebraic elimination criterion allows us to find exact lower bounds on the number of observations needed to ensure that the maximum likelihood estimator (MLE) exists with probability one. This is applied to bipartite graphs, grids and colored graphs. We also study the ML degree, and we present the first instance of a graph for which the MLE exists with probability one, even when the number of observations equals the treewidth.},
author = {Uhler, Caroline},
journal = {Annals of Statistics},
number = {1},
pages = {238 -- 261},
publisher = {Institute of Mathematical Statistics},
title = {{Geometry of maximum likelihood estimation in Gaussian graphical models}},
doi = {10.1214/11-AOS957},
volume = {40},
year = {2012},
}
@article{2962,
abstract = {The choice of summary statistics is a crucial step in approximate Bayesian computation (ABC). Since statistics are often not sufficient, this choice involves a trade-off between loss of information and reduction of dimensionality. The latter may increase the efficiency of ABC. Here, we propose an approach for choosing summary statistics based on boosting, a technique from the machine learning literature. We consider different types of boosting and compare them to partial least squares regression as an alternative. To mitigate the lack of sufficiency, we also propose an approach for choosing summary statistics locally, in the putative neighborhood of the true parameter value. We study a demographic model motivated by the re-introduction of Alpine ibex (Capra ibex) into the Swiss Alps. The parameters of interest are the mean and standard deviation across microsatellites of the scaled ancestral mutation rate (θanc = 4 Ne u), and the proportion of males obtaining access to matings per breeding season (ω). By simulation, we assess the properties of the posterior distribution obtained with the various methods. According to our criteria, ABC with summary statistics chosen locally via boosting with the L2-loss performs best. Applying that method to the ibex data, we estimate θanc ≈ 1.288, and find that most of the variation across loci of the ancestral mutation rate u is between 7.7×10−4 and 3.5×10−3 per locus per generation. The proportion of males with access to matings is estimated to ω ≈ 0.21, which is in good agreement with recent independent estimates.},
author = {Aeschbacher, Simon and Beaumont, Mark and Futschik, Andreas},
journal = {Genetics},
number = {3},
pages = {1027 -- 1047},
publisher = {Genetics Society of America},
title = {{A novel approach for choosing summary statistics in approximate Bayesian computation}},
doi = {10.1534/genetics.112.143164},
volume = {192},
year = {2012},
}
@article{2963,
abstract = {Zebra finches are a ubiquitous model system for the study of vocal learning in animal communication. Their song has been well described, but its possible function(s) in social communication are only partly understood. The so-called ‘directed song’ is a high-intensity, high-performance song given during courtship in close proximity to the female, which is known to mediate mate choice and mating. However, this singing mode constitutes only a fraction of zebra finch males’ prolific song output. Potential communicative functions of their second, ‘undirected’ singing mode remain unresolved in the face of contradicting reports of both facilitating and inhibiting effects of social company on singing. We addressed this issue by experimentally manipulating social contexts in a within-subject design, comparing a solo versus male or female only company condition, each lasting for 24 hours. Males’ total song output was significantly higher when a conspecific was in audible and visible distance than when they were alone. Male and female company had an equally facilitating effect on song output. Our findings thus indicate that singing motivation is facilitated rather than inhibited by social company, suggesting that singing in zebra finches might function both in inter- and intrasexual communication.},
author = {Jesse, Fabienne and Riebel, Katharina},
journal = {Behavioural Processes},
number = {3},
pages = {262 -- 266},
publisher = {Elsevier},
title = {{Social facilitation of male song by male and female conspecifics in the zebra finch, Taeniopygia guttata}},
doi = {10.1016/j.beproc.2012.09.006},
volume = {91},
year = {2012},
}
@phdthesis{2964,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. These neurons receive multiple excitatory inputs from numerous sources. Therefore, the rules of spatiotemporal integration of multiple synaptic inputs and propagation of action potentials are important to understand how CA3 neurons contribute to higher brain functions at cellular level. By using confocally targeted patch-clamp recording techniques, we investigated the biophysical properties of rat CA3 pyramidal neuron dendrites. We found two distinct dendritic domains critical for action potential initiation and propagation: In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+-channel mediated dendritic spikes are efficiently evoked by local dendritic depolarization or waveforms mimicking synaptic events. These findings can be explained by a high Na+-to-K+ conductance density ratio of CA3 pyramidal neuron dendrites. The results challenge the prevailing view that proximal mossy fiber inputs activate CA3 pyramidal neurons more efficiently than distal perforant inputs by showing that the distal synapses trigger a different form of activity represented by dendritic spikes. The high probability of dendritic spike initiation in the distal area may enhance the computational power of CA3 pyramidal neurons in the hippocampal network. },
author = {Kim, Sooyun},
pages = {65},
publisher = {IST Austria},
title = {{Active properties of hippocampal CA3 pyramidal neuron dendrites}},
year = {2012},
}
@article{2965,
abstract = {Dieser Artikel soll die sechs verschiedenen Creative Commons Lizenzen erläutern und ihre Bedeutung im Rahmen des wissenschaftlichen Publizierens und des Open Access erklären (CC-BY, CC-BY-SA, CC-BY-NC, CC-BY-ND, CC-BYNC-SA, CC-BY-NC-ND).},
author = {Danowski, Patrick},
journal = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen \& Bibliothekare},
number = {2},
pages = {200 -- 212},
publisher = {VÖB},
title = {{Kontext Open Access: Creative Commons}},
volume = {65},
year = {2012},
}