@inproceedings{3281,
abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1}^m → {0,1}^* amplifies relative lossiness from ℓ/n to L/m if for every function f:{0,1}^n → {0,1}^n it holds that (1) If f is injective then so is C^f. (2) If f has image size of at most 2^(n-ℓ), then C^f has image size at most 2^(m-L). The question is whether such C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C^f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(log n)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
booktitle = {Theory of Cryptography},
location = {Taormina, Sicily, Italy},
pages = {458--475},
publisher = {Springer},
title = {{Lossy functions do not amplify well}},
doi = {10.1007/978-3-642-28914-9_26},
volume = {7194},
year = {2012},
}
@inproceedings{3250,
abstract = {The Learning Parity with Noise (LPN) problem has recently found many applications in cryptography as the hardness assumption underlying the constructions of "provably secure" cryptographic schemes like encryption or authentication protocols. Being provably secure means that the scheme comes with a proof showing that the existence of an efficient adversary against the scheme implies that the underlying hardness assumption is wrong. LPN based schemes are appealing for theoretical and practical reasons. On the theoretical side, LPN based schemes offer a very strong security guarantee. The LPN problem is equivalent to the problem of decoding random linear codes, a problem that has been extensively studied in the last half century. The fastest known algorithms run in exponential time and unlike most number-theoretic problems used in cryptography, the LPN problem does not succumb to known quantum algorithms. On the practical side, LPN based schemes are often extremely simple and efficient in terms of code-size as well as time and space requirements. This makes them prime candidates for light-weight devices like RFID tags, which are too weak to implement standard cryptographic primitives like the AES block-cipher. This talk will be a gentle introduction to provable security using simple LPN based schemes as examples. Starting from pseudorandom generators and symmetric key encryption, over secret-key authentication protocols, and, if time admits, touching on recent constructions of public-key identification, commitments and zero-knowledge proofs.},
author = {Pietrzak, Krzysztof Z},
booktitle = {SOFSEM 2012: Theory and Practice of Computer Science},
location = {Špindlerův Mlýn, Czech Republic},
pages = {99--114},
publisher = {Springer},
title = {{Cryptography from learning parity with noise}},
doi = {10.1007/978-3-642-27660-6_9},
volume = {7147},
year = {2012},
}
@article{3248,
abstract = {We describe RTblob, a high speed vision system that detects objects in cluttered scenes based on their color and shape at a speed of over 800 frames/s. Because the system is available as open-source software and relies only on off-the-shelf PC hardware components, it can provide the basis for multiple application scenarios. As an illustrative example, we show how RTblob can be used in a robotic table tennis scenario to estimate ball trajectories through 3D space simultaneously from four cameras images at a speed of 200 Hz.},
author = {Lampert, Christoph and Peters, Jan},
journal = {Journal of Real-Time Image Processing},
number = {1},
pages = {31--41},
publisher = {Springer},
title = {{Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components}},
doi = {10.1007/s11554-010-0168-3},
volume = {7},
year = {2012},
}
@article{3243,
author = {Danowski, Patrick},
journal = {Büchereiperspektiven},
pages = {11},
publisher = {Büchereiverband Österreichs},
title = {{Zwischen Technologie und Information}},
volume = {1/2012},
year = {2012},
}
@article{387,
abstract = {In this Letter we present detailed study of the density of states near defects in Bi$_2$Se$_3$. In particular, we present data on the commonly found triangular defects in this system. While we do not find any measurable quasiparticle scattering interference effects, we do find localized resonances, which can be well fitted by theory once the potential is taken to be extended to properly account for the observed defects. The data together with the fits confirm that while the local density of states around the Dirac point of the electronic spectrum at the surface is significantly disrupted near the impurity by the creation of low-energy resonance state, the Dirac point is not locally destroyed. We discuss our results in terms of the expected protected surface state of topological insulators. © 2012 American Physical Society.},
author = {Alpichshev, Zhanybek and Biswas, Rudro and Balatsky, Alexander and Analytis, James and Chu, Jiunhaw and Fisher, Ian and Kapitulnik, Aharon},
journal = {Physical Review Letters},
number = {20},
publisher = {American Physical Society},
title = {{STM imaging of impurity resonances on Bi$_2$Se$_3$}},
doi = {10.1103/PhysRevLett.108.206402},
volume = {108},
year = {2012},
}
@article{596,
abstract = {The human Mediator complex controls RNA polymerase II (pol II) function in ways that remain incompletely understood. Activator-Mediator binding alters Mediator structure, and these activator-induced structural shifts appear to play key roles in regulating transcription. A recent cryo-electron microscopy (EM) analysis revealed that pol II adopted a stable orientation within a Mediator-pol II-TFIIF assembly in which Mediator was bound to the activation domain of viral protein 16 (VP16). Whereas TFIIF was shown to be important for orienting pol II within this assembly, the potential role of the activator was not assessed. To determine how activator binding might affect pol II orientation, we isolated human Mediator-pol II-TFIIF complexes in which Mediator was not bound to an activator. Cryo-EM analysis of this assembly, coupled with pol II crystal structure docking, revealed that pol II binds Mediator at the same general location; however, in contrast to VP16-bound Mediator, pol II does not appear to stably orient in the absence of an activator. Variability in pol II orientation might be important mechanistically, perhaps to enable sense and antisense transcription at human promoters. Because Mediator interacts extensively with pol II, these results suggest that Mediator structural shifts induced by activator binding help stably orient pol II prior to transcription initiation.},
author = {Bernecky, Carrie A and Taatjes, Dylan},
journal = {Journal of Molecular Biology},
number = {5},
pages = {387--394},
publisher = {Elsevier},
title = {{Activator-mediator binding stabilizes RNA polymerase II orientation within the human mediator-RNA polymerase II-TFIIF assembly}},
doi = {10.1016/j.jmb.2012.02.014},
volume = {417},
year = {2012},
}
@article{7074,
abstract = {The Seebeck coefficients, electrical resistivities, total thermal conductivities, and magnetization are reported for temperatures between 5 and 350 K for n-type Bi0.88Sb0.12 nano-composite alloys made by Ho-doping at the 0, 1, and 3 % atomic levels. The alloys were prepared using a dc hot-pressing method, and are shown to be single phase for both Ho contents with grain sizes on the average of 900 nm. We find the parent compound has a maximum of ZT = 0.28 at 231 K, while doping 1 % Ho increases the maximum ZT to 0.31 at 221 K and the 3 % doped sample suppresses the maximum ZT = 0.24 at a temperature of 260 K.},
author = {Lukas, K. C. and Joshi, G. and Modic, Kimberly A and Ren, Z. F. and Opeil, C. P.},
issn = {1573-4803},
journal = {Journal of Materials Science},
number = {15},
pages = {5729--5734},
publisher = {Springer Nature},
title = {{Thermoelectric properties of Ho-doped Bi$_{0.88}$Sb$_{0.12}$}},
doi = {10.1007/s10853-012-6463-6},
volume = {47},
year = {2012},
}
@inproceedings{762,
abstract = {Decades of research in distributed computing have led to a variety of perspectives on what it means for a concurrent algorithm to be efficient, depending on model assumptions, progress guarantees, and complexity metrics. It is therefore natural to ask whether one could compose algorithms that perform efficiently under different conditions, so that the composition preserves the performance of the original components when their conditions are met. In this paper, we evaluate the cost of composing shared-memory algorithms. First, we formally define the notion of safely composable algorithms and we show that every sequential type has a safely composable implementation, as long as enough state is transferred between modules. Since such generic implementations are inherently expensive, we present a more general light-weight specification that allows the designer to transfer very little state between modules, by taking advantage of the semantics of the implemented object. Using this framework, we implement a composed longlived test-and-set object, with the property that each of its modules is asymptotically optimal with respect to the progress condition it ensures, while the entire implementation only uses objects with consensus number at most two. Thus, we show that the overhead of composition can be negligible in the case of some important shared-memory abstractions.},
author = {Alistarh, Dan and Guerraoui, Rachid and Kuznetsov, Petr V and Losa, Giuliano},
booktitle = {Proceedings of the 24th Annual ACM Symposium on Parallelism in Algorithms and Architectures},
pages = {298--307},
publisher = {ACM},
title = {{On the cost of composing shared-memory algorithms}},
doi = {10.1145/2312005.2312057},
year = {2012},
}
@article{767,
abstract = {Synchronous distributed algorithms are easier to design and prove correct than algorithms that tolerate asynchrony. Yet, in the real world, networks experience asynchrony and other timing anomalies. In this paper, we address the question of how to efficiently transform an algorithm that relies on synchronous timing into an algorithm that tolerates asynchronous executions. We introduce a transformation technique from synchronous algorithms to indulgent algorithms (Guerraoui, in PODC, pp. 289-297, 2000), which induces only a constant overhead in terms of time complexity in well-behaved executions. Our technique is based on a new abstraction we call an asynchrony detector, which the participating processes implement collectively. The resulting transformation works for the class of colorless distributed tasks, including consensus and set agreement. Interestingly, we also show that our technique is relevant for colored tasks, by applying it to the renaming problem, to obtain the first indulgent renaming algorithm.},
author = {Alistarh, Dan and Gilbert, Seth and Guerraoui, Rachid and Travers, Corentin},
journal = {Theory of Computing Systems},
number = {4},
pages = {404--424},
publisher = {Springer},
title = {{Generating Fast Indulgent Algorithms}},
doi = {10.1007/s00224-012-9407-2},
volume = {51},
year = {2012},
}
@article{801,
abstract = {Fungal cell walls frequently contain a polymer of mannose and galactose called galactomannan. In the pathogenic filamentous fungus Aspergillus fumigatus, this polysaccharide is made of a linear mannan backbone with side chains of galactofuran and is anchored to the plasma membrane via a glycosylphosphatidylinositol or is covalently linked to the cell wall. To date, the biosynthesis and significance of this polysaccharide are unknown. The present data demonstrate that deletion of the Golgi UDP-galactofuranose transporter GlfB or the GDP-mannose transporter GmtA leads to the absence of galactofuran or galactomannan, respectively. This indicates that the biosynthesis of galactomannan probably occurs in the lumen of the Golgi apparatus and thus contrasts with the biosynthesis of other fungal cell wall polysaccharides studied to date that takes place at the plasma membrane. Transglycosylation of galactomannan from the membrane to the cell wall is hypothesized because both the cell wall-bound and membrane-bound polysaccharide forms are affected in the generated mutants. Considering the severe growth defect of the A. fumigatus GmtA-deficient mutant, proving this paradigm might provide new targets for antifungal therapy.},
author = {Engel, Jakob and Schmalhorst, Philipp and Routier, Françoise H},
journal = {Journal of Biological Chemistry},
number = {53},
pages = {44418--44424},
publisher = {American Society for Biochemistry and Molecular Biology},
title = {{Biosynthesis of the fungal cell wall polysaccharide galactomannan requires intraluminal GDP-mannose}},
doi = {10.1074/jbc.M112.398321},
volume = {287},
year = {2012},
}
@article{887,
abstract = {A subject of extensive study in evolutionary theory has been the issue of how neutral, redundant copies can be maintained in the genome for long periods of time. Concurrently, examples of adaptive gene duplications to various environmental conditions in different species have been described. At this point, it is too early to tell whether or not a substantial fraction of gene copies have initially achieved fixation by positive selection for increased dosage. Nevertheless, enough examples have accumulated in the literature that such a possibility should be considered. Here, I review the recent examples of adaptive gene duplications and make an attempt to draw generalizations on what types of genes may be particularly prone to be selected for under certain environmental conditions. The identification of copy-number variation in ecological field studies of species adapting to stressful or novel environmental conditions may improve our understanding of gene duplications as a mechanism of adaptation and its relevance to the long-term persistence of gene duplications.},
author = {Kondrashov, Fyodor},
journal = {Proceedings of the Royal Society of London Series B Biological Sciences},
number = {1749},
pages = {5048--5057},
publisher = {The Royal Society},
title = {{Gene duplication as a mechanism of genomic adaptation to a changing environment}},
doi = {10.1098/rspb.2012.1108},
volume = {279},
year = {2012},
}
@article{91,
abstract = {We demonstrate how to appropriately estimate the zero-frequency (static) hyperpolarizability of an organic molecule from its charge distribution, and we explore applications of these estimates for identifying and evaluating new organic nonlinear optical (NLO) materials. First, we calculate hyperpolarizabilities from Hartree-Fock-derived charge distributions and find order-of-magnitude agreement with experimental values. We show that these simple arithmetic calculations will enable systematic searches for new organic NLO molecules. Second, we derive hyperpolarizabilities from crystallographic data using a multipolar charge-density analysis and find good agreement with empirical calculations. This demonstrates an experimental determination of the full static hyperpolarizability tensor in a solid-state sample.},
author = {Higginbotham, Andrew P and Cole, Jacqueline and Blood Forsythe, Martin and Hickstein, Daniel},
journal = {Journal of Applied Physics},
number = {3},
publisher = {American Institute of Physics},
title = {{Identifying and evaluating organic nonlinear optical materials via molecular moments}},
doi = {10.1063/1.3678593},
volume = {111},
year = {2012},
}
@inproceedings{2891,
abstract = {Quantitative automata are nondeterministic finite automata with edge weights. They value a run by some function from the sequence of visited weights to the reals, and value a word by its minimal/maximal run. They generalize boolean automata, and have gained much attention in recent years. Unfortunately, important automaton classes, such as sum, discounted-sum, and limit-average automata, cannot be determinized. Yet, the quantitative setting provides the potential of approximate determinization. We define approximate determinization with respect to a distance function, and investigate this potential.
We show that sum automata cannot be determinized approximately with respect to any distance function. However, restricting to nonnegative weights allows for approximate determinization with respect to some distance functions.
Discounted-sum automata allow for approximate determinization, as the influence of a word’s suffix is decaying. However, the naive approach, of unfolding the automaton computations up to a sufficient level, is shown to be doubly exponential in the discount factor. We provide an alternative construction that is singly exponential in the discount factor, in the precision, and in the number of states. We prove matching lower bounds, showing exponential dependency on each of these three parameters.
Average and limit-average automata are shown to prohibit approximate determinization with respect to any distance function, and this is the case even for two weights, 0 and 1.},
author = {Boker, Udi and Henzinger, Thomas A},
booktitle = {Leibniz International Proceedings in Informatics},
location = {Hyderabad, India},
pages = {362--373},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Approximate determinization of quantitative automata}},
doi = {10.4230/LIPIcs.FSTTCS.2012.362},
volume = {18},
year = {2012},
}
@inproceedings{2916,
abstract = {The classical (boolean) notion of refinement for behavioral interfaces of system components is the alternating refinement preorder. In this paper, we define a quantitative measure for interfaces, called interface simulation distance. It makes the alternating refinement preorder quantitative by, intuitively, tolerating errors (while counting them) in the alternating simulation game. We show that the interface simulation distance satisfies the triangle inequality, that the distance between two interfaces does not increase under parallel composition with a third interface, and that the distance between two interfaces can be bounded from above and below by distances between abstractions of the two interfaces. We illustrate the framework, and the properties of the distances under composition of interfaces, with two case studies.},
author = {Cerny, Pavol and Chmelik, Martin and Henzinger, Thomas A and Radhakrishna, Arjun},
booktitle = {Electronic Proceedings in Theoretical Computer Science},
location = {Napoli, Italy},
pages = {29--42},
publisher = {EPTCS},
title = {{Interface Simulation Distances}},
doi = {10.4204/EPTCS.96.3},
volume = {96},
year = {2012},
}
@inproceedings{2942,
abstract = {Interface theories provide a formal framework for component-based development of software and hardware which supports the incremental design of systems and the independent implementability of components. These capabilities are ensured through mathematical properties of the parallel composition operator and the refinement relation for components. More recently, a conjunction operation was added to interface theories in order to provide support for handling multiple viewpoints, requirements engineering, and component reuse. Unfortunately, the conjunction operator does not allow independent implementability in general. In this paper, we study conditions that need to be imposed on interface models in order to enforce independent implementability with respect to conjunction. We focus on multiple viewpoint specifications and propose a new compatibility criterion between two interfaces, which we call orthogonality. We show that orthogonal interfaces can be refined separately, while preserving both orthogonality and composability with other interfaces. We illustrate the independent implementability of different viewpoints with a FIFO buffer example.},
author = {Henzinger, Thomas A and Nickovic, Dejan},
booktitle = {Conference proceedings Monterey Workshop 2012},
location = {Oxford, UK},
pages = {380--395},
publisher = {Springer},
title = {{Independent implementability of viewpoints}},
doi = {10.1007/978-3-642-34059-8_20},
volume = {7539},
year = {2012},
}
@inproceedings{2947,
abstract = {We introduce games with probabilistic uncertainty, a model for controller synthesis in which the controller observes the state through imprecise sensors that provide correct information about the current state with a fixed probability. That is, in each step, the sensors return an observed state, and given the observed state, there is a probability distribution (due to the estimation error) over the actual current state. The controller must base its decision on the observed state (rather than the actual current state, which it does not know). On the other hand, we assume that the environment can perfectly observe the current state. We show that controller synthesis for qualitative ω-regular objectives in our model can be reduced in polynomial time to standard partial-observation stochastic games, and vice-versa. As a consequence we establish the precise decidability frontier for the new class of games, and establish optimal complexity results for all the decidable problems.},
author = {Chatterjee, Krishnendu and Chmelik, Martin and Majumdar, Ritankar},
booktitle = {Automated Technology for Verification and Analysis},
location = {Thiruvananthapuram, India},
pages = {385--399},
publisher = {Springer},
title = {{Equivalence of games with probabilistic uncertainty and partial observation games}},
doi = {10.1007/978-3-642-33386-6_30},
volume = {7561},
year = {2012},
}
@article{3128,
abstract = {We consider two-player zero-sum stochastic games on graphs with ω-regular winning conditions specified as parity objectives. These games have applications in the design and control of reactive systems. We survey the complexity results for the problem of deciding the winner in such games, and in classes of interest obtained as special cases, based on the information and the power of randomization available to the players, on the class of objectives and on the winning mode. On the basis of information, these games can be classified as follows: (a) partial-observation (both players have partial view of the game); (b) one-sided partial-observation (one player has partial-observation and the other player has complete-observation); and (c) complete-observation (both players have complete view of the game). The one-sided partial-observation games have two important subclasses: the one-player games, known as partial-observation Markov decision processes (POMDPs), and the blind one-player games, known as probabilistic automata. On the basis of randomization, (a) the players may not be allowed to use randomization (pure strategies), or (b) they may choose a probability distribution over actions but the actual random choice is external and not visible to the player (actions invisible), or (c) they may use full randomization. Finally, various classes of games are obtained by restricting the parity objective to a reachability, safety, Büchi, or coBüchi condition. We also consider several winning modes, such as sure-winning (i.e., all outcomes of a strategy have to satisfy the winning condition), almost-sure winning (i.e., winning with probability 1), limit-sure winning (i.e., winning with probability arbitrarily close to 1), and value-threshold winning (i.e., winning with probability at least ν, where ν is a given rational).},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A},
journal = {Formal Methods in System Design},
number = {2},
pages = {268--284},
publisher = {Springer},
title = {{A survey of partial-observation stochastic parity games}},
doi = {10.1007/s10703-012-0164-2},
volume = {43},
year = {2012},
}
@inproceedings{3135,
abstract = {We introduce consumption games, a model for discrete interactive system with multiple resources that are consumed or reloaded independently. More precisely, a consumption game is a finite-state graph where each transition is labeled by a vector of resource updates, where every update is a non-positive number or ω. The ω updates model the reloading of a given resource. Each vertex belongs either to player □ or player ◇, where the aim of player □ is to play so that the resources are never exhausted. We consider several natural algorithmic problems about consumption games, and show that although these problems are computationally hard in general, they are solvable in polynomial time for every fixed number of resource types (i.e., the dimension of the update vectors) and bounded resource updates.},
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Kučera, Antonín and Novotny, Petr},
booktitle = {Computer Aided Verification},
location = {Berkeley, CA, USA},
pages = {23--38},
publisher = {Springer},
title = {{Efficient controller synthesis for consumption games with multiple resource types}},
doi = {10.1007/978-3-642-31424-7_8},
volume = {7358},
year = {2012},
}
@inproceedings{3255,
abstract = {In this paper we survey results of two-player games on graphs and Markov decision processes with parity, mean-payoff and energy objectives, and the combination of mean-payoff and energy objectives with parity objectives. These problems have applications in verification and synthesis of reactive systems in resource-constrained environments.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
booktitle = {Mathematical and Engineering Methods in Computer Science},
location = {Lednice, Czech Republic},
pages = {37--46},
publisher = {Springer},
title = {{Games and Markov decision processes with mean payoff parity and energy parity objectives}},
doi = {10.1007/978-3-642-25929-6_3},
volume = {7119},
year = {2012},
}
@inproceedings{495,
abstract = {An automaton with advice is a finite state automaton which has access to an additional fixed infinite string called an advice tape. We refine the Myhill-Nerode theorem to characterize the languages of finite strings that are accepted by automata with advice. We do the same for tree automata with advice.},
author = {Kruckman, Alex and Rubin, Sasha and Sheridan, John and Zax, Ben},
booktitle = {Proceedings GandALF 2012},
location = {Napoli, Italy},
pages = {238--246},
publisher = {Open Publishing Association},
title = {{A {Myhill-Nerode} theorem for automata with advice}},
doi = {10.4204/EPTCS.96.18},
volume = {96},
year = {2012},
}