@inproceedings{2048,
abstract = {Leakage resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state. Unfortunately, most leakage resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this mostly is an artefact of the security proof and in practice much simpler construction may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage-functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three round Feistel network instantiated with a leakage resilient PRF yields a leakage resilient PRP if the inputs are chosen non-adaptively (This complements the result of Dodis and Pietrzak [CRYPTO’10] who show that if adaptive queries are allowed, a superlogarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage resilient PRF if both, the leakage-function and the inputs, are chosen non-adaptively.},
author = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim},
booktitle = {Cryptographic Hardware and Embedded Systems -- CHES 2012},
location = {Leuven, Belgium},
pages = {213 -- 232},
publisher = {Springer},
title = {{Practical leakage-resilient symmetric cryptography}},
doi = {10.1007/978-3-642-33027-8_13},
volume = {7428},
year = {2012},
}
@article{2411,
abstract = {The kingdom of fungi provides model organisms for biotechnology, cell biology, genetics, and life sciences in general. Only when their phylogenetic relationships are stably resolved, can individual results from fungal research be integrated into a holistic picture of biology. However, and despite recent progress, many deep relationships within the fungi remain unclear. Here, we present the first phylogenomic study of an entire eukaryotic kingdom that uses a consistency criterion to strengthen phylogenetic conclusions. We reason that branches (splits) recovered with independent data and different tree reconstruction methods are likely to reflect true evolutionary relationships. Two complementary phylogenomic data sets based on 99 fungal genomes and 109 fungal expressed sequence tag (EST) sets analyzed with four different tree reconstruction methods shed light from different angles on the fungal tree of life. Eleven additional data sets address specifically the phylogenetic position of Blastocladiomycota, Ustilaginomycotina, and Dothideomycetes, respectively. The combined evidence from the resulting trees supports the deep-level stability of the fungal groups toward a comprehensive natural system of the fungi. In addition, our analysis reveals methodologically interesting aspects. Enrichment for EST encoded data-a common practice in phylogenomic analyses-introduces a strong bias toward slowly evolving and functionally correlated genes. Consequently, the generalization of phylogenomic data sets as collections of randomly selected genes cannot be taken for granted. A thorough characterization of the data to assess possible influences on the tree reconstruction should therefore become a standard in phylogenomic analyses.},
author = {Ebersberger, Ingo and De Matos Simoes, Ricardo and Kupczok, Anne and Gube, Matthias and Kothe, Erika and Voigt, Kerstin and Von Haeseler, Arndt},
journal = {Molecular Biology and Evolution},
number = {5},
pages = {1319 -- 1334},
publisher = {Oxford University Press},
title = {{A consistent phylogenetic backbone for the fungi}},
doi = {10.1093/molbev/msr285},
volume = {29},
year = {2012},
}
@article{2904,
abstract = {Generalized van der Corput sequences are one-dimensional, infinite sequences in the unit interval. They are generated from permutations in integer base b and are the building blocks of the multi-dimensional Halton sequences. Motivated by recent progress of Atanassov on the uniform distribution behavior of Halton sequences, we study, among others, permutations of the form P(i) = ai (mod b) for coprime integers a and b. We show that multipliers a that either divide b - 1 or b + 1 generate van der Corput sequences with weak distribution properties. We give explicit lower bounds for the asymptotic distribution behavior of these sequences and relate them to sequences generated from the identity permutation in smaller bases, which are, due to Faure, the weakest distributed generalized van der Corput sequences.},
author = {Pausinger, Florian},
journal = {Journal de Théorie des Nombres de Bordeaux},
number = {3},
pages = {729 -- 749},
publisher = {Université de Bordeaux III},
title = {{Weak multipliers for generalized van der Corput sequences}},
doi = {10.5802/jtnb.819},
volume = {24},
year = {2012},
}
@unpublished{2928,
abstract = { This paper addresses the problem of approximate MAP-MRF inference in general graphical models. Following [36], we consider a family of linear programming relaxations of the problem where each relaxation is specified by a set of nested pairs of factors for which the marginalization constraint needs to be enforced. We develop a generalization of the TRW-S algorithm [9] for this problem, where we use a decomposition into junction chains, monotonic w.r.t. some ordering on the nodes. This generalizes the monotonic chains in [9] in a natural way. We also show how to deal with nested factors in an efficient way. Experiments show an improvement over min-sum diffusion, MPLP and subgradient ascent algorithms on a number of computer vision and natural language processing problems. },
author = {Kolmogorov, Vladimir and Schoenemann, Thomas},
note = {arXiv preprint},
publisher = {ArXiv},
title = {{Generalized sequential tree-reweighted message passing}},
year = {2012},
}
@inproceedings{2930,
abstract = {In this paper we investigate k-submodular functions. This natural family of discrete functions includes submodular and bisubmodular functions as the special cases k = 1 and k = 2 respectively.
In particular we generalize the known Min-Max-Theorem for submodular and bisubmodular functions. This theorem asserts that the minimum of the (bi)submodular function can be found by solving a maximization problem over a (bi)submodular polyhedron. We define a k-submodular polyhedron, prove a Min-Max-Theorem for k-submodular functions, and give a greedy algorithm to construct the vertices of the polyhedron.
},
author = {Huber, Anna and Kolmogorov, Vladimir},
booktitle = {Combinatorial Optimization -- ISCO 2012},
location = {Athens, Greece},
pages = {451 -- 462},
publisher = {Springer},
title = {{Towards minimizing k-submodular functions}},
doi = {10.1007/978-3-642-32147-4_40},
volume = {7422},
year = {2012},
}
@article{2959,
abstract = {We study maximum likelihood estimation in Gaussian graphical models from a geometric point of view. An algebraic elimination criterion allows us to find exact lower bounds on the number of observations needed to ensure that the maximum likelihood estimator (MLE) exists with probability one. This is applied to bipartite graphs, grids and colored graphs. We also study the ML degree, and we present the first instance of a graph for which the MLE exists with probability one, even when the number of observations equals the treewidth.},
author = {Uhler, Caroline},
journal = {Annals of Statistics},
number = {1},
pages = {238 -- 261},
publisher = {Institute of Mathematical Statistics},
title = {{Geometry of maximum likelihood estimation in Gaussian graphical models}},
doi = {10.1214/11-AOS957},
volume = {40},
year = {2012},
}
@article{2954,
abstract = {Spontaneous postsynaptic currents (PSCs) provide key information about the mechanisms of synaptic transmission and the activity modes of neuronal networks. However, detecting spontaneous PSCs in vitro and in vivo has been challenging, because of the small amplitude, the variable kinetics, and the undefined time of generation of these events. Here, we describe a, to our knowledge, new method for detecting spontaneous synaptic events by deconvolution, using a template that approximates the average time course of spontaneous PSCs. A recorded PSC trace is deconvolved from the template, resulting in a series of delta-like functions. The maxima of these delta-like events are reliably detected, revealing the precise onset times of the spontaneous PSCs. Among all detection methods, the deconvolution-based method has a unique temporal resolution, allowing the detection of individual events in high-frequency bursts. Furthermore, the deconvolution-based method has a high amplitude resolution, because deconvolution can substantially increase the signal/noise ratio. When tested against previously published methods using experimental data, the deconvolution-based method was superior for spontaneous PSCs recorded in vivo. Using the high-resolution deconvolution-based detection algorithm, we show that the frequency of spontaneous excitatory postsynaptic currents in dentate gyrus granule cells is 4.5 times higher in vivo than in vitro.},
author = {Pernia-Andrade, Alejandro and Goswami, Sarit and Stickler, Yvonne and Fröbe, Ulrich and Schlögl, Alois and Jonas, Peter M},
journal = {Biophysical Journal},
number = {7},
pages = {1429 -- 1439},
publisher = {Biophysical Society},
title = {{A deconvolution based method with high sensitivity and temporal resolution for detection of spontaneous synaptic currents in vitro and in vivo}},
doi = {10.1016/j.bpj.2012.08.039},
volume = {103},
year = {2012},
}
@article{2966,
abstract = {Background: The outcome of male-male competition can be predicted from the relative fighting qualities of the opponents, which often depend on their age. In insects, freshly emerged and still sexually inactive males are morphologically indistinct from older, sexually active males. These young inactive males may thus be easy targets for older males if they cannot conceal themselves from their attacks. The ant Cardiocondyla obscurior is characterised by lethal fighting between wingless ("ergatoid") males. Here, we analyse for how long young males are defenceless after eclosion, and how early adult males can detect the presence of rival males. Results: We found that old ergatoid males consistently won fights against ergatoid males younger than two days. Old males did not differentiate between different types of unpigmented pupae several days before emergence, but had more frequent contact to ready-to-eclose pupae of female sexuals and winged males than of workers and ergatoid males. In rare cases, old ergatoid males displayed alleviated biting of pigmented ergatoid male pupae shortly before adult eclosion, as well as copulation attempts to dark pupae of female sexuals and winged males. Ergatoid male behaviour may be promoted by a closer similarity of the chemical profile of ready-to-eclose pupae to the profile of adults than that of young pupae several days prior to emergence. Conclusion: Young ergatoid males of C. obscurior would benefit greatly by hiding their identity from older, resident males, as they are highly vulnerable during the first two days of their adult lives. In contrast to the winged males of the same species, which are able to prevent ergatoid male attacks by chemical female mimicry, young ergatoids do not seem to be able to produce a protective chemical profile. Conflicts in male-male competition between ergatoid males of different age thus seem to be resolved in favour of the older males. 
This might represent selection at the colony level rather than the individual level. © 2012 Cremer et al.; licensee BioMed Central Ltd.},
author = {Cremer, Sylvia and Suefuji, Masaki and Schrempf, Alexandra and Heinze, Jürgen},
journal = {BMC Ecology},
publisher = {BioMed Central},
title = {{The dynamics of male-male competition in Cardiocondyla obscurior ants}},
doi = {10.1186/1472-6785-12-7},
volume = {12},
year = {2012},
}
@article{3159,
abstract = {The structure of hierarchical networks in biological and physical systems has long been characterized using the Horton-Strahler ordering scheme. The scheme assigns an integer order to each edge in the network based on the topology of branching such that the order increases from distal parts of the network (e.g., mountain streams or capillaries) to the "root" of the network (e.g., the river outlet or the aorta). However, Horton-Strahler ordering cannot be applied to networks with loops because they create a contradiction in the edge ordering in terms of which edge precedes another in the hierarchy. Here, we present a generalization of the Horton-Strahler order to weighted planar reticular networks, where weights are assumed to correlate with the importance of network edges, e.g., weights estimated from edge widths may correlate to flow capacity. Our method assigns hierarchical levels not only to edges of the network, but also to its loops, and classifies the edges into reticular edges, which are responsible for loop formation, and tree edges. In addition, we perform a detailed and rigorous theoretical analysis of the sensitivity of the hierarchical levels to weight perturbations. In doing so, we show that the ordering of the reticular edges is more robust to noise in weight estimation than is the ordering of the tree edges. We discuss applications of this generalized Horton-Strahler ordering to the study of leaf venation and other biological networks.},
author = {Mileyko, Yuriy and Edelsbrunner, Herbert and Price, Charles and Weitz, Joshua},
journal = {PLoS One},
number = {6},
publisher = {Public Library of Science},
title = {{Hierarchical ordering of reticular networks}},
doi = {10.1371/journal.pone.0036715},
volume = {7},
year = {2012},
}
@article{3161,
abstract = {Some inflammatory stimuli trigger activation of the NLRP3 inflammasome by inducing efflux of cellular potassium. Loss of cellular potassium is known to potently suppress protein synthesis, leading us to test whether the inhibition of protein synthesis itself serves as an activating signal for the NLRP3 inflammasome. Murine bone marrow-derived macrophages, either primed by LPS or unprimed, were exposed to a panel of inhibitors of ribosomal function: ricin, cycloheximide, puromycin, pactamycin, and anisomycin. Macrophages were also exposed to nigericin, ATP, monosodium urate (MSU), and poly I:C. Synthesis of pro-IL-1β and release of IL-1β from cells in response to these agents was detected by immunoblotting and ELISA. Release of intracellular potassium was measured by mass spectrometry. Inhibition of translation by each of the tested translation inhibitors led to processing of IL-1β, which was released from cells. Processing and release of IL-1β was reduced or absent from cells deficient in NLRP3, ASC, or caspase-1, demonstrating the role of the NLRP3 inflammasome. Despite the inability of these inhibitors to trigger efflux of intracellular potassium, the addition of high extracellular potassium suppressed activation of the NLRP3 inflammasome. MSU and double-stranded RNA, which are known to activate the NLRP3 inflammasome, also substantially inhibited protein translation, supporting a close association between inhibition of translation and inflammasome activation. These data demonstrate that translational inhibition itself constitutes a heretofore-unrecognized mechanism underlying IL-1β dependent inflammatory signaling and that other physical, chemical, or pathogen-associated agents that impair translation may lead to IL-1β-dependent inflammation through activation of the NLRP3 inflammasome. 
For agents that inhibit translation through decreased cellular potassium, the application of high extracellular potassium restores protein translation and suppresses activation of the NLRP inflammasome. For agents that inhibit translation through mechanisms that do not involve loss of potassium, high extracellular potassium suppresses IL-1β processing through a mechanism that remains undefined.},
author = {Vyleta, Meghan and Wong, John and Magun, Bruce},
journal = {PLoS One},
number = {5},
publisher = {Public Library of Science},
title = {{Suppression of ribosomal function triggers innate immune signaling through activation of the NLRP3 inflammasome}},
doi = {10.1371/journal.pone.0036044},
volume = {7},
year = {2012},
}
@inproceedings{3123,
abstract = {We introduce the idea of using an explicit triangle mesh to track the air/fluid interface in a smoothed particle hydrodynamics (SPH) simulator. Once an initial surface mesh is created, this mesh is carried forward in time using nearby particle velocities to advect the mesh vertices. The mesh connectivity remains mostly unchanged across time-steps; it is only modified locally for topology change events or for the improvement of triangle quality. In order to ensure that the surface mesh does not diverge from the underlying particle simulation, we periodically project the mesh surface onto an implicit surface defined by the physics simulation. The mesh surface gives us several advantages over previous SPH surface tracking techniques. We demonstrate a new method for surface tension calculations that clearly outperforms the state of the art in SPH surface tension for computer graphics. We also demonstrate a method for tracking detailed surface information (like colors) that is less susceptible to numerical diffusion than competing techniques. Finally, our temporally-coherent surface mesh allows us to simulate high-resolution surface wave dynamics without being limited by the particle resolution of the SPH simulation.},
author = {Yu, Jihun and Wojtan, Christopher J and Turk, Greg and Yap, Chee},
booktitle = {Computer Graphics Forum},
location = {Cagliari, Sardinia, Italy},
number = {2},
pages = {815 -- 824},
publisher = {Blackwell Publishing},
title = {{Explicit mesh surfaces for particle based fluids}},
doi = {10.1111/j.1467-8659.2012.03062.x},
volume = {31},
year = {2012},
}
@article{3130,
abstract = {Essential genes code for fundamental cellular functions required for the viability of an organism. For this reason, essential genes are often highly conserved across organisms. However, this is not always the case: orthologues of genes that are essential in one organism are sometimes not essential in other organisms or are absent from their genomes. This suggests that, in the course of evolution, essential genes can be rendered nonessential. How can a gene become non-essential? Here we used genetic manipulation to deplete the products of 26 different essential genes in Escherichia coli. This depletion results in a lethal phenotype, which could often be rescued by the overexpression of a non-homologous, non-essential gene, most likely through replacement of the essential function. We also show that, in a smaller number of cases, the essential genes can be fully deleted from the genome, suggesting that complete functional replacement is possible. Finally, we show that essential genes whose function can be replaced in the laboratory are more likely to be non-essential or not present in other taxa. These results are consistent with the notion that patterns of evolutionary conservation of essential genes are influenced by their compensability-that is, by how easily they can be functionally replaced, for example through increased expression of other genes.},
author = {Bergmiller, Tobias and Ackermann, Martin and Silander, Olin},
journal = {PLoS Genetics},
number = {6},
publisher = {Public Library of Science},
title = {{Patterns of evolutionary conservation of essential genes correlate with their compensability}},
doi = {10.1371/journal.pgen.1002803},
volume = {8},
year = {2012},
}
@article{3166,
abstract = {There is evidence that the genetic code was established prior to the existence of proteins, when metabolism was powered by ribozymes. Also, early proto-organisms had to rely on simple anaerobic bioenergetic processes. In this work I propose that amino acid fermentation powered metabolism in the RNA world, and that this was facilitated by proto-adapters, the precursors of the tRNAs. Amino acids were used as carbon sources rather than as catalytic or structural elements. In modern bacteria, amino acid fermentation is known as the Stickland reaction. This pathway involves two amino acids: the first undergoes oxidative deamination, and the second acts as an electron acceptor through reductive deamination. This redox reaction results in two keto acids that are employed to synthesise ATP via substrate-level phosphorylation. The Stickland reaction is the basic bioenergetic pathway of some bacteria of the genus Clostridium. Two other facts support Stickland fermentation in the RNA world. First, several Stickland amino acid pairs are synthesised in abiotic amino acid synthesis. This suggests that amino acids that could be used as an energy substrate were freely available. Second, anticodons that have complementary sequences often correspond to amino acids that form Stickland pairs. The main hypothesis of this paper is that pairs of complementary proto-adapters were assigned to Stickland amino acids pairs. There are signatures of this hypothesis in the genetic code. Furthermore, it is argued that the proto-adapters formed double strands that brought amino acid pairs into proximity to facilitate their mutual redox reaction, structurally constraining the anticodon pairs that are assigned to these amino acid pairs. Significance tests which randomise the code are performed to study the extent of the variability of the energetic (ATP) yield. 
Random assignments can lead to a substantial yield of ATP and maintain enough variability, thus selection can act and refine the assignments into a proto-code that optimises the energetic yield. Monte Carlo simulations are performed to evaluate the establishment of these simple proto-codes, based on amino acid substitutions and codon swapping. In all cases, donor amino acids are assigned to anticodons composed of U+G, and have low redundancy (1-2 codons), whereas acceptor amino acids are assigned to the remaining codons. These bioenergetic and structural constraints allow for a metabolic role for amino acids before their co-option as catalyst cofactors. Reviewers: this article was reviewed by Prof. William Martin, Prof. Eors Szathmary (nominated by Dr. Gaspar Jekely) and Dr. Adam Kun (nominated by Dr. Sandor Pongor)},
author = {de Vladar, Harold P},
journal = {Biology Direct},
publisher = {BioMed Central},
title = {{Amino acid fermentation at the origin of the genetic code}},
doi = {10.1186/1745-6150-7-6},
volume = {7},
year = {2012},
}
@article{3262,
abstract = {Living cells must control the reading out or "expression" of information encoded in their genomes, and this regulation often is mediated by transcription factors--proteins that bind to DNA and either enhance or repress the expression of nearby genes. But the expression of transcription factor proteins is itself regulated, and many transcription factors regulate their own expression in addition to responding to other input signals. Here we analyze the simplest of such self-regulatory circuits, asking how parameters can be chosen to optimize information transmission from inputs to outputs in the steady state. Some nonzero level of self-regulation is almost always optimal, with self-activation dominant when transcription factor concentrations are low and self-repression dominant when concentrations are high. In steady state the optimal self-activation is never strong enough to induce bistability, although there is a limit in which the optimal parameters are very close to the critical point.},
author = {Tkacik, Gasper and Walczak, Aleksandra and Bialek, William},
journal = {Physical Review E: Statistical, Nonlinear, and Soft Matter Physics},
number = {4},
publisher = {American Physical Society},
title = {{Optimizing information flow in small genetic networks. III. A self-interacting gene}},
doi = {10.1103/PhysRevE.85.041903},
volume = {85},
year = {2012},
}
@article{3274,
abstract = {A boundary element model of a tunnel running through horizontally layered soil with anisotropic material properties is presented. Since there is no analytical fundamental solution for wave propagation inside a layered orthotropic medium in 3D, the fundamental displacements and stresses have to be calculated numerically. In our model this is done in the Fourier domain with respect to space and time. The assumption of a straight tunnel with infinite extension in the x direction makes it possible to decouple the system for every wave number kx, leading to a 2.5D-problem, which is suited for parallel computation. The special form of the fundamental solution, resulting from our Fourier ansatz, and the fact, that the calculation of the boundary integral equation is performed in the Fourier domain, enhances the stability and efficiency of the numerical calculations.},
author = {Rieckh, Georg and Kreuzer, Wolfgang and Waubke, Holger and Balazs, Peter},
journal = {Engineering Analysis with Boundary Elements},
number = {6},
pages = {960 -- 967},
publisher = {Elsevier},
title = {{A 2.5D-Fourier-BEM model for vibrations in a tunnel running through layered anisotropic soil}},
doi = {10.1016/j.enganabound.2011.12.014},
volume = {36},
year = {2012},
}
@inproceedings{3279,
abstract = {We show a hardness-preserving construction of a PRF from any length doubling PRG which improves upon known constructions whenever we can put a non-trivial upper bound q on the number of queries to the PRF. Our construction requires only O(logq) invocations to the underlying PRG with each query. In comparison, the number of invocations by the best previous hardness-preserving construction (GGM using Levin's trick) is logarithmic in the hardness of the PRG. For example, starting from an exponentially secure PRG {0,1} n → {0,1} 2n, we get a PRF which is exponentially secure if queried at most q = exp(√n)times and where each invocation of the PRF requires Θ(√n) queries to the underlying PRG. This is much less than the Θ(n) required by known constructions.
},
author = {Jain, Abhishek and Pietrzak, Krzysztof Z and Tentes, Aris},
booktitle = {Theory of Cryptography -- TCC 2012},
location = {Taormina, Sicily, Italy},
pages = {369 -- 382},
publisher = {Springer},
title = {{Hardness preserving constructions of pseudorandom functions}},
doi = {10.1007/978-3-642-28914-9_21},
volume = {7194},
year = {2012},
}
@inproceedings{3281,
abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1} m → {0,1}* amplifies relative lossiness from ℓ/n to L/m if for every function f:{0,1} n → {0,1} n it holds that 1 If f is injective then so is C f. 2 If f has image size of at most 2 n-ℓ, then C f has image size at most 2 m-L. The question is whether such C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(logn)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
booktitle = {Theory of Cryptography -- TCC 2012},
location = {Taormina, Sicily, Italy},
pages = {458 -- 475},
publisher = {Springer},
title = {{Lossy functions do not amplify well}},
doi = {10.1007/978-3-642-28914-9_26},
volume = {7194},
year = {2012},
}
@inproceedings{3250,
abstract = {The Learning Parity with Noise (LPN) problem has recently found many applications in cryptography as the hardness assumption underlying the constructions of "provably secure" cryptographic schemes like encryption or authentication protocols. Being provably secure means that the scheme comes with a proof showing that the existence of an efficient adversary against the scheme implies that the underlying hardness assumption is wrong. LPN based schemes are appealing for theoretical and practical reasons. On the theoretical side, LPN based schemes offer a very strong security guarantee. The LPN problem is equivalent to the problem of decoding random linear codes, a problem that has been extensively studied in the last half century. The fastest known algorithms run in exponential time and unlike most number-theoretic problems used in cryptography, the LPN problem does not succumb to known quantum algorithms. On the practical side, LPN based schemes are often extremely simple and efficient in terms of code-size as well as time and space requirements. This makes them prime candidates for light-weight devices like RFID tags, which are too weak to implement standard cryptographic primitives like the AES block-cipher. This talk will be a gentle introduction to provable security using simple LPN based schemes as examples. Starting from pseudorandom generators and symmetric key encryption, over secret-key authentication protocols, and, if time admits, touching on recent constructions of public-key identification, commitments and zero-knowledge proofs.},
author = {Pietrzak, Krzysztof Z},
booktitle = {SOFSEM 2012: Theory and Practice of Computer Science},
location = {Špindlerův Mlýn, Czech Republic},
pages = {99 -- 114},
publisher = {Springer},
title = {{Cryptography from learning parity with noise}},
doi = {10.1007/978-3-642-27660-6_9},
volume = {7147},
year = {2012},
}
@article{3248,
abstract = {We describe RTblob, a high speed vision system that detects objects in cluttered scenes based on their color and shape at a speed of over 800 frames/s. Because the system is available as open-source software and relies only on off-the-shelf PC hardware components, it can provide the basis for multiple application scenarios. As an illustrative example, we show how RTblob can be used in a robotic table tennis scenario to estimate ball trajectories through 3D space simultaneously from four cameras images at a speed of 200 Hz.},
author = {Lampert, Christoph and Peters, Jan},
journal = {Journal of Real-Time Image Processing},
number = {1},
pages = {31 -- 41},
publisher = {Springer},
title = {{Real-time detection of colored objects in multiple camera streams with off-the-shelf hardware components}},
doi = {10.1007/s11554-010-0168-3},
volume = {7},
year = {2012},
}
@article{3243,
author = {Danowski, Patrick},
journal = {Büchereiperspektiven},
pages = {11},
publisher = {Büchereiverband Österreichs},
title = {{Zwischen Technologie und Information}},
volume = {1/2012},
year = {2012},
}