@article{3185,
abstract = {This paper describes models and algorithms for the real-time segmentation of foreground from background layers in stereo video sequences. Automatic separation of layers from color/contrast or from stereo alone is known to be error-prone. Here, color, contrast, and stereo matching information are fused to infer layers accurately and efficiently. The first algorithm, Layered Dynamic Programming (LDP), solves stereo in an extended six-state space that represents both foreground/background layers and occluded regions. The stereo-match likelihood is then fused with a contrast-sensitive color model that is learned on-the-fly and stereo disparities are obtained by dynamic programming. The second algorithm, Layered Graph Cut (LGC), does not directly solve stereo. Instead, the stereo match likelihood is marginalized over disparities to evaluate foreground and background hypotheses and then fused with a contrast-sensitive color model like the one used in LDP. Segmentation is solved efficiently by ternary graph cut. Both algorithms are evaluated with respect to ground truth data and found to have similar performance, substantially better than either stereo or color/contrast alone. However, their characteristics with respect to computational efficiency are rather different. The algorithms are demonstrated in the application of background substitution and shown to give good quality composite video output.},
author = {Kolmogorov, Vladimir and Criminisi, Antonio and Blake, Andrew and Cross, Geoffrey and Rother, Carsten},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {9},
pages = {1480 -- 1492},
publisher = {IEEE},
title = {{Probabilistic fusion of stereo with color and contrast for bilayer segmentation}},
doi = {10.1109/TPAMI.2006.193},
volume = {28},
year = {2006},
}
@inproceedings{3186,
abstract = {We introduce a new approach to modelling gradient flows of contours and surfaces. While standard variational methods (e.g. level sets) compute local interface motion in a differential fashion by estimating local contour velocity via energy derivatives, we propose to solve surface evolution PDEs by explicitly estimating integral motion of the whole surface. We formulate an optimization problem directly based on an integral characterization of gradient flow as an infinitesimal move of the (whole) surface giving the largest energy decrease among all moves of equal size. We show that this problem can be efficiently solved using recent advances in algorithms for global hypersurface optimization [4, 2, 11]. In particular, we employ the geo-cuts method [4] that uses ideas from integral geometry to represent continuous surfaces as cuts on discrete graphs. The resulting interface evolution algorithm is validated on some 2D and 3D examples similar to typical demonstrations of level-set methods. Our method can compute gradient flows of hypersurfaces with respect to a fairly general class of continuous functional and it is flexible with respect to distance metrics on the space of contours/surfaces. Preliminary tests for standard L2 distance metric demonstrate numerical stability, topological changes and an absence of any oscillatory motion.},
author = {Boykov, Yuri and Kolmogorov, Vladimir and Cremers, Daniel and Delong, Andrew},
pages = {409 -- 422},
booktitle = {Computer Vision -- ECCV 2006},
publisher = {Springer},
title = {{An integral solution to surface evolution PDEs via geo cuts}},
doi = {10.1007/11744078_32},
volume = {3953},
year = {2006},
}
@inproceedings{3188,
abstract = {We introduce the term cosegmentation which denotes the task of segmenting simultaneously the common parts of an image pair. A generative model for cosegmentation is presented. Inference in the model leads to minimizing an energy with an MRF term encoding spatial coherency and a global constraint which attempts to match the appearance histograms of the common parts. This energy has not been proposed previously and its optimization is challenging and NP-hard. For this problem a novel optimization scheme which we call trust region graph cuts is presented. We demonstrate that this framework has the potential to improve a wide range of research: Object driven image retrieval, video tracking and segmentation, and interactive image editing. The power of the framework lies in its generality, the common part can be a rigid/non-rigid object (or scene), observed from different viewpoints or even similar objects of the same class.},
author = {Rother, Carsten and Kolmogorov, Vladimir and Minka, Thomas P and Blake, Andrew},
pages = {993 -- 1000},
booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)},
publisher = {IEEE},
title = {{Cosegmentation of image pairs by histogram matching - Incorporating a global constraint into MRFs}},
doi = {10.1109/CVPR.2006.91},
year = {2006},
}
@inproceedings{3189,
abstract = {This paper presents an algorithm capable of real-time separation of foreground from background in monocular video sequences. Automatic segmentation of layers from colour/contrast or from motion alone is known to be error-prone. Here motion, colour and contrast cues are probabilistically fused together with spatial and temporal priors to infer layers accurately and efficiently. Central to our algorithm is the fact that pixel velocities are not needed, thus removing the need for optical flow estimation, with its tendency to error and computational expense. Instead, an efficient motion vs non-motion classifier is trained to operate directly and jointly on intensity-change and contrast. Its output is then fused with colour information. The prior on segmentation is represented by a second order, temporal, Hidden Markov Model, together with a spatial MRF favouring coherence except where contrast is high. Finally, accurate layer segmentation and explicit occlusion detection are efficiently achieved by binary graph cut. The segmentation accuracy of the proposed algorithm is quantitatively evaluated with respect to existing ground-truth data and found to be comparable to the accuracy of a state of the art stereo segmentation algorithm. Fore-ground/background segmentation is demonstrated in the application of live background substitution and shown to generate convincingly good quality composite video.},
author = {Criminisi, Antonio and Cross, Geoffrey and Blake, Andrew and Kolmogorov, Vladimir},
pages = {53 -- 60},
booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR)},
publisher = {IEEE},
title = {{Bilayer segmentation of live video}},
doi = {10.1109/CVPR.2006.69},
volume = {1},
year = {2006},
}
@article{3190,
abstract = {Algorithms for discrete energy minimization are of fundamental importance in computer vision. In this paper, we focus on the recent technique proposed by Wainwright et al. (Nov. 2005)- tree-reweighted max-product message passing (TRW). It was inspired by the problem of maximizing a lower bound on the energy. However, the algorithm is not guaranteed to increase this bound - it may actually go down. In addition, TRW does not always converge. We develop a modification of this algorithm which we call sequential tree-reweighted message passing. Its main property is that the bound is guaranteed not to decrease. We also give a weak tree agreement condition which characterizes local maxima of the bound with respect to TRW algorithms. We prove that our algorithm has a limit point that achieves weak tree agreement. Finally, we show that, our algorithm requires half as much memory as traditional message passing approaches. Experimental results demonstrate that on certain synthetic and real problems, our algorithm outperforms both the ordinary belief propagation and tree-reweighted algorithm in (M. J. Wainwright, et al., Nov. 2005). In addition, on stereo problems with Potts interactions, we obtain a lower energy than graph cuts.},
author = {Kolmogorov, Vladimir},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {10},
pages = {1568 -- 1583},
publisher = {IEEE},
title = {{Convergent tree reweighted message passing for energy minimization}},
doi = {10.1109/TPAMI.2006.200},
volume = {28},
year = {2006},
}
@inproceedings{3214,
abstract = {The Feistel-network is a popular structure underlying many block-ciphers where the cipher is constructed from many simpler rounds, each defined by some function which is derived from the secret key.
Luby and Rackoff showed that the three-round Feistel-network – each round instantiated with a pseudorandom function secure against adaptive chosen plaintext attacks (CPA) – is a CPA secure pseudorandom permutation, thus giving some confidence in the soundness of using a Feistel-network to design block-ciphers.
But the round functions used in actual block-ciphers are – for efficiency reasons – far from being pseudorandom. We investigate the security of the Feistel-network against CPA distinguishers when the only security guarantee we have for the round functions is that they are secure against non-adaptive chosen plaintext attacks (nCPA). We show that in the information-theoretic setting, four rounds with nCPA secure round functions are sufficient (and necessary) to get a CPA secure permutation. Unfortunately, this result does not translate into the more interesting pseudorandom setting. In fact, under the so-called Inverse Decisional Diffie-Hellman assumption the Feistel-network with four rounds, each instantiated with a nCPA secure pseudorandom function, is in general not a CPA secure pseudorandom permutation.},
author = {Maurer, Ueli M and Oswald, Yvonne A and Pietrzak, Krzysztof and Sjödin, Johan},
pages = {391 -- 408},
booktitle = {Advances in Cryptology -- EUROCRYPT 2006},
publisher = {Springer},
title = {{Luby-Rackoff ciphers from weak round functions}},
doi = {10.1007/11761679_24},
volume = {4004},
year = {2006},
}
@inproceedings{3215,
abstract = {Most cryptographic primitives such as encryption, authentication or secret sharing require randomness. Usually one assumes that perfect randomness is available, but those primitives might also be realized under weaker assumptions. In this work we continue the study of building secure cryptographic primitives from imperfect random sources initiated by Dodis and Spencer (FOCS’02). Their main result shows that there exists a (high-entropy) source of randomness allowing for perfect encryption of a bit, and yet from which one cannot extract even a single weakly random bit, separating encryption from extraction. Our main result separates encryption from 2-out-2 secret sharing (both in the information-theoretic and in the computational settings): any source which can be used to achieve one-bit encryption also can be used for 2-out-2 secret sharing of one bit, but the converse is false, even for high-entropy sources. Therefore, possibility of extraction strictly implies encryption, which in turn strictly implies 2-out-2 secret sharing.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof and Przydatek, Bartosz},
pages = {601 -- 616},
booktitle = {Theory of Cryptography -- TCC 2006},
publisher = {Springer},
title = {{Separating sources for encryption and secret sharing}},
doi = {10.1007/11681878_31},
volume = {3876},
year = {2006},
}
@inproceedings{3216,
abstract = {We prove a new upper bound on the advantage of any adversary for distinguishing the encrypted CBC-MAC (EMAC) based on random permutations from a random function. Our proof uses techniques recently introduced in [BPR05], which again were inspired by [DGH + 04].
The bound we prove is tight — in the sense that it matches the advantage of known attacks up to a constant factor — for a wide range of the parameters: let n denote the block-size, q the number of queries the adversary is allowed to make and ℓ an upper bound on the length (i.e. number of blocks) of the messages, then for ℓ ≤ 2 n/8 and q≥ł2 the advantage is in the order of q 2/2 n (and in particular independent of ℓ). This improves on the previous bound of q 2ℓΘ(1/ln ln ℓ)/2 n from [BPR05] and matches the trivial attack (which thus is basically optimal) where one simply asks random queries until a collision is found.},
author = {Pietrzak, Krzysztof},
pages = {168 -- 179},
booktitle = {Automata, Languages and Programming -- ICALP 2006},
publisher = {Springer},
title = {{A tight bound for EMAC}},
doi = {10.1007/11787006_15},
volume = {4052},
year = {2006},
}
@inproceedings{3217,
abstract = {To prove that a secure key-agreement protocol exists one must at least show P ≠NP. Moreover any proof that the sequential composition of two non-adaptively secure pseudorandom functions is secure against at least two adaptive queries must falsify the decisional Diffie-Hellman assumption, a standard assumption from public-key cryptography. Hence proving any of this two seemingly unrelated statements would require a significant breakthrough. We show that at least one of the two statements is true.
To our knowledge this gives the first positive cryptographic result (namely that composition implies some weak adaptive security) which holds in Minicrypt, but not in Cryptomania, i.e. under the assumption that one-way functions exist, but public-key cryptography does not.},
author = {Pietrzak, Krzysztof},
pages = {328 -- 338},
booktitle = {Advances in Cryptology -- EUROCRYPT 2006},
publisher = {Springer},
title = {{Composition implies adaptive security in minicrypt}},
doi = {10.1007/11761679_20},
volume = {4004},
year = {2006},
}
@inbook{3404,
author = {Janovjak, Harald and Sawhney, Ravi K and Stark, Martin and Mueller, Daniel J},
booktitle = {Techniques in Microscopy for Biomedical Applications},
pages = {213 -- 284},
publisher = {World Scientific Publishing},
title = {{Atomic force microscopy}},
volume = {2},
year = {2006},
}
@article{3413,
abstract = {Despite their crucial importance for cellular function, little is known about the folding mechanisms of membrane proteins. Recently details of the folding energy landscape were elucidated by atomic force microscope (AFM)-based single molecule force spectroscopy. Upon unfolding and extraction of individual membrane proteins energy barriers in structural elements such as loops and helices were mapped and quantified with the precision of a few amino acids.
Here we report on the next logical step: controlled refolding of single proteins into the membrane. First individual bacteriorhodopsin monomers were partially unfolded and extracted from the purple membrane by pulling at the C-terminal end with an AFM tip. Then by gradually lowering the tip, the protein was allowed to refold into the membrane while the folding force was recorded.
We discovered that upon refolding certain helices are pulled into the membraneagainst a sizable externalforce of several tens of picoNewton. From the mechanical work, which the helix performs on the AFM cantilever, we derive an upper limit for the Gibbs free folding energy. Subsequent unfolding allowed us to analyze the pattern of unfolding barriers and corroborate that the protein had refolded into the native state.},
author = {Kessler, Max and Gottschalk, Kay E and Janovjak, Harald and Mueller, Daniel J and Gaub, Hermann},
journal = {Journal of Molecular Biology},
number = {2},
pages = {644 -- 654},
publisher = {Elsevier},
title = {{Bacteriorhodopsin folds into the membrane against an external force}},
doi = {10.1016/j.jmb.2005.12.065},
volume = {357},
year = {2006},
}
@article{3414,
abstract = {Mechanisms of folding and misfolding of membrane proteins are of interest in cell biology. Recently, we have established single-molecule force spectroscopy to observe directly the stepwise folding of the Na+/H+antiporter NhaA from Escherichia coli in vitro. Here, we improved this approach significantly to track the folding intermediates of asingle NhaA polypeptide forming structural segments such as the Na+-binding site, transmembrane α-helices, and helical pairs. The folding rates of structural segments ranged from 0.31 s−1 to 47 s−1, providing detailed insight into a distinct folding hierarchy of an unfolded polypeptide into the native membrane protein structure. In some cases, however, the folding chain formed stable and kinetically trapped non-native structures, which could be assigned to misfolding events of the antiporter.},
author = {Kedrov, Alexej and Janovjak, Harald and Ziegler, Christine and Kühlbrandt, Werner and Mueller, Daniel J},
journal = {Journal of Molecular Biology},
number = {1},
pages = {2 -- 8},
publisher = {Elsevier},
title = {{Observing folding pathways and kinetics of a single sodium-proton antiporter from Escherichia coli}},
doi = {10.1016/j.jmb.2005.10.028},
volume = {355},
year = {2006},
}
@article{3415,
author = {Janovjak, Harald and Kedrov, Alexej and Cisneros, David and Sapra, Tanuj K and Struckmeier, Jens and Mueller, Daniel J},
journal = {Neurobiology of Aging},
pages = {546 -- 561},
publisher = {Elsevier},
title = {{Imaging and detecting molecular interactions of single membrane proteins}},
doi = {10.1016/j.neurobiolaging.2005.03.031},
volume = {27},
year = {2006},
}
@unpublished{3431,
abstract = {Ising models with pairwise interactions are the least structured, or maximum-entropy, probability distributions that exactly reproduce measured pairwise correlations between spins. Here we use this equivalence to construct Ising models that describe the correlated spiking activity of populations of 40 neurons in the retina, and show that pairwise interactions account for observed higher-order correlations. By first finding a representative ensemble for observed networks we can create synthetic networks of 120 neurons, and find that with increasing size the networks operate closer to a critical point and start exhibiting collective behaviors reminiscent of spin glasses.},
author = {Tkacik, Gasper and Schneidman, E. and Berry, M. J. and Bialek, William S},
booktitle = {ArXiv},
note = {ArXiv preprint},
pages = {1 -- 4},
publisher = {ArXiv},
title = {{Ising models for networks of real neurons}},
year = {2006},
}
@article{3437,
abstract = {The mutational landscape model is a theoretical model describing sequence evolution in natural populations. However, recent experimental work has begun to test its predictions in laboratory populations of microbes. Several of these studies have focused on testing the prediction that the effects of beneficial mutations should be roughly exponentially distributed. The prediction appears to be borne out by most of these studies, at least qualitatively. Another study showed that a modified version of the model was able to predict, with reasonable accuracy, which of a ranked set of beneficial alleles will be fixed next. Although it remains to be seen whether the mutational landscape model adequately describes adaptation in organisms other than microbes, together these studies suggest that adaptive evolution has surprisingly general properties that can be successfully captured by theoretical models.},
author = {Betancourt, Andrea J and Bollback, Jonathan},
journal = {Current Opinion in Genetics & Development},
number = {6},
pages = {618 -- 623},
publisher = {Elsevier},
title = {{Fitness effects of beneficial mutations: the mutational landscape model in experimental evolution}},
doi = {10.1016/j.gde.2006.10.006},
volume = {16},
year = {2006},
}
@inproceedings{3449,
abstract = {We argue that games are expressive enough to encompass (history-based) access control, (resource) usage control (e.g., dynamic adaptive access control of reputation systems), accountability based controls (e.g., insurance), controls derived from rationality assumptions on participants (e.g., network mechanisms), and their composition. Building on the extensive research into games, we demonstrate that this expressive power coexists with a formal analysis framework comparable to that available for access control.},
author = {Chatterjee, Krishnendu and Jagadeesan, Radha and Pitcher, Corin},
pages = {70 -- 82},
booktitle = {IEEE Computer Security Foundations Workshop (CSFW)},
publisher = {IEEE},
title = {{Games for controls}},
doi = {10.1109/CSFW.2006.14},
year = {2006},
}
@article{3463,
abstract = {It is widely accepted that the hippocampus plays a major role in learning and memory. The mossy fiber synapse between granule cells in the dentate gyrus and pyramidal neurons in the CA3 region is a key component of the hippocampal trisynaptic circuit. Recent work, partially based on direct presynaptic patch-clamp recordings from hippocampal mossy fiber boutons, sheds light on the mechanisms of synaptic transmission and plasticity at mossy fiber synapses. A high Na(+) channel density in mossy fiber boutons leads to a large amplitude of the presynaptic action potential. Together with the fast gating of presynaptic Ca(2+) channels, this generates a large and brief presynaptic Ca(2+) influx, which can trigger transmitter release with high efficiency and temporal precision. The large number of release sites, the large size of the releasable pool of vesicles, and the huge extent of presynaptic plasticity confer unique strength to this synapse, suggesting a large impact onto the CA3 pyramidal cell network under specific behavioral conditions. The characteristic properties of the hippocampal mossy fiber synapse may be important for pattern separation and information storage in the dentate gyrus-CA3 cell network.},
author = {Bischofberger, Joseph and Engel, Dominique and Frotscher, Michael and Jonas, Peter},
journal = {Pflugers Archiv : European Journal of Physiology},
number = {3},
pages = {361 -- 372},
publisher = {Springer},
title = {{Timing and efficacy of transmitter release at mossy fiber synapses in the hippocampal network. (Review)}},
doi = {10.1007/s00424-006-0093-2},
volume = {453},
year = {2006},
}
@misc{3510,
abstract = {Embodiments automatically generate an accurate network of watertight NURBS patches from polygonal models of objects while automatically detecting and preserving character lines thereon. These embodiments generate from an initial triangulation of the surface, a hierarchy of progressively coarser triangulations of the surface by performing a sequence of edge contractions using a greedy algorithm that selects edge contractions by their numerical properties. Operations are also performed to connect the triangulations in the hierarchy using homeomorphisms that preserve the topology of the initial triangulation in the coarsest triangulation. A desired quadrangulation of the surface can then be generated by homeomorphically mapping edges of a coarsest triangulation in the hierarchy back to the initial triangulation. This quadrangulation is topologically consistent with the initial triangulation and is defined by a plurality of quadrangular patches. These quadrangular patches are linked together by a (U, V) mesh that is guaranteed to be continuous at patch boundaries. A grid is then preferably fit to each of the quadrangles in the resulting quadrangulation by decomposing each of the quadrangles into k.sup.2 smaller quadrangles. A watertight NURBS model may be generated from the resulting quadrangulation.},
author = {Edelsbrunner, Herbert and Fu, Ping and Nekhayev, Dmitry V and Facello, Michael and Williams, Steven P},
publisher = {Elsevier},
title = {{Method, apparatus and computer program products for automatically generating NURBS models of triangulated surfaces using homeomorphism}},
note = {US Patent 6,996,505 B1},
year = {2006},
}
@misc{3511,
abstract = {Methods, apparatus and computer program products provide efficient techniques for designing and printing shells of hearing-aid devices with a high degree of quality assurance and reliability and with a reduced number of manual and time consuming production steps and operations. These techniques also preferably provide hearing-aid shells having internal volumes that can approach a maximum allowable ratio of internal volume relative to external volume. These high internal volumes facilitate the inclusion of hearing-aid electrical components having higher degrees of functionality and/or the use of smaller and less conspicuous hearing-aid shells. A preferred method includes operations to generate a watertight digital model of a hearing-aid shell by thickening a three-dimensional digital model of a shell surface in a manner that eliminates self-intersections and results in a thickened model having an internal volume that is a high percentage of an external volume of the model. },
author = {Fu, Ping and Nekhayev, Dmitry V and Edelsbrunner, Herbert},
publisher = {Elsevier},
title = {{Manufacturing methods and systems for rapid production of hearing-aid shells}},
note = {US Patent 7,050,876 B1},
year = {2006},
}
@misc{3512,
abstract = {Methods, apparatus and computer program products provide efficient techniques for reconstructing surfaces from data point sets. These techniques include reconstructing surfaces from sets of scanned data points that have preferably undergone preprocessing operations to improve their quality by, for example, reducing noise and removing outliers. These techniques include reconstructing a dense and locally two-dimensionally distributed 3D point set (e.g., point cloud) by merging stars in two-dimensional weighted Delaunay triangulations within estimated tangent planes. The techniques include determining a plurality of stars from a plurality of points p.sub.i in a 3D point set S that at least partially describes the 3D surface, by projecting the plurality of points p.sub.i onto planes T.sub.i that are each estimated to be tangent about a respective one of the plurality of points p.sub.i. The plurality of stars are then merged into a digital model of the 3D surface.},
author = {Fletcher, Yates G and Gloth, Tobias and Edelsbrunner, Herbert and Fu, Ping},
publisher = {Elsevier},
title = {{Method, apparatus and computer products that reconstruct surfaces from data points}},
note = {US Patent 7,023,432 B2},
year = {2006},
}