@article{3018,
abstract = {The directional flow of the plant hormone auxin mediates multiple developmental processes, including patterning and tropisms. Apical and basal plasma membrane localization of AUXIN-RESISTANT1 (AUX1) and PIN-FORMED1 (PIN1) auxin transport components underpins the directionality of intercellular auxin flow in Arabidopsis thaliana roots. Here, we examined the mechanism of polar trafficking of AUX1. Real-time live cell analysis along with subcellular markers revealed that AUX1 resides at the apical plasma membrane of protophloem cells and at highly dynamic subpopulations of Golgi apparatus and endosomes in all cell types. Plasma membrane and intracellular pools of AUX1 are interconnected by actin-dependent constitutive trafficking, which is not sensitive to the vesicle trafficking inhibitor brefeldin A. AUX1 subcellular dynamics are not influenced by the auxin influx inhibitor NOA but are blocked by the auxin efflux inhibitors TIBA and PBA. Furthermore, auxin transport inhibitors and interference with the sterol composition of membranes disrupt polar AUX1 distribution at the plasma membrane. Compared with PIN1 trafficking, AUX1 dynamics display different sensitivities to trafficking inhibitors and are independent of the endosomal trafficking regulator ARF GEF GNOM. Hence, AUX1 uses a novel trafficking pathway in plants that is distinct from PIN trafficking, providing an additional mechanism for the fine regulation of auxin transport.},
author = {Kleine-Vehn, Jürgen and Dhonukshe, Pankaj and Swarup, Ranjan and Bennett, Malcolm and Friml, Jirí},
journal = {Plant Cell},
number = {11},
pages = {3171 -- 3181},
publisher = {American Society of Plant Biologists},
title = {{Subcellular trafficking of the Arabidopsis auxin influx carrier AUX1 uses a novel pathway distinct from PIN1}},
doi = {10.1105/tpc.106.042770},
volume = {18},
year = {2006},
}
@article{3020,
abstract = {High throughput microarray transcription analyses provide us with the expression profiles for large amounts of plant genes. However, their tissue and cellular resolution is limited. Thus, for detailed functional analysis, it is still necessary to examine the expression pattern of selected candidate genes at a cellular level. Here, we present an in situ mRNA hybridization method that is routinely used for the analysis of plant gene expression patterns. The protocol is optimized for whole mount mRNA localizations in Arabidopsis seedling tissues including embryos, roots, hypocotyls and young primary leaves. It can also be used for comparable tissues in other species. Part of the protocol can also be automated and performed by a liquid handling robot. Here we present a detailed protocol, recommended controls and troubleshooting, along with examples of several applications. The total time to carry out the entire procedure is ∼7 d, depending on the tissue used.},
author = {Hejátko, Jan and Blilou, Ikram and Brewer, Philip B and Friml, Jirí and Scheres, Ben and Benková, Eva},
journal = {Nature Protocols},
number = {4},
pages = {1939 -- 1946},
publisher = {Nature Publishing Group},
title = {{In situ hybridization technique for mRNA detection in whole mount Arabidopsis samples}},
doi = {10.1038/nprot.2006.333},
volume = {1},
year = {2006},
}
@article{3152,
abstract = {The basic concepts of the molecular machinery that mediates cell migration have been gleaned from cell culture systems. However, the three-dimensional environment within an organism presents migrating cells with a much greater challenge. They must move between and among other cells while interpreting multiple attractive and repulsive cues to choose their proper path. They must coordinate their cell adhesion with their surroundings and know when to start and stop moving. New insights into the control of these remaining mysteries have emerged from genetic dissection and live imaging of germ cell migration in Drosophila, zebrafish, and mouse embryos. In this review, we first describe germ cell migration in cellular and mechanistic detail in these different model systems. We then compare these systems to highlight the emerging principles. Finally, we contrast the migration of germ cells with that of immune and cancer cells to outline the conserved and different mechanisms.},
author = {Kunwar, Prabhat S and Siekhaus, Daria and Lehmann, Ruth},
journal = {Annual Review of Cell and Developmental Biology},
pages = {237 -- 265},
publisher = {Annual Reviews},
title = {{In vivo migration: A germ cell perspective}},
doi = {10.1146/annurev.cellbio.22.010305.103337},
volume = {22},
year = {2006},
}
@inproceedings{3180,
abstract = {One of the most exciting advances in early vision has been the development of efficient energy minimization algorithms. Many early vision tasks require labeling each pixel with some quantity such as depth or texture. While many such problems can be elegantly expressed in the language of Markov Random Fields (MRF's), the resulting energy minimization problems were widely viewed as intractable. Recently, algorithms such as graph cuts and loopy belief propagation (LBP) have proven to be very powerful: for example, such methods form the basis for almost all the top-performing stereo methods. Unfortunately, most papers define their own energy function, which is minimized with a specific algorithm of their choice. As a result, the tradeoffs among different energy minimization algorithms are not well understood. In this paper we describe a set of energy minimization benchmarks, which we use to compare the solution quality and running time of several common energy minimization algorithms. We investigate three promising recent methods - graph cuts, LBP, and tree-reweighted message passing - as well as the well-known older iterated conditional modes (ICM) algorithm. Our benchmark problems are drawn from published energy functions used for stereo, image stitching and interactive segmentation. We also provide a general-purpose software interface that allows vision researchers to easily switch between optimization methods with minimal overhead. We expect that the availability of our benchmarks and interface will make it significantly easier for vision researchers to adopt the best method for their specific problems. Benchmarks, code, results and images are available at http://vision.middlebury.edu/MRF.},
author = {Szeliski, Richard S and Zabih, Ramin and Scharstein, Daniel and Veksler, Olga and Kolmogorov, Vladimir and Agarwala, Aseem and Tappen, Marshall F and Rother, Carsten},
booktitle = {Computer Vision -- ECCV 2006},
pages = {16 -- 29},
publisher = {Springer},
title = {{A comparative study of energy minimization methods for Markov random fields}},
doi = {10.1007/11744047_2},
volume = {3952},
year = {2006},
}
@inproceedings{3184,
abstract = {Algorithms for discrete energy minimization play a fundamental role for low-level vision. Known techniques include graph cuts, belief propagation (BP) and recently introduced tree-reweighted message passing (TRW). So far, the standard benchmark for their comparison has been a 4-connected grid-graph arising in pixel-labelling stereo. This minimization problem, however, has been largely solved: recent work shows that for many scenes TRW finds the global optimum. Furthermore, it is known that a 4-connecled grid-graph is a poor stereo model since it does not take occlusions into account. We propose the problem of stereo with occlusions as a new test bed for minimization algorithms. This is a more challenging graph since it has much larger connectivity, and it also serves as a better stereo model. An attractive feature of this problem is that increased connectivity does not result in increased complexity of message passing algorithms. Indeed, one contribution of this paper is to show that sophisticated implementations of BP and TRW have the same time and memory complexity as that of 4-connecled grid-graph stereo. The main conclusion of our experimental study is that for our problem graph cut outperforms both TRW and BP considerably. TRW achieves consistently a lower energy than BP. However, as connectivity increases the speed of convergence of TRW becomes slower. Unlike 4-connected grids, the difference between the energy of the best optimization method and the lower bound of TRW appears significant. This shows the hardness of the problem and motivates future research.},
author = {Kolmogorov, Vladimir and Rother, Carsten},
booktitle = {Computer Vision -- ECCV 2006},
pages = {1 -- 15},
publisher = {Springer},
title = {{Comparison of energy minimization algorithms for highly connected graphs}},
doi = {10.1007/11744047_1},
volume = {3952},
year = {2006},
}
@article{3185,
abstract = {This paper describes models and algorithms for the real-time segmentation of foreground from background layers in stereo video sequences. Automatic separation of layers from color/contrast or from stereo alone is known to be error-prone. Here, color, contrast, and stereo matching information are fused to infer layers accurately and efficiently. The first algorithm, Layered Dynamic Programming (LDP), solves stereo in an extended six-state space that represents both foreground/background layers and occluded regions. The stereo-match likelihood is then fused with a contrast-sensitive color model that is learned on-the-fly and stereo disparities are obtained by dynamic programming. The second algorithm, Layered Graph Cut (LGC), does not directly solve stereo. Instead, the stereo match likelihood is marginalized over disparities to evaluate foreground and background hypotheses and then fused with a contrast-sensitive color model like the one used in LDP. Segmentation is solved efficiently by ternary graph cut. Both algorithms are evaluated with respect to ground truth data and found to have similar performance, substantially better than either stereo or color/contrast alone. However, their characteristics with respect to computational efficiency are rather different. The algorithms are demonstrated in the application of background substitution and shown to give good quality composite video output.},
author = {Kolmogorov, Vladimir and Criminisi, Antonio and Blake, Andrew and Cross, Geoffrey and Rother, Carsten},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {9},
pages = {1480 -- 1492},
publisher = {IEEE},
title = {{Probabilistic fusion of stereo with color and contrast for bilayer segmentation}},
doi = {10.1109/TPAMI.2006.193},
volume = {28},
year = {2006},
}
@inproceedings{3186,
abstract = {We introduce a new approach to modelling gradient flows of contours and surfaces. While standard variational methods (e.g. level sets) compute local interface motion in a differential fashion by estimating local contour velocity via energy derivatives, we propose to solve surface evolution PDEs by explicitly estimating integral motion of the whole surface. We formulate an optimization problem directly based on an integral characterization of gradient flow as an infinitesimal move of the (whole) surface giving the largest energy decrease among all moves of equal size. We show that this problem can be efficiently solved using recent advances in algorithms for global hypersurface optimization [4, 2, 11]. In particular, we employ the geo-cuts method [4] that uses ideas from integral geometry to represent continuous surfaces as cuts on discrete graphs. The resulting interface evolution algorithm is validated on some 2D and 3D examples similar to typical demonstrations of level-set methods. Our method can compute gradient flows of hypersurfaces with respect to a fairly general class of continuous functional and it is flexible with respect to distance metrics on the space of contours/surfaces. Preliminary tests for standard L2 distance metric demonstrate numerical stability, topological changes and an absence of any oscillatory motion.},
author = {Boykov, Yuri and Kolmogorov, Vladimir and Cremers, Daniel and Delong, Andrew},
booktitle = {Computer Vision -- ECCV 2006},
pages = {409 -- 422},
publisher = {Springer},
title = {{An integral solution to surface evolution PDEs via geo-cuts}},
doi = {10.1007/11744078_32},
volume = {3953},
year = {2006},
}
@inproceedings{3188,
abstract = {We introduce the term cosegmentation which denotes the task of segmenting simultaneously the common parts of an image pair. A generative model for cosegmentation is presented. Inference in the model leads to minimizing an energy with an MRF term encoding spatial coherency and a global constraint which attempts to match the appearance histograms of the common parts. This energy has not been proposed previously and its optimization is challenging and NP-hard. For this problem a novel optimization scheme which we call trust region graph cuts is presented. We demonstrate that this framework has the potential to improve a wide range of research: Object driven image retrieval, video tracking and segmentation, and interactive image editing. The power of the framework lies in its generality, the common part can be a rigid/non-rigid object (or scene), observed from different viewpoints or even similar objects of the same class.},
author = {Rother, Carsten and Kolmogorov, Vladimir and Minka, Thomas P and Blake, Andrew},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2006)},
pages = {993 -- 1000},
publisher = {IEEE},
title = {{Cosegmentation of image pairs by histogram matching - Incorporating a global constraint into MRFs}},
doi = {10.1109/CVPR.2006.91},
year = {2006},
}
@inproceedings{3189,
abstract = {This paper presents an algorithm capable of real-time separation of foreground from background in monocular video sequences. Automatic segmentation of layers from colour/contrast or from motion alone is known to be error-prone. Here motion, colour and contrast cues are probabilistically fused together with spatial and temporal priors to infer layers accurately and efficiently. Central to our algorithm is the fact that pixel velocities are not needed, thus removing the need for optical flow estimation, with its tendency to error and computational expense. Instead, an efficient motion vs non-motion classifier is trained to operate directly and jointly on intensity-change and contrast. Its output is then fused with colour information. The prior on segmentation is represented by a second order, temporal, Hidden Markov Model, together with a spatial MRF favouring coherence except where contrast is high. Finally, accurate layer segmentation and explicit occlusion detection are efficiently achieved by binary graph cut. The segmentation accuracy of the proposed algorithm is quantitatively evaluated with respect to existing ground-truth data and found to be comparable to the accuracy of a state of the art stereo segmentation algorithm. Fore-ground/background segmentation is demonstrated in the application of live background substitution and shown to generate convincingly good quality composite video.},
author = {Criminisi, Antonio and Cross, Geoffrey and Blake, Andrew and Kolmogorov, Vladimir},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR 2006)},
pages = {53 -- 60},
publisher = {IEEE},
title = {{Bilayer segmentation of live video}},
doi = {10.1109/CVPR.2006.69},
volume = {1},
year = {2006},
}
@article{3190,
abstract = {Algorithms for discrete energy minimization are of fundamental importance in computer vision. In this paper, we focus on the recent technique proposed by Wainwright et al. (Nov. 2005)- tree-reweighted max-product message passing (TRW). It was inspired by the problem of maximizing a lower bound on the energy. However, the algorithm is not guaranteed to increase this bound - it may actually go down. In addition, TRW does not always converge. We develop a modification of this algorithm which we call sequential tree-reweighted message passing. Its main property is that the bound is guaranteed not to decrease. We also give a weak tree agreement condition which characterizes local maxima of the bound with respect to TRW algorithms. We prove that our algorithm has a limit point that achieves weak tree agreement. Finally, we show that, our algorithm requires half as much memory as traditional message passing approaches. Experimental results demonstrate that on certain synthetic and real problems, our algorithm outperforms both the ordinary belief propagation and tree-reweighted algorithm in (M. J. Wainwright, et al., Nov. 2005). In addition, on stereo problems with Potts interactions, we obtain a lower energy than graph cuts.},
author = {Kolmogorov, Vladimir},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {10},
pages = {1568 -- 1583},
publisher = {IEEE},
title = {{Convergent tree reweighted message passing for energy minimization}},
doi = {10.1109/TPAMI.2006.200},
volume = {28},
year = {2006},
}
@inproceedings{3214,
abstract = {The Feistel-network is a popular structure underlying many block-ciphers where the cipher is constructed from many simpler rounds, each defined by some function which is derived from the secret key.
Luby and Rackoff showed that the three-round Feistel-network – each round instantiated with a pseudorandom function secure against adaptive chosen plaintext attacks (CPA) – is a CPA secure pseudorandom permutation, thus giving some confidence in the soundness of using a Feistel-network to design block-ciphers.
But the round functions used in actual block-ciphers are – for efficiency reasons – far from being pseudorandom. We investigate the security of the Feistel-network against CPA distinguishers when the only security guarantee we have for the round functions is that they are secure against non-adaptive chosen plaintext attacks (nCPA). We show that in the information-theoretic setting, four rounds with nCPA secure round functions are sufficient (and necessary) to get a CPA secure permutation. Unfortunately, this result does not translate into the more interesting pseudorandom setting. In fact, under the so-called Inverse Decisional Diffie-Hellman assumption the Feistel-network with four rounds, each instantiated with a nCPA secure pseudorandom function, is in general not a CPA secure pseudorandom permutation.},
author = {Maurer, Ueli M and Oswald, Yvonne A and Pietrzak, Krzysztof and Sjödin, Johan},
booktitle = {Advances in Cryptology -- EUROCRYPT 2006},
pages = {391 -- 408},
publisher = {Springer},
title = {{Luby-Rackoff ciphers from weak round functions}},
doi = {10.1007/11761679_24},
volume = {4004},
year = {2006},
}
@inproceedings{3215,
abstract = {Most cryptographic primitives such as encryption, authentication or secret sharing require randomness. Usually one assumes that perfect randomness is available, but those primitives might also be realized under weaker assumptions. In this work we continue the study of building secure cryptographic primitives from imperfect random sources initiated by Dodis and Spencer (FOCS’02). Their main result shows that there exists a (high-entropy) source of randomness allowing for perfect encryption of a bit, and yet from which one cannot extract even a single weakly random bit, separating encryption from extraction. Our main result separates encryption from 2-out-2 secret sharing (both in the information-theoretic and in the computational settings): any source which can be used to achieve one-bit encryption also can be used for 2-out-2 secret sharing of one bit, but the converse is false, even for high-entropy sources. Therefore, possibility of extraction strictly implies encryption, which in turn strictly implies 2-out-2 secret sharing.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof and Przydatek, Bartosz},
booktitle = {Theory of Cryptography (TCC 2006)},
pages = {601 -- 616},
publisher = {Springer},
title = {{Separating sources for encryption and secret sharing}},
doi = {10.1007/11681878_31},
volume = {3876},
year = {2006},
}
@inproceedings{3216,
abstract = {We prove a new upper bound on the advantage of any adversary for distinguishing the encrypted CBC-MAC (EMAC) based on random permutations from a random function. Our proof uses techniques recently introduced in [BPR05], which again were inspired by [DGH + 04].
The bound we prove is tight — in the sense that it matches the advantage of known attacks up to a constant factor — for a wide range of the parameters: let n denote the block-size, q the number of queries the adversary is allowed to make and ℓ an upper bound on the length (i.e. number of blocks) of the messages, then for ℓ ≤ 2 n/8 and q ≥ ℓ^2 the advantage is in the order of q 2/2 n (and in particular independent of ℓ). This improves on the previous bound of q 2ℓΘ(1/ln ln ℓ)/2 n from [BPR05] and matches the trivial attack (which thus is basically optimal) where one simply asks random queries until a collision is found.},
author = {Pietrzak, Krzysztof},
booktitle = {Automata, Languages and Programming (ICALP 2006)},
pages = {168 -- 179},
publisher = {Springer},
title = {{A tight bound for EMAC}},
doi = {10.1007/11787006_15},
volume = {4052},
year = {2006},
}
@inproceedings{3217,
abstract = {To prove that a secure key-agreement protocol exists one must at least show P ≠NP. Moreover any proof that the sequential composition of two non-adaptively secure pseudorandom functions is secure against at least two adaptive queries must falsify the decisional Diffie-Hellman assumption, a standard assumption from public-key cryptography. Hence proving any of this two seemingly unrelated statements would require a significant breakthrough. We show that at least one of the two statements is true.
To our knowledge this gives the first positive cryptographic result (namely that composition implies some weak adaptive security) which holds in Minicrypt, but not in Cryptomania, i.e. under the assumption that one-way functions exist, but public-key cryptography does not.},
author = {Pietrzak, Krzysztof},
booktitle = {Advances in Cryptology -- EUROCRYPT 2006},
pages = {328 -- 338},
publisher = {Springer},
title = {{Composition implies adaptive security in minicrypt}},
doi = {10.1007/11761679_20},
volume = {4004},
year = {2006},
}
@inbook{3404,
author = {Janovjak, Harald and Sawhney, Ravi K and Stark, Martin and Mueller, Daniel J},
booktitle = {Techniques in Microscopy for Biomedical Applications},
pages = {213 -- 284},
publisher = {World Scientific Publishing},
title = {{Atomic force microscopy}},
volume = {2},
year = {2006},
}
@article{3413,
abstract = {Despite their crucial importance for cellular function, little is known about the folding mechanisms of membrane proteins. Recently details of the folding energy landscape were elucidated by atomic force microscope (AFM)-based single molecule force spectroscopy. Upon unfolding and extraction of individual membrane proteins energy barriers in structural elements such as loops and helices were mapped and quantified with the precision of a few amino acids.
Here we report on the next logical step: controlled refolding of single proteins into the membrane. First individual bacteriorhodopsin monomers were partially unfolded and extracted from the purple membrane by pulling at the C-terminal end with an AFM tip. Then by gradually lowering the tip, the protein was allowed to refold into the membrane while the folding force was recorded.
We discovered that upon refolding certain helices are pulled into the membrane against a sizable external force of several tens of picoNewton. From the mechanical work, which the helix performs on the AFM cantilever, we derive an upper limit for the Gibbs free folding energy. Subsequent unfolding allowed us to analyze the pattern of unfolding barriers and corroborate that the protein had refolded into the native state.},
author = {Kessler, Max and Gottschalk, Kay E and Janovjak, Harald and Mueller, Daniel J and Gaub, Hermann},
journal = {Journal of Molecular Biology},
number = {2},
pages = {644 -- 654},
publisher = {Elsevier},
title = {{Bacteriorhodopsin folds into the membrane against an external force}},
doi = {10.1016/j.jmb.2005.12.065},
volume = {357},
year = {2006},
}
@article{3414,
abstract = {Mechanisms of folding and misfolding of membrane proteins are of interest in cell biology. Recently, we have established single-molecule force spectroscopy to observe directly the stepwise folding of the Na+/H+antiporter NhaA from Escherichia coli in vitro. Here, we improved this approach significantly to track the folding intermediates of a single NhaA polypeptide forming structural segments such as the Na+-binding site, transmembrane α-helices, and helical pairs. The folding rates of structural segments ranged from 0.31 s−1 to 47 s−1, providing detailed insight into a distinct folding hierarchy of an unfolded polypeptide into the native membrane protein structure. In some cases, however, the folding chain formed stable and kinetically trapped non-native structures, which could be assigned to misfolding events of the antiporter.},
author = {Kedrov, Alexej and Janovjak, Harald and Ziegler, Christine and Kühlbrandt, Werner and Mueller, Daniel J},
journal = {Journal of Molecular Biology},
number = {1},
pages = {2 -- 8},
publisher = {Elsevier},
title = {{Observing folding pathways and kinetics of a single sodium-proton antiporter from Escherichia coli}},
doi = {10.1016/j.jmb.2005.10.028},
volume = {355},
year = {2006},
}
@article{3415,
author = {Janovjak, Harald and Kedrov, Alexej and Cisneros, David and Sapra, Tanuj K and Struckmeier, Jens and Mueller, Daniel J},
journal = {Neurobiology of Aging},
pages = {546 -- 561},
publisher = {Elsevier},
title = {{Imaging and detecting molecular interactions of single membrane proteins}},
doi = {10.1016/j.neurobiolaging.2005.03.031},
volume = {27},
year = {2006},
}
@unpublished{3431,
abstract = {Ising models with pairwise interactions are the least structured, or maximum-entropy, probability distributions that exactly reproduce measured pairwise correlations between spins. Here we use this equivalence to construct Ising models that describe the correlated spiking activity of populations of 40 neurons in the retina, and show that pairwise interactions account for observed higher-order correlations. By first finding a representative ensemble for observed networks we can create synthetic networks of 120 neurons, and find that with increasing size the networks operate closer to a critical point and start exhibiting collective behaviors reminiscent of spin glasses.},
author = {Tkacik, Gasper and Schneidman, E. and Berry, M. J. and Bialek, William S},
booktitle = {ArXiv},
note = {ArXiv preprint},
pages = {1 -- 4},
publisher = {ArXiv},
title = {{Ising models for networks of real neurons}},
year = {2006},
}
@article{3437,
abstract = {The mutational landscape model is a theoretical model describing sequence evolution in natural populations. However, recent experimental work has begun to test its predictions in laboratory populations of microbes. Several of these studies have focused on testing the prediction that the effects of beneficial mutations should be roughly exponentially distributed. The prediction appears to be borne out by most of these studies, at least qualitatively. Another study showed that a modified version of the model was able to predict, with reasonable accuracy, which of a ranked set of beneficial alleles will be fixed next. Although it remains to be seen whether the mutational landscape model adequately describes adaptation in organisms other than microbes, together these studies suggest that adaptive evolution has surprisingly general properties that can be successfully captured by theoretical models.},
author = {Betancourt, Andrea J and Bollback, Jonathan},
journal = {Current Opinion in Genetics \& Development},
number = {6},
pages = {618 -- 623},
publisher = {Elsevier},
title = {{Fitness effects of beneficial mutations: the mutational landscape model in experimental evolution}},
doi = {10.1016/j.gde.2006.10.006},
volume = {16},
year = {2006},
}
@inproceedings{3449,
abstract = {We argue that games are expressive enough to encompass (history-based) access control, (resource) usage control (e.g., dynamic adaptive access control of reputation systems), accountability based controls (e.g., insurance), controls derived from rationality assumptions on participants (e.g., network mechanisms), and their composition. Building on the extensive research into games, we demonstrate that this expressive power coexists with a formal analysis framework comparable to that available for access control.},
author = {Chatterjee, Krishnendu and Jagadeesan, Radha and Pitcher, Corin},
booktitle = {19th IEEE Computer Security Foundations Workshop (CSFW 2006)},
pages = {70 -- 82},
publisher = {IEEE},
title = {{Games for controls}},
doi = {10.1109/CSFW.2006.14},
year = {2006},
}
@article{3463,
abstract = {It is widely accepted that the hippocampus plays a major role in learning and memory. The mossy fiber synapse between granule cells in the dentate gyrus and pyramidal neurons in the CA3 region is a key component of the hippocampal trisynaptic circuit. Recent work, partially based on direct presynaptic patch-clamp recordings from hippocampal mossy fiber boutons, sheds light on the mechanisms of synaptic transmission and plasticity at mossy fiber synapses. A high Na(+) channel density in mossy fiber boutons leads to a large amplitude of the presynaptic action potential. Together with the fast gating of presynaptic Ca(2+) channels, this generates a large and brief presynaptic Ca(2+) influx, which can trigger transmitter release with high efficiency and temporal precision. The large number of release sites, the large size of the releasable pool of vesicles, and the huge extent of presynaptic plasticity confer unique strength to this synapse, suggesting a large impact onto the CA3 pyramidal cell network under specific behavioral conditions. The characteristic properties of the hippocampal mossy fiber synapse may be important for pattern separation and information storage in the dentate gyrus-CA3 cell network.},
author = {Bischofberger, Joseph and Engel, Dominique and Frotscher, Michael and Jonas, Peter},
journal = {Pflugers Archiv: European Journal of Physiology},
number = {3},
pages = {361 -- 372},
publisher = {Springer},
title = {{Timing and efficacy of transmitter release at mossy fiber synapses in the hippocampal network. (Review)}},
doi = {10.1007/s00424-006-0093-2},
volume = {453},
year = {2006},
}
@inproceedings{3499,
abstract = {We study infinite stochastic games played by n-players on a finite graph with goals specified by sets of infinite traces. The games are concurrent (each player simultaneously and independently chooses an action at each round), stochastic (the next state is determined by a probability distribution depending on the current state and the chosen actions), infinite (the game continues for an infinite number of rounds), nonzero-sum (the players’ goals are not necessarily conflicting), and undiscounted. We show that if each player has an upward-closed objective, then there exists an ε-Nash equilibrium in memoryless strategies, for every ε>0; and exact Nash equilibria need not exist. Upward-closure of an objective means that if a set Z of infinitely repeating states is winning, then all supersets of Z of infinitely repeating states are also winning. Memoryless strategies are strategies that are independent of history of plays and depend only on the current state. We also study the complexity of finding values (payoff profile) of an ε-Nash equilibrium. We show that the values of an ε-Nash equilibrium in nonzero-sum concurrent games with upward-closed objectives for all players can be computed by computing ε-Nash equilibrium values of nonzero-sum concurrent games with reachability objectives for all players and a polynomial procedure. As a consequence we establish that values of an ε-Nash equilibrium can be computed in TFNP (total functional NP), and hence in EXPTIME. },
author = {Chatterjee, Krishnendu},
booktitle = {CONCUR 2006 -- Concurrency Theory},
pages = {271 -- 286},
publisher = {Springer},
title = {{Nash equilibrium for upward-closed objectives}},
doi = {10.1007/11874683_18},
volume = {4207},
year = {2006},
}
@inproceedings{3500,
abstract = {The classical algorithm for solving Büchi games requires time O(n · m) for game graphs with n states and m edges. For game graphs with constant outdegree, the best known algorithm has running time O(n^2/log n). We present two new algorithms for Büchi games. First, we give an algorithm that performs at most O(m) more work than the classical algorithm, but runs in time O(n) on infinitely many graphs of constant outdegree on which the classical algorithm requires time O(n^2). Second, we give an algorithm with running time O(n · m · log δ(n)/ log n), where 1 ≤ δ(n) ≤ n is the outdegree of the game graph. Note that this algorithm performs asymptotically better than the classical algorithm if δ(n) = O(log n).},
author = {Krishnendu Chatterjee and Thomas Henzinger and Piterman, Nir},
publisher = {ACM},
title = {{Algorithms for Büchi Games}},
year = {2006},
}
@misc{3510,
abstract = {Embodiments automatically generate an accurate network of watertight NURBS patches from polygonal models of objects while automatically detecting and preserving character lines thereon. These embodiments generate from an initial triangulation of the surface, a hierarchy of progressively coarser triangulations of the surface by performing a sequence of edge contractions using a greedy algorithm that selects edge contractions by their numerical properties. Operations are also performed to connect the triangulations in the hierarchy using homeomorphisms that preserve the topology of the initial triangulation in the coarsest triangulation. A desired quadrangulation of the surface can then be generated by homeomorphically mapping edges of a coarsest triangulation in the hierarchy back to the initial triangulation. This quadrangulation is topologically consistent with the initial triangulation and is defined by a plurality of quadrangular patches. These quadrangular patches are linked together by a (U, V) mesh that is guaranteed to be continuous at patch boundaries. A grid is then preferably fit to each of the quadrangles in the resulting quadrangulation by decomposing each of the quadrangles into k^2 smaller quadrangles. A watertight NURBS model may be generated from the resulting quadrangulation.},
author = {Herbert Edelsbrunner and Fu, Ping and Nekhayev, Dmitry V and Facello, Michael and Williams, Steven P},
publisher = {Elsevier},
title = {{Method, apparatus and computer program products for automatically generating NURBS models of triangulated surfaces using homeomorphism}},
note = {US Patent 6,996,505 B1},
year = {2006},
}
@misc{3511,
abstract = {Methods, apparatus and computer program products provide efficient techniques for designing and printing shells of hearing-aid devices with a high degree of quality assurance and reliability and with a reduced number of manual and time consuming production steps and operations. These techniques also preferably provide hearing-aid shells having internal volumes that can approach a maximum allowable ratio of internal volume relative to external volume. These high internal volumes facilitate the inclusion of hearing-aid electrical components having higher degrees of functionality and/or the use of smaller and less conspicuous hearing-aid shells. A preferred method includes operations to generate a watertight digital model of a hearing-aid shell by thickening a three-dimensional digital model of a shell surface in a manner that eliminates self-intersections and results in a thickened model having an internal volume that is a high percentage of an external volume of the model. },
author = {Fu, Ping and Nekhayev, Dmitry V and Herbert Edelsbrunner},
publisher = {Elsevier},
title = {{Manufacturing methods and systems for rapid production of hearing-aid shells}},
note = {US Patent 7,050,876 B1},
year = {2006},
}
@misc{3512,
abstract = {Methods, apparatus and computer program products provide efficient techniques for reconstructing surfaces from data point sets. These techniques include reconstructing surfaces from sets of scanned data points that have preferably undergone preprocessing operations to improve their quality by, for example, reducing noise and removing outliers. These techniques include reconstructing a dense and locally two-dimensionally distributed 3D point set (e.g., point cloud) by merging stars in two-dimensional weighted Delaunay triangulations within estimated tangent planes. The techniques include determining a plurality of stars from a plurality of points p_i in a 3D point set S that at least partially describes the 3D surface, by projecting the plurality of points p_i onto planes T_i that are each estimated to be tangent about a respective one of the plurality of points p_i. The plurality of stars are then merged into a digital model of the 3D surface.},
author = {Fletcher, Yates G and Gloth, Tobias and Herbert Edelsbrunner and Fu, Ping},
publisher = {Elsevier},
title = {{Method, apparatus and computer products that reconstruct surfaces from data points}},
note = {US Patent 7,023,432 B2},
year = {2006},
}
@article{3522,
abstract = {We observed sharp wave/ripples (SWR) during exploration within brief (< 2.4 s) interruptions of or during theta oscillations. CA1 network responses of SWRs occurring during exploration (eSWR) and SWRs detected in waking immobility or sleep were similar. However, neuronal activity during eSWR was location dependent, and eSWR-related firing was stronger inside the place field than outside. The eSPW-related firing increase was stronger than the baseline increase inside compared to outside, suggesting a “supralinear” summation of eSWR and place-selective inputs. Pairs of cells with similar place fields and/or correlated firing during exploration showed stronger coactivation during eSWRs and subsequent sleep-SWRs. Sequential activation of place cells was not required for the reactivation of waking co-firing patterns; cell pairs with symmetrical cross-correlations still showed reactivated waking co-firing patterns during sleep-SWRs. We suggest that place-selective firing during eSWRs facilitates initial associations between cells with similar place fields that enable place-related ensemble patterns to recur during subsequent sleep-SWRs.},
author = {Joseph O'Neill and Senior, Timothy and Jozsef Csicsvari},
journal = {Neuron},
number = {1},
pages = {143 -- 155},
publisher = {Elsevier},
title = {{Place-selective firing of CA1 pyramidal cells during sharp wave/ripple network patterns in exploratory behavior}},
doi = {10.1016/j.neuron.2005.10.037},
volume = {49},
year = {2006},
}
@article{3545,
abstract = {The functional organization of the basal ganglia ( BG) is often defined according to one of two opposing schemes. The first proposes multiple, essentially independent channels of information processing. The second posits convergence and lateral integration of striatal channels at the level of the globus pallidus ( GP). We tested the hypothesis that these proposed aspects of functional connectivity within the striatopallidal axis are dynamic and related to brain state. Local field potentials ( LFPs) were simultaneously recorded from multiple sites in striatum and GP in anesthetized rats during slow-wave activity( SWA) and during global activation evoked by sensory stimulation. Functional connectivity was inferred from comparative analyses of the internuclear and intranuclear coherence between bipolar derivations of LFPs. During prominent SWA, as shown in the electrocorticogram and local field potentials in the basal ganglia, intranuclear coherence, and, thus, lateral functional connectivity within striatum or globus pallidus was relatively weak. Furthermore, the temporal coupling of LFPs recorded across these two nuclei involved functional convergence at the level of GP. Global activation, indicated by a loss of SWA, was accompanied by a rapid functional reorganization of the striatopallidal axis. Prominent lateral functional connectivity developed within GP and, to a significantly more constrained spatial extent, striatum. Additionally, functional convergence on GP was no longer apparent, despite increased internuclear coherence. These data demonstrate that functional connectivity within the BG is highly dynamic and suggest that the relative expression of organizational principles, such as parallel, independent processing channels, striatopallidal convergence, and lateral integration within BG nuclei, is dependent on brain state.},
author = {Magill, Peter J and Pogosyan, Alek and Sharott, Andrew and Jozsef Csicsvari and Bolam, John Paul and Brown, Peter},
journal = {Journal of Neuroscience},
number = {23},
pages = {6318 -- 6329},
publisher = {Society for Neuroscience},
title = {{Changes in functional connectivity within the rat striatopallidal axis during global brain activation in vivo}},
doi = {10.1523/JNEUROSCI.0620-06.2006},
volume = {26},
year = {2006},
}
@inproceedings{3559,
abstract = {Persistent homology is the mathematical core of recent work on shape, including reconstruction, recognition, and matching. Its pertinent information is encapsulated by a pairing of the critical values of a function, visualized by points forming a diagram in the plane. The original algorithm in [10] computes the pairs from an ordering of the simplices in a triangulation and takes worst-case time cubic in the number of simplices. The main result of this paper is an algorithm that maintains the pairing in worst-case linear time per transposition in the ordering. A side-effect of the algorithm’s analysis is an elementary proof of the stability of persistence diagrams [7] in the special case of piecewise-linear functions. We use the algorithm to compute 1-parameter families of diagrams which we apply to the study of protein folding trajectories.},
author = {Cohen-Steiner, David and Herbert Edelsbrunner and Morozov, Dmitriy},
pages = {119 -- 126},
publisher = {ACM},
title = {{Vines and vineyards by updating persistence in linear time}},
doi = {10.1145/1137856.1137877},
year = {2006},
}
@inproceedings{3560,
abstract = {We continue the study of topological persistence [5] by investigating the problem of simplifying a function f in a way that removes topological noise as determined by its persistence diagram [2]. To state our results, we call a function g an ε-simplification of another function f if ∥f − g∥∞ ≤ ε, and the persistence diagrams of g are the same as those of f except all points within L1-distance at most ε from the diagonal have been removed. We prove that for functions f on a 2-manifold such ε-simplification exists, and we give an algorithm to construct them in the piecewise linear case.},
author = {Herbert Edelsbrunner and Morozov, Dmitriy and Pascucci, Valerio},
pages = {127 -- 134},
publisher = {ACM},
title = {{Persistence-sensitive simplification of functions on 2-manifolds}},
doi = {10.1145/1137856.1137878},
year = {2006},
}
@misc{3594,
author = {Pemberton, Josephine M and Swanson, Graeme M and Nicholas Barton and Livingstone, Suzanne R and Senn, Helen V},
booktitle = {Deer},
number = {9},
pages = {22 -- 26},
publisher = {BDS},
title = {{Hybridisation between red and sika deer in Scotland}},
volume = {13},
year = {2006},
}
@article{3607,
abstract = {We apply new analytical methods to understand the consequences of population bottlenecks for expected additive genetic variance. We analyze essentially all models for multilocus epistasis that have been numerically simulated to demonstrate increased additive variance. We conclude that for biologically plausible models, large increases in expected additive variance–attributable to epistasis rather than dominance–are unlikely. Naciri-Graven and Goudet (2003) found that as the number of epistatically interacting loci increases, additive variance tends to be inflated more after a bottleneck. We argue that this result reflects biologically unrealistic aspects of their models. Specifically, as the number of loci increases, higher-order epistatic interactions become increasingly important in these models, with an increasing fraction of the genetic variance becoming nonadditive, contrary to empirical observations. As shown by Barton and Turelli (2004), without dominance, conversion of nonadditive to additive variance depends only on the variance components and not on the number of loci per se. Numerical results indicating that more inbreeding is needed to produce maximal release of additive variance with more loci follow directly from our analytical results, which show that high levels of inbreeding (F > 0.5) are needed for significant conversion of higher-order components. We discuss alternative approaches to modeling multilocus epistasis and understanding its consequences.},
author = {Turelli, Michael and Nicholas Barton},
journal = {Evolution; International Journal of Organic Evolution},
number = {9},
pages = {1763 -- 1776},
publisher = {Wiley-Blackwell},
title = {{Will population bottlenecks and multilocus epistasis increase additive genetic variance?}},
doi = {10.1111/j.0014-3820.2006.tb00521.x},
volume = {60},
year = {2006},
}
@article{3608,
abstract = {We study the evolution of inversions that capture locally adapted alleles when two populations are exchanging migrants or hybridizing. By suppressing recombination between the loci, a new inversion can spread. Neither drift nor coadaptation between the alleles (epistasis) is needed, so this local adaptation mechanism may apply to a broader range of genetic and demographic situations than alternative hypotheses that have been widely discussed. The mechanism can explain many features observed in inversion systems. It will drive an inversion to high frequency if there is no countervailing force, which could explain fixed differences observed between populations and species. An inversion can be stabilized at an intermediate frequency if it also happens to capture one or more deleterious recessive mutations, which could explain polymorphisms that are common in some species. This polymorphism can cycle in frequency with the changing selective advantage of the locally favored alleles. The mechanism can establish underdominant inversions that decrease heterokaryotype fitness by several percent if the cause of fitness loss is structural, while if the cause is genic there is no limit to the strength of underdominance that can result. The mechanism is expected to cause loci responsible for adaptive species-specific differences to map to inversions, as seen in recent QTL studies. We discuss data that support the hypothesis, review other mechanisms for inversion evolution, and suggest possible tests. },
author = {Kirkpatrick, Mark and Nicholas Barton},
journal = {Genetics},
number = {1},
pages = {419 -- 434},
publisher = {Genetics Society of America},
title = {{Chromosome inversions, local adaptation, and speciation}},
doi = {10.1534/genetics.105.047985},
volume = {173},
year = {2006},
}
@article{3609,
abstract = {Bombina bombina and B. variegata are two anciently diverged toad taxa that have adapted to different breeding habitats yet hybridize freely in zones of overlap where their parapatric distributions meet. Here, we report on a joint genetic and ecological analysis of a hybrid zone in the vicinity of Stryi in western Ukraine. We used five unlinked allozyme loci, two nuclear single nucleotide polymorphisms and a mitochondrial DNA haplotype as genetic markers. Parallel allele frequency clines with a sharp central step occur across a sharp ecotone, where transitions in aquatic habitat, elevation, and terrestrial vegetation coincide. The width of the hybrid zone, estimated as the inverse of the maximum gradient in allele frequency, is 2.3 km. This is the smallest of four estimates derived from different clinal transects across Europe. We argue that the narrow cline near Stryi is mainly due to a combination of habitat distribution and habitat preference. Adult toads show a preference for either ponds (B. bombina) or puddles (B. variegata), which is known to affect the distribution of genotypes within the hybrid zones. At Stryi, it should cause a reduction of the dispersal rate across the ecotone and thus narrow the cline. A detailed comparison of all five intensively studied Bombina transects lends support to the hypothesis that habitat distribution plus habitat preference can jointly affect the structure of hybrid zones and, ultimately, the resulting barriers to gene flow between differentiated gene pools. This study also represents a resampling of an area that was last studied more than 70 years ago. Our allele-frequency clines largely coincide with those that were described then on the basis of morphological variation. However, we found asymmetrical introgression of B. variegata genes into B. bombina territory along the bank of a river.},
author = {Yanchukov, Alexey and Hofman, Sebastian and Szymura, Jacek M and Mezhzherin, Sergey V and Morozov-Leonov, Sviatoslav and Nicholas Barton and Nürnberger, Beate},
journal = {Evolution; International Journal of Organic Evolution},
number = {3},
pages = {583 -- 600},
publisher = {Wiley-Blackwell},
title = {{Hybridization of Bombina bombina and B. variegata (Anura, Discoglossidae) at a sharp ecotone in western Ukraine: comparisons across transects and over time}},
doi = {10.1111/j.0014-3820.2006.tb01139.x},
volume = {60},
year = {2006},
}
@article{3610,
abstract = {For a model of diallelic loci with arbitrary epistasis, Barton and Turelli [2004. Effects of genetic drift on variance components under a general model of epistasis. Evolution 58, 2111–2132] gave results for variances among and within replicate lines obtained by inbreeding without selection. Here, we discuss the relation between their population genetic methods and classical quantitative genetic arguments. In particular, we consider the case of no dominance using classical identity by descent arguments, which generalizes their results from two alleles to multiple alleles. To clarify the connections between the alternative methods, we obtain the same results using an intermediate method, which explicitly identifies the statistical effects of sets of loci. We also discuss the effects of population bottlenecks on covariances among relatives.},
author = {Hill, William G and Nicholas Barton and Turelli, Michael},
journal = {Theoretical Population Biology},
number = {1},
pages = {56 -- 62},
publisher = {Academic Press},
title = {{Prediction of effects of genetic drift on variance components under a general model of epistasis}},
doi = {10.1016/j.tpb.2005.10.001},
volume = {70},
year = {2006},
}
@inproceedings{3677,
abstract = {We propose a video retrieval framework based on a novel combination of spatiograms and the Jensen-Shannon divergence, and validate its performance in two quantitative experiments on TRECVID BBC Rushes data. In the first experiment, color-based methods are tested by grouping redundant shots in an unsupervised clustering. Results of the second experiment show that motion-based spatiograms make a promising fast, compressed-domain descriptor for the detection of interview scenes.},
author = {Ulges, Adrian and Christoph Lampert and Keysers, Daniel},
pages = {1 -- 10},
publisher = {NIST (National Institute of Standards and Technology, US Department of Commerce)},
title = {{Spatiogram-based shot distances for video retrieval}},
year = {2006},
}
@inproceedings{3679,
abstract = {This paper describes a new system for "Finding Satellite Tracks” in astronomical images based on the modern geometric approach. There is an increasing need of using methods with solid mathematical and statistical foundation in astronomical image processing. Where the computational methods are serving in all disciplines of science, they are becoming popular in the field of astronomy as well. Currently different computational systems are required to be numerically optimized before to get applied on astronomical images. So at present there is no single system which solves the problems of astronomers using computational methods based on modern approaches. The system "Finding Satellite Tracks” is based on geometric matching method "Recognition by Adaptive Subdivision of Transformation Space (RAST)".},
author = {Ali, Haider and Christoph Lampert and Breuel, Thomas M},
pages = {892 -- 901},
publisher = {Springer},
title = {{Satellite tracks removal in astronomical images}},
doi = {10.1007/11892755_92},
volume = {4225},
year = {2006},
}
@inproceedings{3680,
abstract = {The detection of counterfeit in printed documents is currently based mainly on built-in security features or on human expertise. We propose a classification system that supports non-expert users to distinguish original documents from PC-made forgeries by analyzing the printing technique used. Each letter in a document is classified using a support vector machine that has been trained to distinguish laser from inkjet printouts. A color-coded visualization helps the user to interpret the per-letter classification results},
author = {Christoph Lampert and Mei, Lin and Breuel, Thomas M},
pages = {639 -- 644},
publisher = {IEEE},
title = {{Printing technique classification for document counterfeit detection}},
doi = {10.1109/ICCIAS.2006.294214},
volume = {1},
year = {2006},
}
@inproceedings{3683,
abstract = {Many algorithms to remove distortion from document images have be proposed in recent years, but so far there is no reliable method for comparing their performance. In this paper we propose a collection of methods to measure the quality of such restoration algorithms for document image which show a non-linear distortion due to perspective or page curl. For the result from these measurement to be meaningful, a common data set of ground truth is required. We therefore started with the buildup of a document image database that is meant to serve as a common data basis for all kinds of restoration from images of 3D-shaped document. The long term goal would be to establish this database and following extensions in the area of document image dewarping as an as fruitful and indispensable tool as e.g. the NIST database is for OCR, or the Caltech database is for object and face recognition.},
author = {Christoph Lampert and Breuel, Thomas M},
publisher = {Springer},
title = {{Objective quality measurement for geometric document image restoration}},
year = {2006},
}
@inproceedings{3685,
abstract = {Video compression currently is dominated by engineering and fine-tuned heuristic methods. In this paper, we propose to instead apply the well-developed machinery of machine learning in order to support the optimization of existing video encoders and the creation of new ones. Exemplarily, we show how by machine learning we can improve one encoding step that is crucial for the performance of all current video standards: macroblock mode decision. By formulating the problem in a Bayesian setup, we show that macroblock mode decision can be reduced to a classification problem with a cost function for misclassification that is sample dependent. We demonstrate how to apply different machine learning techniques to obtain suitable classifiers and we show in detailed experiments that all of these perform better than the state-of-the-art heuristic method},
author = {Christoph Lampert},
pages = {936 -- 940},
publisher = {IEEE},
title = {{Machine learning for video compression: Macroblock mode decision}},
doi = {10.1109/ICPR.2006.778},
year = {2006},
}
@inproceedings{3692,
author = {Keysers, Daniel and Christoph Lampert and Breuel, Thomas M},
publisher = {SPIE},
title = {{Color image dequantization by constrained diffusion}},
doi = {10.1117/12.648713},
volume = {6058},
year = {2006},
}
@inproceedings{3693,
abstract = {Gaussian filtering in one, two or three dimensions is among the most commonly needed tasks in signal and image processing. Finite impulse response filters in the time domain with Gaussian masks are easy to implement in either floating or fixed point arithmetic, because Gaussian kernels are strictly positive and bounded. But these implementations are slow for large images or kernels. With the recursive IIR-filters and FFT-based methods, there are at least two alternative methods to perform Gaussian filtering in a faster way, but so far they are only applicable when floating-point hardware is available. In this paper, a fixed-point implementation of recursive Gaussian filtering is discussed and applied to isotropic and anisotropic image filtering by making use of a non-orthogonal separation scheme of the Gaussian filter.},
author = {Christoph Lampert and Wirjadi, Oliver},
pages = {1565 -- 1568},
publisher = {IEEE},
title = {{Anisotropic Gaussian filtering using fixed point arithmetic}},
doi = {10.1109/ICIP.2006.312606},
year = {2006},
}
@article{3695,
abstract = {We give an analytical and geometrical treatment of what it means to separate a Gaussian kernel along arbitrary axes in Ropfn, and we present a separation scheme that allows us to efficiently implement anisotropic Gaussian convolution filters for data of arbitrary dimensionality. Based on our previous analysis we show that this scheme is optimal with regard to the number of memory accesses and interpolation operations needed. The proposed method relies on nonorthogonal convolution axes and works completely in image space. Thus, it avoids the need for a fast Fourier transform (FFT)-subroutine. Depending on the accuracy and speed requirements, different interpolation schemes and methods to implement the one-dimensional Gaussian (finite impulse response and infinite impulse response) can be integrated. Special emphasis is put on analyzing the performance and accuracy of the new method. In particular, we show that without any special optimization of the source code, it can perform anisotropic Gaussian filtering faster than methods relying on the FFT.},
author = {Christoph Lampert and Wirjadi, Oliver},
journal = {IEEE Transactions on Image Processing},
number = {11},
pages = {3501 -- 3513},
publisher = {IEEE},
title = {{An optimal non-orthogonal separation of the anisotropic Gaussian convolution filter}},
doi = {10.1109/TIP.2006.877501},
volume = {15},
year = {2006},
}
@article{9505,
abstract = {Cytosine methylation, a common form of DNA modification that antagonizes transcription, is found at transposons and repeats in vertebrates, plants and fungi. Here we have mapped DNA methylation in the entire Arabidopsis thaliana genome at high resolution. DNA methylation covers transposons and is present within a large fraction of A. thaliana genes. Methylation within genes is conspicuously biased away from gene ends, suggesting a dependence on RNA polymerase transit. Genic methylation is strongly influenced by transcription: moderately transcribed genes are most likely to be methylated, whereas genes at either extreme are least likely. In turn, transcription is influenced by methylation: short methylated genes are poorly expressed, and loss of methylation in the body of a gene leads to enhanced transcription. Our results indicate that genic transcription and DNA methylation are closely interwoven processes.},
author = {Zilberman, Daniel and Gehring, Mary and Tran, Robert K. and Ballinger, Tracy and Henikoff, Steven},
issn = {1546-1718},
journal = {Nature Genetics},
number = {1},
pages = {61--69},
publisher = {Nature Publishing Group},
title = {{Genome-wide analysis of Arabidopsis thaliana DNA methylation uncovers an interdependence between methylation and transcription}},
doi = {10.1038/ng1929},
volume = {39},
year = {2006},
}
@article{2307,
abstract = {The human norepinephrine (NE) transporter (hNET) attenuates neuronal signaling by rapid NE clearance from the synaptic cleft, and NET is a target for cocaine and amphetamines as well as therapeutics for depression, obsessive-compulsive disorder, and post-traumatic stress disorder. In spite of its central importance in the nervous system, little is known about how NET substrates, such as NE, 1-methyl-4-tetrahydropyridinium (MPP+), or amphetamine, interact with NET at the molecular level. Nor do we understand the mechanisms behind the transport rate. Previously we introduced a fluorescent substrate similar to MPP+, which allowed separate and simultaneous binding and transport measurement (Schwartz, J. W., Blakely, R. D., and DeFelice, L. J. (2003) J. Biol. Chem. 278, 9768-9777). Here we use this substrate, 4-(4-(dimethylamino)styrl)-N-methyl-pyridinium (ASP+), in combination with green fluorescent protein-tagged hNETs to measure substrate-transporter stoichiometry and substrate binding kinetics. Calibrated confocal microscopy and fluorescence correlation spectroscopy reveal that hNETs, which are homo-multimers, bind one substrate molecule per transporter subunit. Substrate residence at the transporter, obtained from rapid on-off kinetics revealed in fluorescence correlation spectroscopy, is 526 μs. Substrate residence obtained by infinite dilution is 1000 times slower. This novel examination of substrate-transporter kinetics indicates that a single ASP + molecule binds and unbinds thousands of times before being transported or ultimately dissociated from hNET. Calibrated fluorescent images combined with mass spectroscopy give a transport rate of 0.06 ASP +/hNET-protein/s, thus 36,000 on-off binding events (and 36 actual departures) occur for one transport event. Therefore binding has a low probability of resulting in transport. We interpret these data to mean that inefficient binding could contribute to slow transport rates.},
author = {Schwartz, Joel W and Gaia Novarino and Piston, David W and DeFelice, Louis J},
journal = {Journal of Biological Chemistry},
number = {19},
pages = {19177 -- 19184},
publisher = {American Society for Biochemistry and Molecular Biology},
title = {{Substrate binding stoichiometry and kinetics of the norepinephrine transporter}},
doi = {10.1074/jbc.M412923200},
volume = {280},
year = {2005},
}
@book{2335,
abstract = {This book contains a unique survey of the mathematically rigorous results about the quantum-mechanical many-body problem that have been obtained by the authors in the past seven years. It addresses a topic that is not only rich mathematically, using a large variety of techniques in mathematical analysis, but is also one with strong ties to current experiments on ultra-cold Bose gases and Bose-Einstein condensation. The book provides a pedagogical entry into an active area of ongoing research for both graduate students and researchers. It is an outgrowth of a course given by the authors for graduate students and post-doctoral researchers at the Oberwolfach Research Institute in 2004. The book also provides a coherent summary of the field and a reference for mathematicians and physicists active in research on quantum mechanics.},
author = {Lieb, Elliott H and Robert Seiringer and Solovej, Jan P and Yngvason, Jakob},
booktitle = {The mathematics of the Bose gas and its condensation},
publisher = {Birkhäuser},
title = {{The mathematics of the Bose gas and its condensation}},
volume = {34},
year = {2005},
}
@incollection{2336,
abstract = {Now that the low temperature properties of quantum-mechanical many-body systems (bosons) at low density, ρ, can be examined experimentally it is appropriate to revisit some of the formulas deduced by many authors 4–5 decades ago, and to explore new regimes not treated before. For systems with repulsive (i.e. positive) interaction potentials the experimental low temperature state and the ground state are effectively synonymous — and this fact is used in all modeling. In such cases, the leading term in the energy/particle is 2πħ^2 aρ/m where a is the scattering length of the two-body potential. Owing to the delicate and peculiar nature of bosonic correlations (such as the strange N^{7/5} law for charged bosons), four decades of research failed to establish this plausible formula rigorously. The only previous lower bound for the energy was found by Dyson in 1957, but it was 14 times too small. The correct asymptotic formula has been obtained by us and this work will be presented. The reason behind the mathematical difficulties will be emphasized. A different formula, postulated as late as 1971 by Schick, holds in two dimensions and this, too, will be shown to be correct. With the aid of the methodology developed to prove the lower bound for the homogeneous gas, several other problems have been successfully addressed. One is the proof by us that the Gross-Pitaevskii equation correctly describes the ground state in the ‘traps’ actually used in the experiments. For this system it is also possible to prove complete Bose condensation and superfluidity as we have shown. On the frontier of experimental developments is the possibility that a dilute gas in an elongated trap will behave like a one-dimensional system; we have proved this mathematically. Another topic is a proof that Foldy’s 1961 theory of a high density Bose gas of charged particles correctly describes its ground state energy; using this we can also prove the N^{7/5} formula for the ground state energy of the two-component charged Bose gas proposed by Dyson in 1967. All of this is quite recent work and it is hoped that the mathematical methodology might be useful, ultimately, to solve more complex problems connected with these interesting systems.},
author = {Lieb, Elliott H and Seiringer, Robert and Solovej, Jan P and Yngvason, Jakob},
booktitle = {Perspectives in Analysis},
editor = {Benedicks, Michael and Jones, Peter W and Smirnov, Stanislav and Winckler, Björn},
pages = {97 -- 183},
publisher = {Springer},
series = {Mathematical Physics Studies},
title = {{The quantum-mechanical many-body problem: The Bose gas}},
doi = {10.1007/3-540-30434-7_9},
volume = {27},
year = {2005},
}
@article{2359,
abstract = {The validity of substituting a c-number z for the k = 0 mode operator a0 is established rigorously in full generality, thereby verifying one aspect of Bogoliubov's 1947 theory. This substitution not only yields the correct value of thermodynamic quantities such as the pressure or ground state energy, but also the value of |z|2 that maximizes the partition function equals the true amount of condensation in the presence of a gauge-symmetry-breaking term. This point had previously been elusive.},
author = {Lieb, Elliott H and Seiringer, Robert and Yngvason, Jakob},
journal = {Physical Review Letters},
number = {8},
pages = {080401},
publisher = {American Physical Society},
title = {{Justification of c-number substitutions in bosonic Hamiltonians}},
doi = {10.1103/PhysRevLett.94.080401},
volume = {94},
year = {2005},
}
@article{2361,
abstract = {The strong subadditivity of entropy plays a key role in several areas of physics and mathematics. It states that the entropy S[ϱ] = -Tr(ϱ ln ϱ) of a density matrix ϱ123 on the product of three Hilbert spaces satisfies S[ϱ123] - S[ϱ12] ≤ S[ϱ23] - S[ϱ2]. We strengthen this to S[ϱ123] - S[ϱ12] ≤ ∑α nα (S[ϱ23α] - S[ϱ2α]), where the nα are weights and the ϱ23α are partitions of ϱ23. Correspondingly, there is a strengthening of the theorem that the map A ↦ Tr exp[L + ln A] is concave. As applications we prove some monotonicity and convexity properties of the Wehrl coherent state entropy and entropy inequalities for quantum gases.},
author = {Lieb, Elliott H and Seiringer, Robert},
journal = {Physical Review A - Atomic, Molecular, and Optical Physics},
number = {6},
pages = {062329},
publisher = {American Physical Society},
title = {{Stronger subadditivity of entropy}},
doi = {10.1103/PhysRevA.71.062329},
volume = {71},
year = {2005},
}