@article{2038,
abstract = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {Temporal specifications with accumulative values},
doi = {10.1145/2629686},
volume = {15},
year = {2014},
}
@article{2039,
abstract = {A fundamental question in biology is the following: what is the time scale that is needed for evolutionary innovations? There are many results that characterize single steps in terms of the fixation time of new mutants arising in populations of certain size and structure. But here we ask a different question, which is concerned with the much longer time scale of evolutionary trajectories: how long does it take for a population exploring a fitness landscape to find target sequences that encode new biological functions? Our key variable is the length, $L$, of the genetic sequence that undergoes adaptation. In computer science there is a crucial distinction between problems that require algorithms which take polynomial or exponential time. The latter are considered to be intractable. Here we develop a theoretical approach that allows us to estimate the time of evolution as function of $L$. We show that adaptation on many fitness landscapes takes time that is exponential in $L$, even if there are broad selection gradients and many targets uniformly distributed in sequence space. These negative results lead us to search for specific mechanisms that allow evolution to work on polynomial time scales. We study a regeneration process and show that it enables evolution to work in polynomial time.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Adlam, Ben and Nowak, Martin},
journal = {PLoS Computational Biology},
number = {9},
publisher = {Public Library of Science},
title = {The time scale of evolutionary innovation},
doi = {10.1371/journal.pcbi.1003818},
volume = {10},
year = {2014},
}
@article{2040,
abstract = {Development requires tissue growth as well as cell diversification. To address how these processes are coordinated, we analyzed the development of molecularly distinct domains of neural progenitors in the mouse and chick neural tube. We show that during development, these domains undergo changes in size that do not scale with changes in overall tissue size. Our data show that domain proportions are first established by opposing morphogen gradients and subsequently controlled by domain-specific regulation of differentiation rate but not differences in proliferation rate. Regulation of differentiation rate is key to maintaining domain proportions while accommodating both intra- and interspecies variations in size. Thus, the sequential control of progenitor specification and differentiation elaborates pattern without requiring that signaling gradients grow as tissues expand.},
author = {Kicheva, Anna and Bollenbach, Mark Tobias and Ribeiro, Ana and Pérez Valle, Helena and Lovell-Badge, Robin and Episkopou, Vasso and Briscoe, James},
journal = {Science},
number = {6204},
publisher = {American Association for the Advancement of Science},
title = {Coordination of progenitor specification and growth in mouse and chick spinal cord},
doi = {10.1126/science.1254927},
volume = {345},
year = {2014},
}
@article{2041,
abstract = {The hippocampus mediates several higher brain functions, such as learning, memory, and spatial coding. The input region of the hippocampus, the dentate gyrus, plays a critical role in these processes. Several lines of evidence suggest that the dentate gyrus acts as a preprocessor of incoming information, preparing it for subsequent processing in CA3. For example, the dentate gyrus converts input from the entorhinal cortex, where cells have multiple spatial fields, into the spatially more specific place cell activity characteristic of the CA3 region. Furthermore, the dentate gyrus is involved in pattern separation, transforming relatively similar input patterns into substantially different output patterns. Finally, the dentate gyrus produces a very sparse coding scheme in which only a very small fraction of neurons are active at any one time.},
author = {Jonas, Peter M and Lisman, John},
journal = {Frontiers in Neural Circuits},
publisher = {Frontiers Research Foundation},
title = {Structure, function and plasticity of hippocampal dentate gyrus microcircuits},
doi = {10.3389/fncir.2014.00107},
volume = {8},
year = {2014},
}
@article{2042,
abstract = {Background: CRISPR is a microbial immune system likely to be involved in host-parasite coevolution. It functions using target sequences encoded by the bacterial genome, which interfere with invading nucleic acids using a homology-dependent system. The system also requires protospacer associated motifs (PAMs), short motifs close to the target sequence that are required for interference in CRISPR types I and II. Here, we investigate whether PAMs are depleted in phage genomes due to selection pressure to escape recognition. Results: To this end, we analyzed two data sets. Phages infecting all bacterial hosts were analyzed first, followed by a detailed analysis of phages infecting the genus Streptococcus, where PAMs are best understood. We use two different measures of motif underrepresentation that control for codon bias and the frequency of submotifs. We compare phages infecting species with a particular CRISPR type to those infecting species without that type. Since only known PAMs were investigated, the analysis is restricted to CRISPR types I-C and I-E and in Streptococcus to types I-C and II. We found evidence for PAM depletion in Streptococcus phages infecting hosts with CRISPR type I-C, in Vibrio phages infecting hosts with CRISPR type I-E and in Streptococcus thermophilus phages infecting hosts with type II-A, known as CRISPR3. Conclusions: The observed motif depletion in phages with hosts having CRISPR can be attributed to selection rather than to mutational bias, as mutational bias should affect the phages of all hosts. This observation implies that the CRISPR system has been efficient in the groups discussed here.},
author = {Kupczok, Anne and Bollback, Jonathan P},
journal = {BMC Genomics},
number = {1},
publisher = {BioMed Central},
title = {Motif depletion in bacteriophages infecting hosts with {CRISPR} systems},
doi = {10.1186/1471-2164-15-663},
volume = {15},
year = {2014},
}
@inproceedings{2043,
abstract = {Persistent homology is a popular and powerful tool for capturing topological features of data. Advances in algorithms for computing persistent homology have reduced the computation time drastically – as long as the algorithm does not exhaust the available memory. Following up on a recently presented parallel method for persistence computation on shared memory systems [1], we demonstrate that a simple adaption of the standard reduction algorithm leads to a variant for distributed systems. Our algorithmic design ensures that the data is distributed over the nodes without redundancy; this permits the computation of much larger instances than on a single machine. Moreover, we observe that the parallelism at least compensates for the overhead caused by communication between nodes, and often even speeds up the computation compared to sequential and even parallel shared memory algorithms. In our experiments, we were able to compute the persistent homology of filtrations with more than a billion ($10^9$) elements within seconds on a cluster with 32 nodes using less than 6GB of memory per node.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Proceedings of the Workshop on Algorithm Engineering and Experiments},
editor = {McGeoch, Catherine and Meyer, Ulrich},
location = {Portland, USA},
pages = {31 -- 38},
publisher = {Society for Industrial and Applied Mathematics},
title = {Distributed computation of persistent homology},
doi = {10.1137/1.9781611973198.4},
year = {2014},
}
@incollection{2044,
abstract = {We present a parallel algorithm for computing the persistent homology of a filtered chain complex. Our approach differs from the commonly used reduction algorithm by first computing persistence pairs within local chunks, then simplifying the unpaired columns, and finally applying standard reduction on the simplified matrix. The approach generalizes a technique by Günther et al., which uses discrete Morse Theory to compute persistence; we derive the same worst-case complexity bound in a more general context. The algorithm employs several practical optimization techniques, which are of independent interest. Our sequential implementation of the algorithm is competitive with state-of-the-art methods, and we further improve the performance through parallel computation.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan},
booktitle = {Topological Methods in Data Analysis and Visualization III},
editor = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald},
pages = {103 -- 117},
publisher = {Springer},
title = {Clear and Compress: Computing Persistent Homology in Chunks},
doi = {10.1007/978-3-319-04099-8_7},
year = {2014},
}
@inproceedings{2045,
abstract = {We introduce and study a new notion of enhanced chosen-ciphertext security (ECCA) for public-key encryption. Loosely speaking, in the ECCA security experiment, the decryption oracle provided to the adversary is augmented to return not only the output of the decryption algorithm on a queried ciphertext but also of a randomness-recovery algorithm associated to the scheme. Our results mainly concern the case where the randomness-recovery algorithm is efficient. We provide constructions of ECCA-secure encryption from adaptive trapdoor functions as defined by Kiltz et al. (EUROCRYPT 2010), resulting in ECCA encryption from standard number-theoretic assumptions. We then give two applications of ECCA-secure encryption: (1) We use it as a unifying concept in showing equivalence of adaptive trapdoor functions and tag-based adaptive trapdoor functions, resolving an open question of Kiltz et al. (2) We show that ECCA-secure encryption can be used to securely realize an approach to public-key encryption with non-interactive opening (PKENO) originally suggested by Damgård and Thorbek (EUROCRYPT 2007), resulting in new and practical PKENO schemes quite different from those in prior work. Our results demonstrate that ECCA security is of both practical and theoretical interest.},
author = {Dachman-Soled, Dana and Fuchsbauer, Georg and Mohassel, Payman and O’Neill, Adam},
booktitle = {Public-Key Cryptography -- PKC 2014},
series = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {329 -- 344},
publisher = {Springer},
title = {Enhanced chosen-ciphertext security and applications},
doi = {10.1007/978-3-642-54631-0_19},
volume = {8383},
year = {2014},
}
@inproceedings{2046,
abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.},
author = {Bellare, Mihir and Fuchsbauer, Georg},
booktitle = {Public-Key Cryptography -- PKC 2014},
series = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {520 -- 537},
publisher = {Springer},
title = {Policy-based signatures},
doi = {10.1007/978-3-642-54631-0_30},
volume = {8383},
year = {2014},
}
@inproceedings{2047,
abstract = {Following the publication of an attack on genome-wide association studies (GWAS) data proposed by Homer et al., considerable attention has been given to developing methods for releasing GWAS data in a privacy-preserving way. Here, we develop an end-to-end differentially private method for solving regression problems with convex penalty functions and selecting the penalty parameters by cross-validation. In particular, we focus on penalized logistic regression with elastic-net regularization, a method widely used to in GWAS analyses to identify disease-causing genes. We show how a differentially private procedure for penalized logistic regression with elastic-net regularization can be applied to the analysis of GWAS data and evaluate our method’s performance.},
author = {Yu, Fei and Rybar, Michal and Uhler, Caroline and Fienberg, Stephen},
booktitle = {Privacy in Statistical Databases},
series = {Lecture Notes in Computer Science},
editor = {Domingo-Ferrer, Josep},
location = {Ibiza, Spain},
pages = {170 -- 184},
publisher = {Springer},
title = {Differentially-private logistic regression for detecting multiple-{SNP} association in {GWAS} databases},
doi = {10.1007/978-3-319-11257-2_14},
volume = {8744},
year = {2014},
}
@article{2050,
abstract = {The flow instability and further transition to turbulence in a toroidal pipe (torus) with curvature ratio (tube-to-coiling diameter) 0.049 is investigated experimentally. The flow inside the toroidal pipe is driven by a steel sphere fitted to the inner pipe diameter. The sphere is moved with constant azimuthal velocity from outside the torus by a moving magnet. The experiment is designed to investigate curved pipe flow by optical measurement techniques. Using stereoscopic particle image velocimetry, laser Doppler velocimetry and pressure drop measurements, the flow is measured for Reynolds numbers ranging from 1000 to 15 000. Time- and space-resolved velocity fields are obtained and analysed. The steady axisymmetric basic flow is strongly influenced by centrifugal effects. On an increase of the Reynolds number we find a sequence of bifurcations. For Re=4075±2\% a supercritical bifurcation to an oscillatory flow is found in which waves travel in the streamwise direction with a phase velocity slightly faster than the mean flow. The oscillatory flow is superseded by a presumably quasi-periodic flow at a further increase of the Reynolds number before turbulence sets in. The results are found to be compatible, in general, with earlier experimental and numerical investigations on transition to turbulence in helical and curved pipes. However, important aspects of the bifurcation scenario differ considerably.},
author = {Kühnen, Jakob and Holzner, Markus and Hof, Björn and Kuhlmann, Hendrik},
journal = {Journal of Fluid Mechanics},
pages = {463 -- 491},
publisher = {Cambridge University Press},
title = {Experimental investigation of transitional flow in a toroidal pipe},
doi = {10.1017/jfm.2013.603},
volume = {738},
year = {2014},
}
@inproceedings{2051,
abstract = {We show that the usual score function for conditional Markov networks can be written as the expectation over the scores of their spanning trees. We also show that a small random sample of these output trees can attain a significant fraction of the margin obtained by the complete graph and we provide conditions under which we can perform tractable inference. The experimental results confirm that practical learning is scalable to realistic datasets using this approach.},
author = {Marchand, Mario and Su, Hongyu and Morvant, Emilie and Rousu, Juho and Shawe-Taylor, John},
booktitle = {Advances in Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {Multilabel structured output learning with random spanning trees of max-margin {Markov} networks},
year = {2014},
}
@inproceedings{2053,
abstract = {In contrast to the usual understanding of probabilistic systems as stochastic processes, recently these systems have also been regarded as transformers of probabilities. In this paper, we give a natural definition of strong bisimulation for probabilistic systems corresponding to this view that treats probability distributions as first-class citizens. Our definition applies in the same way to discrete systems as well as to systems with uncountable state and action spaces. Several examples demonstrate that our definition refines the understanding of behavioural equivalences of probabilistic systems. In particular, it solves a longstanding open problem concerning the representation of memoryless continuous time by memoryfull continuous time. Finally, we give algorithms for computing this bisimulation not only for finite but also for classes of uncountably infinite systems.},
author = {Hermanns, Holger and Krčál, Jan and Křetínský, Jan},
booktitle = {CONCUR 2014 -- Concurrency Theory},
series = {Lecture Notes in Computer Science},
editor = {Baldan, Paolo and Gorla, Daniele},
location = {Rome, Italy},
pages = {249 -- 265},
publisher = {Springer},
title = {Probabilistic bisimulation: Naturally on distributions},
doi = {10.1007/978-3-662-44584-6_18},
volume = {8704},
year = {2014},
}
@article{2056,
abstract = {We consider a continuous-time Markov chain (CTMC) whose state space is partitioned into aggregates, and each aggregate is assigned a probability measure. A sufficient condition for defining a CTMC over the aggregates is presented as a variant of weak lumpability, which also characterizes that the measure over the original process can be recovered from that of the aggregated one. We show how the applicability of de-aggregation depends on the initial distribution. The application section is devoted to illustrate how the developed theory aids in reducing CTMC models of biochemical systems particularly in connection to protein-protein interactions. We assume that the model is written by a biologist in form of site-graph-rewrite rules. Site-graph-rewrite rules compactly express that, often, only a local context of a protein (instead of a full molecular species) needs to be in a certain configuration in order to trigger a reaction event. This observation leads to suitable aggregate Markov chains with smaller state spaces, thereby providing sufficient reduction in computational complexity. This is further exemplified in two case studies: simple unbounded polymerization and early EGFR/insulin crosstalk.},
author = {Ganguly, Arnab and Petrov, Tatjana and Koeppl, Heinz},
journal = {Journal of Mathematical Biology},
number = {3},
pages = {767 -- 797},
publisher = {Springer},
title = {{Markov} chain aggregation and its applications to combinatorial reaction networks},
doi = {10.1007/s00285-013-0738-7},
volume = {69},
year = {2014},
}
@inproceedings{2057,
abstract = {In the past few years, a lot of attention has been devoted to multimedia indexing by fusing multimodal informations. Two kinds of fusion schemes are generally considered: The early fusion and the late fusion. We focus on late classifier fusion, where one combines the scores of each modality at the decision level. To tackle this problem, we investigate a recent and elegant well-founded quadratic program named MinCq coming from the machine learning PAC-Bayesian theory. MinCq looks for the weighted combination, over a set of real-valued functions seen as voters, leading to the lowest misclassification rate, while maximizing the voters’ diversity. We propose an extension of MinCq tailored to multimedia indexing. Our method is based on an order-preserving pairwise loss adapted to ranking that allows us to improve Mean Averaged Precision measure while taking into account the diversity of the voters that we want to fuse. We provide evidence that this method is naturally adapted to late fusion procedures and confirm the good behavior of our approach on the challenging PASCAL VOC’07 benchmark.},
author = {Morvant, Emilie and Habrard, Amaury and Ayache, Stéphane},
booktitle = {Structural, Syntactic, and Statistical Pattern Recognition},
series = {Lecture Notes in Computer Science},
location = {Joensuu, Finland},
pages = {153 -- 162},
publisher = {Springer},
title = {Majority vote of diverse classifiers for late fusion},
doi = {10.1007/978-3-662-44415-3_16},
volume = {8621},
year = {2014},
}
@article{2058,
abstract = {We present a method for smoothly blending between existing liquid animations. We introduce a semi-automatic method for matching two existing liquid animations, which we use to create new fluid motion that plausibly interpolates the input. Our contributions include a new space-time non-rigid iterative closest point algorithm that incorporates user guidance, a subsampling technique for efficient registration of meshes with millions of vertices, and a fast surface extraction algorithm that produces 3D triangle meshes from a 4D space-time surface. Our technique can be used to instantly create hundreds of new simulations, or to interactively explore complex parameter spaces. Our method is guaranteed to produce output that does not deviate from the input animations, and it generalizes to multiple dimensions. Because our method runs at interactive rates after the initial precomputation step, it has potential applications in games and training simulations.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Thuerey, Nils and Turk, Greg},
journal = {ACM Transactions on Graphics},
number = {4},
publisher = {ACM},
title = {Blending liquids},
doi = {10.1145/2601097.2601126},
volume = {33},
year = {2014},
}
@article{2059,
abstract = {Plant embryogenesis is regulated by differential distribution of the plant hormone auxin. However, the cells establishing these gradients during microspore embryogenesis remain to be identified. For the first time, we describe, using the DR5 or DR5rev reporter gene systems, the GFP- and GUS-based auxin biosensors to monitor auxin during Brassica napus androgenesis at cellular resolution in the initial stages. Our study provides evidence that the distribution of auxin changes during embryo development and depends on the temperature-inducible in vitro culture conditions. For this, microspores (mcs) were induced to embryogenesis by heat treatment and then subjected to genetic modification via Agrobacterium tumefaciens. The duration of high temperature treatment had a significant influence on auxin distribution in isolated and in vitro-cultured microspores and on microspore-derived embryo development. In the “mild” heat-treated (1 day at 32 °C) mcs, auxin localized in a polar way already at the uni-nucleate microspore, which was critical for the initiation of embryos with suspensor-like structure. Assuming a mean mcs radius of 20 μm, endogenous auxin content in a single cell corresponded to concentration of 1.01 μM. In mcs subjected to a prolonged heat (5 days at 32 °C), although auxin concentration increased dozen times, auxin polarization was set up at a few-celled pro-embryos without suspensor. Those embryos were enclosed in the outer wall called the exine. The exine rupture was accompanied by the auxin gradient polarization. Relative quantitative estimation of auxin, using time-lapse imaging, revealed that primordia possess up to 1.3-fold higher amounts than those found in the root apices of transgenic MDEs in the presence of exogenous auxin.
Our results show, for the first time, which concentration of endogenous auxin coincides with the first cell division and how the high temperature interplays with auxin, by what affects delay early establishing microspore polarity. Moreover, we present how the local auxin accumulation demonstrates the apical–basal axis formation of the androgenic embryo and directs the axiality of the adult haploid plant.},
author = {Dubas, Ewa and Moravčíková, Jana and Libantová, Jana and Matušíková, Ildikó and Benková, Eva and Zur, Iwona and Krzewska, Monika},
journal = {Protoplasma},
number = {5},
pages = {1077 -- 1087},
publisher = {Springer},
title = {The influence of heat stress on auxin distribution in transgenic {B. napus} microspores and microspore-derived embryos},
doi = {10.1007/s00709-014-0616-1},
volume = {251},
year = {2014},
}
@article{2062,
abstract = {The success story of fast-spiking, parvalbumin-positive (PV+) GABAergic interneurons (GABA, γ-aminobutyric acid) in the mammalian central nervous system is noteworthy. In 1995, the properties of these interneurons were completely unknown. Twenty years later, thanks to the massive use of subcellular patch-clamp techniques, simultaneous multiple-cell recording, optogenetics, in vivo measurements, and computational approaches, our knowledge about PV+ interneurons became more extensive than for several types of pyramidal neurons. These findings have implications beyond the “small world” of basic research on GABAergic cells. For example, the results provide a first proof of principle that neuroscientists might be able to close the gaps between the molecular, cellular, network, and behavioral levels, representing one of the main challenges at the present time. Furthermore, the results may form the basis for PV+ interneurons as therapeutic targets for brain disease in the future. However, much needs to be learned about the basic function of these interneurons before clinical neuroscientists will be able to use PV+ interneurons for therapeutic purposes.},
author = {Hu, Hua and Gan, Jian and Jonas, Peter M},
journal = {Science},
number = {6196},
publisher = {American Association for the Advancement of Science},
title = {Fast-spiking parvalbumin{$^+$} {GABAergic} interneurons: From cellular design to microcircuit function},
doi = {10.1126/science.1255263},
volume = {345},
year = {2014},
}
@article{2064,
abstract = {We examined the synaptic structure, quantity, and distribution of α-amino-3-hydroxy-5-methylisoxazole-4-propionic acid (AMPA)- and N-methyl-D-aspartate (NMDA)-type glutamate receptors (AMPARs and NMDARs, respectively) in rat cochlear nuclei by a highly sensitive freeze-fracture replica labeling technique. Four excitatory synapses formed by two distinct inputs, auditory nerve (AN) and parallel fibers (PF), on different cell types were analyzed. These excitatory synapse types included AN synapses on bushy cells (AN-BC synapses) and fusiform cells (AN-FC synapses) and PF synapses on FC (PF-FC synapses) and cartwheel cell spines (PF-CwC synapses). Immunogold labeling revealed differences in synaptic structure as well as AMPAR and NMDAR number and/or density in both AN and PF synapses, indicating a target-dependent organization. The immunogold receptor labeling also identified differences in the synaptic organization of FCs based on AN or PF connections, indicating an input-dependent organization in FCs. Among the four excitatory synapse types, the AN-BC synapses were the smallest and had the most densely packed intramembrane particles (IMPs), whereas the PF-CwC synapses were the largest and had sparsely packed IMPs. All four synapse types showed positive correlations between the IMP-cluster area and the AMPAR number, indicating a common intrasynapse-type relationship for glutamatergic synapses. Immunogold particles for AMPARs were distributed over the entire area of individual AN synapses; PF synapses often showed synaptic areas devoid of labeling. The gold-labeling for NMDARs occurred in a mosaic fashion, with less positive correlations between the IMP-cluster area and the NMDAR number. Our observations reveal target- and input-dependent features in the structure, number, and organization of AMPARs and NMDARs in AN and PF synapses.},
author = {Rubio, María and Fukazawa, Yugo and Kamasawa, Naomi and Clarkson, Cheryl and Molnár, Elek and Shigemoto, Ryuichi},
journal = {Journal of Comparative Neurology},
number = {18},
pages = {4023 -- 4042},
publisher = {Wiley-Blackwell},
title = {Target- and input-dependent organization of {AMPA} and {NMDA} receptors in synaptic connections of the cochlear nucleus},
doi = {10.1002/cne.23654},
volume = {522},
year = {2014},
}
@inproceedings{2082,
abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting by the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight $\ell q^2/2^c$ bound for this step, improving over the trivial bound of $\ell^2 q^2/2^c$.
The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
booktitle = {Advances in Cryptology -- CRYPTO 2014},
series = {Lecture Notes in Computer Science},
editor = {Garay, Juan and Gennaro, Rosario},
location = {Santa Barbara, USA},
number = {1},
pages = {113 -- 130},
publisher = {Springer},
title = {The exact {PRF}-security of {NMAC} and {HMAC}},
doi = {10.1007/978-3-662-44371-2_7},
volume = {8616},
year = {2014},
}