@inproceedings{2046,
abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.},
author = {Bellare, Mihir and Fuchsbauer, Georg},
booktitle = {Public-Key Cryptography -- PKC 2014},
series = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {520 -- 537},
publisher = {Springer},
title = {{Policy-based signatures}},
doi = {10.1007/978-3-642-54631-0_30},
volume = {8383},
year = {2014},
}
@article{2003,
abstract = {Learning can be facilitated by previous knowledge when it is organized into relational representations forming schemas. In this issue of Neuron, McKenzie et al. (2014) demonstrate that the hippocampus rapidly forms interrelated, hierarchical memory representations to support schema-based learning.},
author = {O'Neill, Joseph and Csicsvari, Jozsef L},
journal = {Neuron},
number = {1},
pages = {8 -- 10},
publisher = {Elsevier},
title = {{Learning by example in the hippocampus}},
doi = {10.1016/j.neuron.2014.06.013},
volume = {83},
year = {2014},
}
@article{2058,
abstract = {We present a method for smoothly blending between existing liquid animations. We introduce a semi-automatic method for matching two existing liquid animations, which we use to create new fluid motion that plausibly interpolates the input. Our contributions include a new space-time non-rigid iterative closest point algorithm that incorporates user guidance, a subsampling technique for efficient registration of meshes with millions of vertices, and a fast surface extraction algorithm that produces 3D triangle meshes from a 4D space-time surface. Our technique can be used to instantly create hundreds of new simulations, or to interactively explore complex parameter spaces. Our method is guaranteed to produce output that does not deviate from the input animations, and it generalizes to multiple dimensions. Because our method runs at interactive rates after the initial precomputation step, it has potential applications in games and training simulations.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Thuerey, Nils and Turk, Greg},
journal = {ACM Transactions on Graphics},
location = {Vancouver, Canada},
number = {4},
publisher = {ACM},
title = {{Blending liquids}},
doi = {10.1145/2601097.2601126},
volume = {33},
year = {2014},
}
@article{2022,
abstract = {Radial glial progenitors (RGPs) are responsible for producing nearly all neocortical neurons. To gain insight into the patterns of RGP division and neuron production, we quantitatively analyzed excitatory neuron genesis in the mouse neocortex using Mosaic Analysis with Double Markers, which provides single-cell resolution of progenitor division patterns and potential in vivo. We found that RGPs progress through a coherent program in which their proliferative potential diminishes in a predictable manner. Upon entry into the neurogenic phase, individual RGPs produce ∼8–9 neurons distributed in both deep and superficial layers, indicating a unitary output in neuronal production. Removal of OTX1, a transcription factor transiently expressed in RGPs, results in both deep- and superficial-layer neuron loss and a reduction in neuronal unit size. Moreover, ∼1/6 of neurogenic RGPs proceed to produce glia. These results suggest that progenitor behavior and histogenesis in the mammalian neocortex conform to a remarkably orderly and deterministic program.},
author = {Gao, Peng and Postiglione, Maria P and Krieger, Teresa and Hernandez, Luisirene and Wang, Chao and Han, Zhi and Streicher, Carmen and Papusheva, Ekaterina and Insolera, Ryan and Chugh, Kritika and Kodish, Oren and Huang, Kun and Simons, Benjamin and Luo, Liqun and Hippenmeyer, Simon and Shi, Song},
journal = {Cell},
number = {4},
pages = {775 -- 788},
publisher = {Cell Press},
title = {{Deterministic progenitor behavior and unitary production of neurons in the neocortex}},
doi = {10.1016/j.cell.2014.10.027},
volume = {159},
year = {2014},
}
@inproceedings{2173,
abstract = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.},
author = {Khamis, Sameh and Lampert, Christoph},
booktitle = {Proceedings of the British Machine Vision Conference 2014},
location = {Nottingham, UK},
publisher = {BMVA Press},
title = {{CoConut: Co-classification with output space regularization}},
year = {2014},
}
@article{2178,
abstract = {We consider the three-state toric homogeneous Markov chain model (THMC) without loops and initial parameters. At time T, the size of the design matrix is 6 × 3 · 2T-1 and the convex hull of its columns is the model polytope. We study the behavior of this polytope for T ≥ 3 and we show that it is defined by 24 facets for all T ≥ 5. Moreover, we give a complete description of these facets. From this, we deduce that the toric ideal associated with the design matrix is generated by binomials of degree at most 6. Our proof is based on a result due to Sturmfels, who gave a bound on the degree of the generators of a toric ideal, provided the normality of the corresponding toric variety. In our setting, we established the normality of the toric variety associated to the THMC model by studying the geometric properties of the model polytope.},
author = {Haws, David and Martin Del Campo Sanchez, Abraham and Takemura, Akimichi and Yoshida, Ruriko},
journal = {Beiträge zur Algebra und Geometrie},
number = {1},
pages = {161 -- 188},
publisher = {Springer},
title = {{Markov degree of the three-state toric homogeneous Markov chain model}},
doi = {10.1007/s13366-013-0178-y},
volume = {55},
year = {2014},
}
@article{2180,
abstract = {Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.},
author = {Bellet, Aurélien and Habrard, Amaury and Morvant, Emilie and Sebban, Marc},
journal = {Machine Learning},
number = {1-2},
pages = {129 -- 154},
publisher = {Springer},
title = {{Learning a priori constrained weighted majority votes}},
doi = {10.1007/s10994-014-5462-z},
volume = {97},
year = {2014},
}
@inproceedings{2185,
abstract = {We revisit the classical problem of converting an imperfect source of randomness into a usable cryptographic key. Assume that we have some cryptographic application P that expects a uniformly random m-bit key R and ensures that the best attack (in some complexity class) against P(R) has success probability at most δ. Our goal is to design a key-derivation function (KDF) h that converts any random source X of min-entropy k into a sufficiently "good" key h(X), guaranteeing that P(h(X)) has comparable security δ′ which is 'close' to δ. Seeded randomness extractors provide a generic way to solve this problem for all applications P, with resulting security δ′ = O(δ), provided that we start with entropy k ≥ m + 2 log (1/δ) - O(1). By a result of Radhakrishnan and Ta-Shma, this bound on k (called the "RT-bound") is also known to be tight in general. Unfortunately, in many situations the loss of 2 log (1/δ) bits of entropy is unacceptable. This motivates the study KDFs with less entropy waste by placing some restrictions on the source X or the application P. In this work we obtain the following new positive and negative results in this regard: - Efficient samplability of the source X does not help beat the RT-bound for general applications. This resolves the SRT (samplable RT) conjecture of Dachman-Soled et al. [DGKM12] in the affirmative, and also shows that the existence of computationally-secure extractors beating the RT-bound implies the existence of one-way functions. - We continue in the line of work initiated by Barak et al. [BDK+11] and construct new information-theoretic KDFs which beat the RT-bound for large but restricted classes of applications. Specifically, we design efficient KDFs that work for all unpredictability applications P (e.g., signatures, MACs, one-way functions, etc.) 
and can either: (1) extract all of the entropy k = m with a very modest security loss δ′ = O(δ·log (1/δ)), or alternatively, (2) achieve essentially optimal security δ′ = O(δ) with a very modest entropy loss k ≥ m + loglog (1/δ). In comparison, the best prior results from [BDK+11] for this class of applications would only guarantee δ′ = O(√δ) when k = m, and would need k ≥ m + log (1/δ) to get δ′ = O(δ). - The weaker bounds of [BDK+11] hold for a larger class of so-called "square- friendly" applications (which includes all unpredictability, but also some important indistinguishability, applications). Unfortunately, we show that these weaker bounds are tight for the larger class of applications. - We abstract out a clean, information-theoretic notion of (k,δ,δ′)- unpredictability extractors, which guarantee "induced" security δ′ for any δ-secure unpredictability application P, and characterize the parameters achievable for such unpredictability extractors. Of independent interest, we also relate this notion to the previously-known notion of (min-entropy) condensers, and improve the state-of-the-art parameters for such condensers.},
author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Wichs, Daniel},
booktitle = {Advances in Cryptology -- EUROCRYPT 2014},
series = {Lecture Notes in Computer Science},
editor = {Nguyen, Phong and Oswald, Elisabeth},
location = {Copenhagen, Denmark},
pages = {93 -- 110},
publisher = {Springer},
title = {{Key derivation without entropy waste}},
doi = {10.1007/978-3-642-55220-5_6},
volume = {8441},
year = {2014},
}
@article{2154,
abstract = {A result of Boros and Füredi (d = 2) and of Bárány (arbitrary d) asserts that for every d there exists cd > 0 such that for every n-point set P ⊂ ℝd, some point of ℝd is covered by at least cd (n choose d+1) of the d-simplices spanned by the points of P. The largest possible value of cd has been the subject of ongoing research. Recently Gromov improved the existing lower bounds considerably by introducing a new, topological proof method. We provide an exposition of the combinatorial component of Gromov's approach, in terms accessible to combinatorialists and discrete geometers, and we investigate the limits of his method. In particular, we give tighter bounds on the cofilling profiles for the (n - 1)-simplex. These bounds yield a minor improvement over Gromov's lower bounds on cd for large d, but they also show that the room for further improvement through the cofilling profiles alone is quite small. We also prove a slightly better lower bound for c3 by an approach using an additional structure besides the cofilling profiles. We formulate a combinatorial extremal problem whose solution might perhaps lead to a tight lower bound for cd.},
author = {Matoušek, Jiří and Wagner, Uli},
journal = {Discrete \& Computational Geometry},
number = {1},
pages = {1 -- 33},
publisher = {Springer},
title = {{On Gromov's method of selecting heavily covered points}},
doi = {10.1007/s00454-014-9584-7},
volume = {52},
year = {2014},
}
@inproceedings{2159,
abstract = {Motivated by topological Tverberg-type problems, we consider multiple (double, triple, and higher multiplicity) self-intersection points of maps from finite simplicial complexes (compact polyhedra) into ℝd and study conditions under which such multiple points can be eliminated. The most classical case is that of embeddings (i.e., maps without double points) of a κ-dimensional complex K into ℝ2κ. For this problem, the work of van Kampen, Shapiro, and Wu provides an efficiently testable necessary condition for embeddability (namely, vanishing of the van Kampen obstruction). For κ ≥ 3, the condition is also sufficient, and yields a polynomial-time algorithm for deciding embeddability: One starts with an arbitrary map f : K→ℝ2κ, which generically has finitely many double points; if κ ≥ 3 and if the obstruction vanishes then one can successively remove these double points by local modifications of the map f. One of the main tools is the famous Whitney trick that permits eliminating pairs of double points of opposite intersection sign. We are interested in generalizing this approach to intersection points of higher multiplicity. We call a point y ∈ ℝd an r-fold Tverberg point of a map f : Kκ → ℝd if y lies in the intersection f(σ1) ∩ ⋯ ∩ f(σr) of the images of r pairwise disjoint simplices of K. The analogue of (non-)embeddability that we study is the problem Tverbergκ r→d: Given a κ-dimensional complex K, does it satisfy a Tverberg-type theorem with parameters r and d, i.e., does every map f : Kκ → ℝd have an r-fold Tverberg point? Here, we show that for fixed r, κ and d of the form d = rm and κ = (r-1)m, m ≥ 3, there is a polynomial-time algorithm for deciding this (based on the vanishing of a cohomological obstruction, as in the case of embeddings). 
Our main tool is an r-fold analogue of the Whitney trick: Given r pairwise disjoint simplices of K such that the intersection of their images contains two r-fold Tverberg points y+ and y- of opposite intersection sign, we can eliminate y+ and y- by a local isotopy of f. In a subsequent paper, we plan to develop this further and present a generalization of the classical Haefliger-Weber Theorem (which yields a necessary and sufficient condition for embeddability of κ-complexes into ℝd for a wider range of dimensions) to intersection points of higher multiplicity.},
author = {Mabillard, Isaac and Wagner, Uli},
booktitle = {Proceedings of the Annual Symposium on Computational Geometry},
location = {Kyoto, Japan},
pages = {171 -- 180},
publisher = {ACM},
title = {{Eliminating Tverberg points, I. An analogue of the Whitney trick}},
doi = {10.1145/2582112.2582134},
year = {2014},
}