@article{2716,
abstract = {Multi-dimensional mean-payoff and energy games provide the mathematical foundation for the quantitative study of reactive systems, and play a central role in the emerging quantitative theory of verification and synthesis. In this work, we study the strategy synthesis problem for games with such multi-dimensional objectives along with a parity condition, a canonical way to express ω-regular conditions. While in general, the winning strategies in such games may require infinite memory, for synthesis the most relevant problem is the construction of a finite-memory winning strategy (if one exists). Our main contributions are as follows. First, we show a tight exponential bound (matching upper and lower bounds) on the memory required for finite-memory winning strategies in both multi-dimensional mean-payoff and energy games along with parity objectives. This significantly improves the triple exponential upper bound for multi energy games (without parity) that could be derived from results in literature for games on vector addition systems with states. Second, we present an optimal symbolic and incremental algorithm to compute a finite-memory winning strategy (if one exists) in such games. Finally, we give a complete characterization of when finite memory of strategies can be traded off for randomness. In particular, we show that for one-dimension mean-payoff parity games, randomized memoryless strategies are as powerful as their pure finite-memory counterparts.},
author = {Chatterjee, Krishnendu and Randour, Mickael and Raskin, Jean-François},
journal = {Acta Informatica},
number = {3-4},
pages = {129--163},
publisher = {Springer},
title = {{Strategy synthesis for multi-dimensional quantitative objectives}},
doi = {10.1007/s00236-013-0182-6},
volume = {51},
year = {2014},
}
@article{2038,
abstract = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A. and Kupferman, Orna},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Temporal specifications with accumulative values}},
doi = {10.1145/2629686},
volume = {15},
year = {2014},
}
@inproceedings{2189,
abstract = {En apprentissage automatique, nous parlons d'adaptation de domaine lorsque les données de test (cibles) et d'apprentissage (sources) sont générées selon différentes distributions. Nous devons donc développer des algorithmes de classification capables de s'adapter à une nouvelle distribution, pour laquelle aucune information sur les étiquettes n'est disponible. Nous attaquons cette problématique sous l'angle de l'approche PAC-Bayésienne qui se focalise sur l'apprentissage de modèles définis comme des votes de majorité sur un ensemble de fonctions. Dans ce contexte, nous introduisons PV-MinCq une version adaptative de l'algorithme (non adaptatif) MinCq. PV-MinCq suit le principe suivant. Nous transférons les étiquettes sources aux points cibles proches pour ensuite appliquer MinCq sur l'échantillon cible ``auto-étiqueté'' (justifié par une borne théorique). Plus précisément, nous définissons un auto-étiquetage non itératif qui se focalise dans les régions où les distributions marginales source et cible sont les plus similaires. Dans un second temps, nous étudions l'influence de notre auto-étiquetage pour en déduire une procédure de validation des hyperparamètres. Finalement, notre approche montre des résultats empiriques prometteurs.},
author = {Morvant, Emilie},
booktitle = {{Conférence francophone sur l'Apprentissage automatique (CAp)}},
location = {Saint-Etienne, France},
pages = {49--58},
publisher = {Elsevier},
title = {{Adaptation de domaine de vote de majorité par auto-étiquetage non itératif}},
volume = {1},
year = {2014},
}
@inproceedings{1516,
abstract = {We present a rigorous derivation of the BCS gap equation for superfluid fermionic gases with point interactions. Our starting point is the BCS energy functional, whose minimizer we investigate in the limit when the range of the interaction potential goes to zero.},
author = {Bräunlich, Gerhard and Hainzl, Christian and Seiringer, Robert},
booktitle = {Proceedings of the QMath12 Conference},
location = {Berlin, Germany},
pages = {127--137},
publisher = {World Scientific Publishing},
title = {{On the BCS gap equation for superfluid fermionic gases}},
doi = {10.1142/9789814618144_0007},
year = {2014},
}
@article{2083,
abstract = {Understanding the effects of sex and migration on adaptation to novel environments remains a key problem in evolutionary biology. Using a single-cell alga Chlamydomonas reinhardtii, we investigated how sex and migration affected rates of evolutionary rescue in a sink environment, and subsequent changes in fitness following evolutionary rescue. We show that sex and migration affect both the rate of evolutionary rescue and subsequent adaptation. However, their combined effects change as the populations adapt to a sink habitat. Both sex and migration independently increased rates of evolutionary rescue, but the effect of sex on subsequent fitness improvements, following initial rescue, changed with migration, as sex was beneficial in the absence of migration but constraining adaptation when combined with migration. These results suggest that sex and migration are beneficial during the initial stages of adaptation, but can become detrimental as the population adapts to its environment.},
author = {Lagator, Mato and Morgan, Andrew and Neve, Paul and Colegrave, Nick},
journal = {Evolution},
number = {8},
pages = {2296--2305},
publisher = {Wiley},
title = {{Role of sex and migration in adaptation to sink environments}},
doi = {10.1111/evo.12440},
volume = {68},
year = {2014},
}
@article{119,
abstract = {Observations of flowing granular matter have suggested that same-material tribocharging depends on particle size, typically rendering large grains positive and small ones negative. Models assuming the transfer of trapped electrons can account for this trend, but have not been validated. Tracking individual grains in an electric field, we show quantitatively that charge is transferred based on size between materially identical grains. However, the surface density of trapped electrons, measured independently by thermoluminescence techniques, is orders of magnitude too small to account for the scale of charge transferred. This reveals that trapped electrons are not a necessary ingredient for same-material tribocharging.},
author = {Waitukaitis, Scott R. and Lee, Victor and Pierson, James and Forman, Steven and Jaeger, Heinrich},
journal = {Physical Review Letters},
number = {21},
pages = {218001},
publisher = {American Physical Society},
title = {{Size-dependent same-material tribocharging in insulating grains}},
doi = {10.1103/PhysRevLett.112.218001},
volume = {112},
year = {2014},
}
@inproceedings{1702,
abstract = {In this paper we present INTERHORN, a solver for recursion-free Horn clauses. The main application domain of INTERHORN lies in solving interpolation problems arising in software verification. We show how a range of interpolation problems, including path, transition, nested, state/transition and well-founded interpolation can be handled directly by INTERHORN. By detailing these interpolation problems and their Horn clause representations, we hope to encourage the emergence of a common back-end interpolation interface useful for diverse verification tools.},
author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS},
location = {Vienna, Austria},
pages = {31--38},
publisher = {Open Publishing Association},
title = {{Generalised interpolation by solving recursion-free {Horn} clauses}},
doi = {10.4204/EPTCS.169.5},
volume = {169},
year = {2014},
}
@article{1889,
author = {Bräunlich, Gerhard and Hainzl, Christian and Seiringer, Robert},
title = {{Translation-invariant quasi-free states for fermionic systems and the BCS approximation}},
abstract = {We study translation-invariant quasi-free states for a system of fermions with two-particle interactions. The associated energy functional is similar to the BCS functional but also includes direct and exchange energies. We show that for suitable short-range interactions, these latter terms only lead to a renormalization of the chemical potential, with the usual properties of the BCS functional left unchanged. Our analysis thus represents a rigorous justification of part of the BCS approximation. We give bounds on the critical temperature below which the system displays superfluidity.},
journal = {Reviews in Mathematical Physics},
volume = {26},
number = {7},
publisher = {World Scientific Publishing},
doi = {10.1142/S0129055X14500123},
year = {2014},
}
@article{1896,
abstract = {Biopolymer length regulation is a complex process that involves a large number of biological, chemical, and physical subprocesses acting simultaneously across multiple spatial and temporal scales. An illustrative example important for genomic stability is the length regulation of telomeres - nucleoprotein structures at the ends of linear chromosomes consisting of tandemly repeated DNA sequences and a specialized set of proteins. Maintenance of telomeres is often facilitated by the enzyme telomerase but, particularly in telomerase-free systems, the maintenance of chromosomal termini depends on alternative lengthening of telomeres (ALT) mechanisms mediated by recombination. Various linear and circular DNA structures were identified to participate in ALT, however, dynamics of the whole process is still poorly understood. We propose a chemical kinetics model of ALT with kinetic rates systematically derived from the biophysics of DNA diffusion and looping. The reaction system is reduced to a coagulation-fragmentation system by quasi-steady-state approximation. The detailed treatment of kinetic rates yields explicit formulas for expected size distributions of telomeres that demonstrate the key role played by the J factor, a quantitative measure of bending of polymers. The results are in agreement with experimental data and point out interesting phenomena: an appearance of very long telomeric circles if the total telomere density exceeds a critical value (excess mass) and a nonlinear response of the telomere size distributions to the amount of telomeric DNA in the system. The results can be of general importance for understanding dynamics of telomeres in telomerase-independent systems as this mode of telomere maintenance is similar to the situation in tumor cells lacking telomerase activity. Furthermore, due to its universality, the model may also serve as a prototype of an interaction between linear and circular DNA structures in various settings.},
author = {Kollár, Richard and Boďová, Katarína and Nosek, Jozef and Tomáška, Ľubomír},
journal = {Physical Review E: Statistical, Nonlinear, and Soft Matter Physics},
number = {3},
pages = {032701},
publisher = {American Physical Society},
title = {{Mathematical model of alternative mechanism of telomere length maintenance}},
doi = {10.1103/PhysRevE.89.032701},
volume = {89},
year = {2014},
}
@article{1923,
abstract = {We derive the equations for a thin, axisymmetric elastic shell subjected to an internal active stress giving rise to active tension and moments within the shell. We discuss the stability of a cylindrical elastic shell and its response to a localized change in internal active stress. This description is relevant to describe the cellular actomyosin cortex, a thin shell at the cell surface behaving elastically at a short timescale and subjected to active internal forces arising from myosin molecular motor activity. We show that the recent observations of cell deformation following detachment of adherent cells (Maître J-L et al 2012 Science 338 253-6) are well accounted for by this mechanical description. The actin cortex elastic and bending moduli can be obtained from a quantitative analysis of cell shapes observed in these experiments. Our approach thus provides a non-invasive, imaging-based method for the extraction of cellular physical parameters.},
author = {Berthoumieux, Hélène and Maître, Jean-Léon and Heisenberg, Carl-Philipp J. and Paluch, Ewa and Jülicher, Frank and Salbreux, Guillaume},
journal = {New Journal of Physics},
number = {6},
publisher = {IOP Publishing Ltd.},
title = {{Active elastic thin shell theory for cellular deformations}},
doi = {10.1088/1367-2630/16/6/065005},
volume = {16},
year = {2014},
}
@article{1935,
abstract = {We consider Ising models in d = 2 and d = 3 dimensions with nearest neighbor ferromagnetic and long-range antiferromagnetic interactions, the latter decaying as (distance)-p, p > 2d, at large distances. If the strength J of the ferromagnetic interaction is larger than a critical value J c, then the ground state is homogeneous. It has been conjectured that when J is smaller than but close to J c, the ground state is periodic and striped, with stripes of constant width h = h(J), and h → ∞ as J → Jc -. (In d = 3 stripes mean slabs, not columns.) Here we rigorously prove that, if we normalize the energy in such a way that the energy of the homogeneous state is zero, then the ratio e 0(J)/e S(J) tends to 1 as J → Jc -, with e S(J) being the energy per site of the optimal periodic striped/slabbed state and e 0(J) the actual ground state energy per site of the system. Our proof comes with explicit bounds on the difference e 0(J)-e S(J) at small but positive J c-J, and also shows that in this parameter range the ground state is striped/slabbed in a certain sense: namely, if one looks at a randomly chosen window, of suitable size ℓ (very large compared to the optimal stripe size h(J)), one finds a striped/slabbed state with high probability.},
author = {Giuliani, Alessandro and Lieb, Elliott and Seiringer, Robert},
journal = {Communications in Mathematical Physics},
number = {1},
pages = {333--350},
publisher = {Springer},
title = {{Formation of stripes and slabs near the ferromagnetic transition}},
doi = {10.1007/s00220-014-1923-2},
volume = {331},
year = {2014},
}
@article{1904,
abstract = {We prove a Strichartz inequality for a system of orthonormal functions, with an optimal behavior of the constant in the limit of a large number of functions. The estimate generalizes the usual Strichartz inequality, in the same fashion as the Lieb-Thirring inequality generalizes the Sobolev inequality. As an application, we consider the Schrödinger equation with a time-dependent potential and we show the existence of the wave operator in Schatten spaces.},
author = {Frank, Rupert and Lewin, Mathieu and Lieb, Elliott and Seiringer, Robert},
journal = {Journal of the European Mathematical Society},
number = {7},
pages = {1507--1526},
publisher = {European Mathematical Society},
title = {{Strichartz inequality for orthonormal functions}},
doi = {10.4171/JEMS/467},
volume = {16},
year = {2014},
}
@article{1909,
abstract = {Summary: Phenotypes are often environmentally dependent, which requires organisms to track environmental change. The challenge for organisms is to construct phenotypes using the most accurate environmental cue. Here, we use a quantitative genetic model of adaptation by additive genetic variance, within- and transgenerational plasticity via linear reaction norms and indirect genetic effects respectively. We show how the relative influence on the eventual phenotype of these components depends on the predictability of environmental change (fast or slow, sinusoidal or stochastic) and the developmental lag τ between when the environment is perceived and when selection acts. We then decompose expected mean fitness into three components (variance load, adaptation and fluctuation load) to study the fitness costs of within- and transgenerational plasticity. A strongly negative maternal effect coefficient m minimizes the variance load, but a strongly positive m minimises the fluctuation load. The adaptation term is maximized closer to zero, with positive or negative m preferred under different environmental scenarios. Phenotypic plasticity is higher when τ is shorter and when the environment changes frequently between seasonal extremes. Expected mean population fitness is highest away from highest observed levels of phenotypic plasticity. Within- and transgenerational plasticity act in concert to deliver well-adapted phenotypes, which emphasizes the need to study both simultaneously when investigating phenotypic evolution.},
author = {Ezard, Thomas and Prizak, Roshan and Hoyle, Rebecca},
journal = {Functional Ecology},
number = {3},
pages = {693--701},
publisher = {Wiley-Blackwell},
title = {{The fitness costs of adaptation via phenotypic plasticity and maternal effects}},
doi = {10.1111/1365-2435.12207},
volume = {28},
year = {2014},
}
@article{2041,
abstract = {The hippocampus mediates several higher brain functions, such as learning, memory, and spatial coding. The input region of the hippocampus, the dentate gyrus, plays a critical role in these processes. Several lines of evidence suggest that the dentate gyrus acts as a preprocessor of incoming information, preparing it for subsequent processing in CA3. For example, the dentate gyrus converts input from the entorhinal cortex, where cells have multiple spatial fields, into the spatially more specific place cell activity characteristic of the CA3 region. Furthermore, the dentate gyrus is involved in pattern separation, transforming relatively similar input patterns into substantially different output patterns. Finally, the dentate gyrus produces a very sparse coding scheme in which only a very small fraction of neurons are active at any one time.},
author = {Jonas, Peter M. and Lisman, John},
journal = {Frontiers in Neural Circuits},
pages = {107},
publisher = {Frontiers Research Foundation},
title = {{Structure, function and plasticity of hippocampal dentate gyrus microcircuits}},
doi = {10.3389/fncir.2014.00107},
volume = {8},
year = {2014},
}
@inproceedings{2046,
abstract = {We introduce policy-based signatures (PBS), where a signer can only sign messages conforming to some authority-specified policy. The main requirements are unforgeability and privacy, the latter meaning that signatures not reveal the policy. PBS offers value along two fronts: (1) On the practical side, they allow a corporation to control what messages its employees can sign under the corporate key. (2) On the theoretical side, they unify existing work, capturing other forms of signatures as special cases or allowing them to be easily built. Our work focuses on definitions of PBS, proofs that this challenging primitive is realizable for arbitrary policies, efficient constructions for specific policies, and a few representative applications.},
author = {Bellare, Mihir and Fuchsbauer, Georg},
booktitle = {{Public-Key Cryptography -- PKC 2014}},
series = {Lecture Notes in Computer Science},
editor = {Krawczyk, Hugo},
location = {Buenos Aires, Argentina},
pages = {520--537},
publisher = {Springer},
title = {{Policy-based signatures}},
doi = {10.1007/978-3-642-54631-0_30},
volume = {8383},
year = {2014},
}
@article{2058,
abstract = {We present a method for smoothly blending between existing liquid animations. We introduce a semi-automatic method for matching two existing liquid animations, which we use to create new fluid motion that plausibly interpolates the input. Our contributions include a new space-time non-rigid iterative closest point algorithm that incorporates user guidance, a subsampling technique for efficient registration of meshes with millions of vertices, and a fast surface extraction algorithm that produces 3D triangle meshes from a 4D space-time surface. Our technique can be used to instantly create hundreds of new simulations, or to interactively explore complex parameter spaces. Our method is guaranteed to produce output that does not deviate from the input animations, and it generalizes to multiple dimensions. Because our method runs at interactive rates after the initial precomputation step, it has potential applications in games and training simulations.},
author = {Raveendran, Karthik and Wojtan, Christopher J. and Thuerey, Nils and Turk, Greg},
journal = {ACM Transactions on Graphics},
location = {Vancouver, Canada},
number = {4},
publisher = {ACM},
title = {{Blending liquids}},
doi = {10.1145/2601097.2601126},
volume = {33},
year = {2014},
}
@article{2022,
abstract = {Radial glial progenitors (RGPs) are responsible for producing nearly all neocortical neurons. To gain insight into the patterns of RGP division and neuron production, we quantitatively analyzed excitatory neuron genesis in the mouse neocortex using Mosaic Analysis with Double Markers, which provides single-cell resolution of progenitor division patterns and potential in vivo. We found that RGPs progress through a coherent program in which their proliferative potential diminishes in a predictable manner. Upon entry into the neurogenic phase, individual RGPs produce ∼8–9 neurons distributed in both deep and superficial layers, indicating a unitary output in neuronal production. Removal of OTX1, a transcription factor transiently expressed in RGPs, results in both deep- and superficial-layer neuron loss and a reduction in neuronal unit size. Moreover, ∼1/6 of neurogenic RGPs proceed to produce glia. These results suggest that progenitor behavior and histogenesis in the mammalian neocortex conform to a remarkably orderly and deterministic program.},
author = {Gao, Peng and Postiglione, Maria P. and Krieger, Teresa and Hernandez, Luisirene and Wang, Chao and Han, Zhi and Streicher, Carmen and Papusheva, Ekaterina and Insolera, Ryan and Chugh, Kritika and Kodish, Oren and Huang, Kun and Simons, Benjamin and Luo, Liqun and Hippenmeyer, Simon and Shi, Song},
journal = {Cell},
number = {4},
pages = {775--788},
publisher = {Cell Press},
title = {{Deterministic progenitor behavior and unitary production of neurons in the neocortex}},
doi = {10.1016/j.cell.2014.10.027},
volume = {159},
year = {2014},
}
@inproceedings{2173,
abstract = {In this work we introduce a new approach to co-classification, i.e. the task of jointly classifying multiple, otherwise independent, data samples. The method we present, named CoConut, is based on the idea of adding a regularizer in the label space to encode certain priors on the resulting labelings. A regularizer that encourages labelings that are smooth across the test set, for instance, can be seen as a test-time variant of the cluster assumption, which has been proven useful at training time in semi-supervised learning. A regularizer that introduces a preference for certain class proportions can be regarded as a prior distribution on the class labels. CoConut can build on existing classifiers without making any assumptions on how they were obtained and without the need to re-train them. The use of a regularizer adds a new level of flexibility. It allows the integration of potentially new information at test time, even in other modalities than what the classifiers were trained on. We evaluate our framework on six datasets, reporting a clear performance gain in classification accuracy compared to the standard classification setup that predicts labels for each test sample separately.},
author = {Khamis, Sameh and Lampert, Christoph},
booktitle = {Proceedings of the British Machine Vision Conference 2014},
location = {Nottingham, UK},
publisher = {BMVA Press},
title = {{CoConut: Co-classification with output space regularization}},
year = {2014},
}
@article{2178,
abstract = {We consider the three-state toric homogeneous Markov chain model (THMC) without loops and initial parameters. At time T, the size of the design matrix is 6 × 3 · 2T-1 and the convex hull of its columns is the model polytope. We study the behavior of this polytope for T ≥ 3 and we show that it is defined by 24 facets for all T ≥ 5. Moreover, we give a complete description of these facets. From this, we deduce that the toric ideal associated with the design matrix is generated by binomials of degree at most 6. Our proof is based on a result due to Sturmfels, who gave a bound on the degree of the generators of a toric ideal, provided the normality of the corresponding toric variety. In our setting, we established the normality of the toric variety associated to the THMC model by studying the geometric properties of the model polytope.},
author = {Haws, David and Martin Del Campo Sanchez, Abraham and Takemura, Akimichi and Yoshida, Ruriko},
journal = {Beiträge zur Algebra und Geometrie},
number = {1},
pages = {161--188},
publisher = {Springer},
title = {{Markov degree of the three-state toric homogeneous Markov chain model}},
doi = {10.1007/s13366-013-0178-y},
volume = {55},
year = {2014},
}
@article{2180,
abstract = {Weighted majority votes allow one to combine the output of several classifiers or voters. MinCq is a recent algorithm for optimizing the weight of each voter based on the minimization of a theoretical bound over the risk of the vote with elegant PAC-Bayesian generalization guarantees. However, while it has demonstrated good performance when combining weak classifiers, MinCq cannot make use of the useful a priori knowledge that one may have when using a mixture of weak and strong voters. In this paper, we propose P-MinCq, an extension of MinCq that can incorporate such knowledge in the form of a constraint over the distribution of the weights, along with general proofs of convergence that stand in the sample compression setting for data-dependent voters. The approach is applied to a vote of k-NN classifiers with a specific modeling of the voters' performance. P-MinCq significantly outperforms the classic k-NN classifier, a symmetric NN and MinCq using the same voters. We show that it is also competitive with LMNN, a popular metric learning algorithm, and that combining both approaches further reduces the error.},
author = {Bellet, Aurélien and Habrard, Amaury and Morvant, Emilie and Sebban, Marc},
journal = {Machine Learning},
number = {1-2},
pages = {129--154},
publisher = {Springer},
title = {{Learning a priori constrained weighted majority votes}},
doi = {10.1007/s10994-014-5462-z},
volume = {97},
year = {2014},
}