@article{1133,
abstract = {It is a common knowledge that an effective interaction of a quantum impurity with an electromagnetic field can be screened by surrounding charge carriers, whether mobile or static. Here we demonstrate that very strong, "anomalous" screening can take place in the presence of a neutral, weakly polarizable environment, due to an exchange of orbital angular momentum between the impurity and the bath. Furthermore, we show that it is possible to generalize all phenomena related to isolated impurities in an external field to the case when a many-body environment is present, by casting the problem in terms of the angulon quasiparticle. As a result, the relevant observables such as the effective Rabi frequency, geometric phase, and impurity spatial alignment are straightforward to evaluate in terms of a single parameter: the angular-momentum-dependent screening factor.},
author = {Yakaboylu, Enderalp and Lemeshko, Mikhail},
issn = {00319007},
journal = {Physical Review Letters},
number = {8},
pages = {085302},
publisher = {American Physical Society},
title = {{Anomalous screening of quantum impurities by a neutral environment}},
doi = {10.1103/PhysRevLett.118.085302},
volume = {118},
year = {2017},
}
@article{1152,
abstract = {We propose a new memetic strategy that can solve the multi-physics, complex inverse problems, formulated as the multi-objective optimization ones, in which objectives are misfits between the measured and simulated states of various governing processes. The multi-deme structure of the strategy allows for both, intensive, relatively cheap exploration with a moderate accuracy and more accurate search many regions of Pareto set in parallel. The special type of selection operator prefers the coherent alternative solutions, eliminating artifacts appearing in the particular processes. The additional accuracy increment is obtained by the parallel convex searches applied to the local scalarizations of the misfit vector. The strategy is dedicated for solving ill-conditioned problems, for which inverting the single physical process can lead to the ambiguous results. The skill of the selection in artifact elimination is shown on the benchmark problem, while the whole strategy was applied for identification of oil deposits, where the misfits are related to various frequencies of the magnetic and electric waves of the magnetotelluric measurements. 2016 Elsevier B.V.},
author = {Gajda-Zagorska, Ewa P and Schaefer, Robert and Smołka, Maciej and Pardo, David and Alvarez Aramberri, Julen},
issn = {18777503},
journal = {Journal of Computational Science},
pages = {85--94},
publisher = {Elsevier},
title = {{A multi-objective memetic inverse solver reinforced by local optimization methods}},
doi = {10.1016/j.jocs.2016.06.007},
volume = {18},
year = {2017},
}
@article{1146,
abstract = {Aim: The present study was to compare the effects of nicotinic acid and nicotinamide on the plasma methyl donors, choline and betaine. Methods: Thirty adult subjects were randomly divided into three groups of equal size, and orally received purified water (C group), nicotinic acid (300 mg, NA group) or nicotinamide (300 mg, NM group). Plasma nicotinamide, N 1-methylnicotinamide, homocysteine, betaine and choline levels before and 1.5-h and 3-h post-dosing, plasma normetanephrine and metanephrine concentrations at 3-h post-dosing, and the urinary excretion of N 1-methyl-2-pyridone-5-carboxamide during the test period were examined. Results: The level of 3-h plasma nicotinamide, N 1-methylnicotinamide, homocysteine, the urinary excretion of N 1-methyl-2-pyridone-5-carboxamide and pulse pressure (PP) in the NM group was 221%, 3972%, 61%, 1728% and 21.2% higher than that of the control group (P < 0.01, except homocysteine and PP P < 0.05), while the 3-h plasma betaine, normetanephrine and metanephrine level in the NM group was 24.4%, 9.4% and 11.7% lower (P < 0.05, except betaine P < 0.01), without significant difference in choline levels. Similar but less pronounced changes were observed in the NA group, with a lower level of 3-h plasma N 1-methylnicotinamide (1.90 ± 0.20 μmol/l vs. 3.62 ± 0.27 μmol/l, P < 0.01) and homocysteine (12.85 ± 1.39 μmol/l vs. 18.08 ± 1.02 μmol/l, P < 0.05) but a higher level of betaine (27.44 ± 0.71 μmol/l vs. 23.52 ± 0.61 μmol/l, P < 0.05) than that of the NM group. Conclusion: The degradation of nicotinamide consumes more betaine than that of nicotinic acid at identical doses. This difference should be taken into consideration in niacin fortification. © 2016 Elsevier Ltd and European Society for Clinical Nutrition and Metabolism.},
author = {Sun, Wuping and Zhai, Ming-Zhu and Li, Da and Zhou, Yiming and Chen, Nana and Guo, Ming and Zhou, Shisheng},
issn = {02615614},
journal = {Clinical Nutrition},
number = {4},
pages = {1136--1142},
publisher = {Churchill Livingstone},
title = {{Comparison of the effects of nicotinic acid and nicotinamide degradation on plasma betaine and choline levels}},
doi = {10.1016/j.clnu.2016.07.016},
volume = {36},
year = {2017},
}
@article{1159,
abstract = {Auxin steers numerous physiological processes in plants, making the tight control of its endogenous levels and spatiotemporal distribution a necessity. This regulation is achieved by different mechanisms, including auxin biosynthesis, metabolic conversions, degradation, and transport. Here, we introduce cis-cinnamic acid (c-CA) as a novel and unique addition to a small group of endogenous molecules affecting in planta auxin concentrations. c-CA is the photo-isomerization product of the phenylpropanoid pathway intermediate trans-CA (t-CA). When grown on c-CA-containing medium, an evolutionary diverse set of plant species were shown to exhibit phenotypes characteristic for high auxin levels, including inhibition of primary root growth, induction of root hairs, and promotion of adventitious and lateral rooting. By molecular docking and receptor binding assays, we showed that c-CA itself is neither an auxin nor an anti-auxin, and auxin profiling data revealed that c-CA does not significantly interfere with auxin biosynthesis. Single cell-based auxin accumulation assays showed that c-CA, and not t-CA, is a potent inhibitor of auxin efflux. Auxin signaling reporters detected changes in spatiotemporal distribution of the auxin response along the root of c-CA-treated plants, and long-distance auxin transport assays showed no inhibition of rootward auxin transport. Overall, these results suggest that the phenotypes of c-CA-treated plants are the consequence of a local change in auxin accumulation, induced by the inhibition of auxin efflux. This work reveals a novel mechanism how plants may regulate auxin levels and adds a novel, naturally occurring molecule to the chemical toolbox for the studies of auxin homeostasis.},
author = {Steenackers, Ward and Klíma, Petr and Quareshy, Mussa and Cesarino, Igor and Kumpf, Robert and Corneillie, Sander and Araújo, Pedro and Viaene, Tom and Goeminne, Geert and Nowack, Moritz and Ljung, Karin and Friml, Jirí and Blakeslee, Joshua and Novák, Ondřej and Zažímalová, Eva and Napier, Richard and Boerjan, Wout and Vanholme, Bartel},
issn = {00320889},
journal = {Plant Physiology},
number = {1},
pages = {552--565},
publisher = {American Society of Plant Biologists},
title = {{Cis-cinnamic acid is a novel natural auxin efflux inhibitor that promotes lateral root formation}},
doi = {10.1104/pp.16.00943},
volume = {173},
year = {2017},
}
@article{1160,
abstract = {We investigate fundamental nonlinear dynamics of ferrofluidic Taylor-Couette flow - flow confined be-tween two concentric independently rotating cylinders - consider small aspect ratio by solving the ferro-hydrodynamical equations, carrying out systematic bifurcation analysis. Without magnetic field, we find steady flow patterns, previously observed with a simple fluid, such as those containing normal one- or two vortex cells, as well as anomalous one-cell and twin-cell flow states. However, when a symmetry-breaking transverse magnetic field is present, all flow states exhibit stimulated, finite two-fold mode. Various bifurcations between steady and unsteady states can occur, corresponding to the transitions between the two-cell and one-cell states. While unsteady, axially oscillating flow states can arise, we also detect the emergence of new unsteady flow states. In particular, we uncover two new states: one contains only the azimuthally oscillating solution in the configuration of the twin-cell flow state, and an-other a rotating flow state. Topologically, these flow states are a limit cycle and a quasiperiodic solution on a two-torus, respectively. Emergence of new flow states in addition to observed ones with classical fluid, indicates that richer but potentially more controllable dynamics in ferrofluidic flows, as such flow states depend on the external magnetic field.},
author = {Altmeyer, Sebastian and Do, Younghae and Lai, Ying},
issn = {20452322},
journal = {Scientific Reports},
pages = {40012},
publisher = {Nature Publishing Group},
title = {{Dynamics of ferrofluidic flow in the Taylor-Couette system with a small aspect ratio}},
doi = {10.1038/srep40012},
volume = {7},
year = {2017},
}
@article{1161,
abstract = {Coordinated changes of cell shape are often the result of the excitable, wave-like dynamics of the actin cytoskeleton. New work shows that, in migrating cells, protrusion waves arise from mechanochemical crosstalk between adhesion sites, membrane tension and the actin protrusive machinery.},
author = {Müller, Jan and Sixt, Michael K},
issn = {09609822},
journal = {Current Biology},
number = {1},
pages = {R24--R25},
publisher = {Cell Press},
title = {{Cell migration: Making the waves}},
doi = {10.1016/j.cub.2016.11.035},
volume = {27},
year = {2017},
}
@article{1162,
abstract = {Selected universal experimental properties of high-temperature superconducting (HTS) cuprates have been singled out in the last decade. One of the pivotal challenges in this field is the designation of a consistent interpretation framework within which we can describe quantitatively the universal features of those systems. Here we analyze in a detailed manner the principal experimental data and compare them quantitatively with the approach based on a single-band model of strongly correlated electrons supplemented with strong antiferromagnetic (super)exchange interaction (the so-called t−J−U model). The model rationale is provided by estimating its microscopic parameters on the basis of the three-band approach for the Cu-O plane. We use our original full Gutzwiller wave-function solution by going beyond the renormalized mean-field theory (RMFT) in a systematic manner. Our approach reproduces very well the observed hole doping (δ) dependence of the kinetic-energy gain in the superconducting phase, one of the principal non-Bardeen-Cooper-Schrieffer features of the cuprates. The calculated Fermi velocity in the nodal direction is practically δ-independent and its universal value agrees very well with that determined experimentally. Also, a weak doping dependence of the Fermi wave vector leads to an almost constant value of the effective mass in a pure superconducting phase which is both observed in experiment and reproduced within our approach. An assessment of the currently used models (t−J, Hubbard) is carried out and the results of the canonical RMFT as a zeroth-order solution are provided for comparison to illustrate the necessity of the introduced higher-order contributions.},
author = {Spałek, Jozef and Zegrodnik, Michał and Kaczmarczyk, Jan},
issn = {24699950},
journal = {Physical Review B - Condensed Matter and Materials Physics},
number = {2},
pages = {024506},
publisher = {American Physical Society},
title = {{Universal properties of high-temperature superconductors from real-space pairing: t-J-U model and its quantitative comparison with experiment}},
doi = {10.1103/PhysRevB.95.024506},
volume = {95},
year = {2017},
}
@article{1163,
abstract = {We investigate the effect of the electron-hole (e-h) symmetry breaking on d-wave superconductivity induced by non-local effects of correlations in the generalized Hubbard model. The symmetry breaking is introduced in a two-fold manner: by the next-to-nearest neighbor hopping of electrons and by the charge-bond interaction - the off-diagonal term of the Coulomb potential. Both terms lead to a pronounced asymmetry of the superconducting order parameter. The next-to-nearest neighbor hopping enhances superconductivity for h-doping, while diminishes it for e-doping. The charge-bond interaction alone leads to the opposite effect and, additionally, to the kinetic-energy gain upon condensation in the underdoped regime. With both terms included, with similar amplitudes, the height of the superconducting dome and the critical doping remain in favor of h-doping. The influence of the charge-bond interaction on deviations from symmetry of the shape of the gap at the Fermi surface in the momentum space is briefly discussed.},
author = {Wysokiński, Marcin and Kaczmarczyk, Jan},
issn = {09538984},
journal = {Journal of Physics: Condensed Matter},
number = {8},
publisher = {IOP Publishing Ltd.},
title = {{Unconventional superconductivity in generalized Hubbard model: role of electron–hole symmetry breaking terms}},
doi = {10.1088/1361-648X/aa532f},
volume = {29},
year = {2017},
}
@article{1168,
abstract = {Optimum experimental design theory has recently been extended for parameter estimation in copula models. The use of these models allows one to gain in flexibility by considering the model parameter set split into marginal and dependence parameters. However, this separation also leads to the natural issue of estimating only a subset of all model parameters. In this work, we treat this problem with the application of the (Formula presented.)-optimality to copula models. First, we provide an extension of the corresponding equivalence theory. Then, we analyze a wide range of flexible copula models to highlight the usefulness of (Formula presented.)-optimality in many possible scenarios. Finally, we discuss how the usage of the introduced design criterion also relates to the more general issue of copula selection and optimal design for model discrimination.},
author = {Perrone, Elisa and Rappold, Andreas and Müller, Werner},
journal = {Statistical Methods and Applications},
number = {3},
pages = {403--418},
publisher = {Springer},
title = {{$D_s$-optimality in copula models}},
doi = {10.1007/s10260-016-0375-6},
volume = {26},
year = {2017},
}
@article{1169,
abstract = {Dispersal is a crucial factor in natural evolution, since it determines the habitat experienced by any population and defines the spatial scale of interactions between individuals. There is compelling evidence for systematic differences in dispersal characteristics within the same population, i.e., genotype-dependent dispersal. The consequences of genotype-dependent dispersal on other evolutionary phenomena, however, are poorly understood. In this article we investigate the effect of genotype-dependent dispersal on spatial gene frequency patterns, using a generalization of the classical diffusion model of selection and dispersal. Dispersal is characterized by the variance of dispersal (diffusion coefficient) and the mean displacement (directional advection term). We demonstrate that genotype-dependent dispersal may change the qualitative behavior of Fisher waves, which change from being “pulled” to being “pushed” wave fronts as the discrepancy in dispersal between genotypes increases. The speed of any wave is partitioned into components due to selection, genotype-dependent variance of dispersal, and genotype-dependent mean displacement. We apply our findings to wave fronts maintained by selection against heterozygotes. Furthermore, we identify a benefit of increased variance of dispersal, quantify its effect on the speed of the wave, and discuss the implications for the evolution of dispersal strategies.},
author = {Novak, Sebastian and Kollár, Richard},
issn = {00166731},
journal = {Genetics},
number = {1},
pages = {367--374},
publisher = {Genetics Society of America},
title = {{Spatial gene frequency waves under genotype-dependent dispersal}},
doi = {10.1534/genetics.116.193946},
volume = {205},
year = {2017},
}
@article{1173,
abstract = {We introduce the Voronoi functional of a triangulation of a finite set of points in the Euclidean plane and prove that among all geometric triangulations of the point set, the Delaunay triangulation maximizes the functional. This result neither extends to topological triangulations in the plane nor to geometric triangulations in three and higher dimensions.},
author = {Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg and Nikitenko, Anton},
issn = {02099683},
journal = {Combinatorica},
number = {5},
pages = {887--910},
publisher = {Springer},
title = {{The Voronoi functional is maximized by the Delaunay triangulation in the plane}},
doi = {10.1007/s00493-016-3308-y},
volume = {37},
year = {2017},
}
@inproceedings{1174,
abstract = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications for instance encryption schemes). In so called squared-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the "RT-bound", extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a "weak" key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for "weak" keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC\'13, CRYPTO\'11) Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications where derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circular matrices.},
author = {Skórski, Maciej},
booktitle = {34th Symposium on Theoretical Aspects of Computer Science (STACS 2017)},
issn = {18688969},
location = {Hannover, Germany},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Lower bounds on key derivation for square-friendly applications}},
doi = {10.4230/LIPIcs.STACS.2017.57},
volume = {66},
year = {2017},
}
@inproceedings{1175,
abstract = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non- deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
author = {Alwen, Joel F and De Rezende, Susanna and Nordstrom, Jakob and Vinyals, Marc},
booktitle = {8th Innovations in Theoretical Computer Science Conference (ITCS 2017)},
editor = {Papadimitriou, Christos},
issn = {18688969},
location = {Berkeley, CA, United States},
pages = {38:1--38:21},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Cumulative space in black-white pebbling and resolution}},
doi = {10.4230/LIPIcs.ITCS.2017.38},
volume = {67},
year = {2017},
}
@inproceedings{1176,
abstract = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constrains. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upperbounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibs et al. for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
author = {Alwen, Joel F and Blocki, Jeremiah},
booktitle = {2017 IEEE European Symposium on Security and Privacy (EuroS\&P)},
isbn = {978-150905761-0},
location = {Paris, France},
publisher = {IEEE},
title = {{Towards practical attacks on Argon2i and balloon hashing}},
doi = {10.1109/EuroSP.2017.47},
year = {2017},
}
@inproceedings{1178,
abstract = {For any pair (X, Z) of correlated random variables we can think of Z as a randomized function of X. If the domain of Z is small, one can make this function computationally efficient by allowing it to be only approximately correct. In folklore this problem is known as simulating auxiliary inputs. This idea of simulating auxiliary information turns out to be a very usefull tool, finding applications in complexity theory, cryptography, pseudorandomness and zero-knowledge. In this paper we revisit this problem, achieving the following results: (a) We present a novel boosting algorithm for constructing the simulator. This boosting proof is of independent interest, as it shows how to handle “negative mass” issues when constructing probability measures by shifting distinguishers in descent algorithms. Our technique essentially fixes the flaw in the TCC’14 paper “How to Fake Auxiliary Inputs”. (b) The complexity of our simulator is better than in previous works, including results derived from the uniform min-max theorem due to Vadhan and Zheng. To achieve (s,ϵ) -indistinguishability we need the complexity O(s⋅25ℓϵ−2) in time/circuit size, which improve previous bounds by a factor of ϵ−2. In particular, with we get meaningful provable security for the EUROCRYPT’09 leakage-resilient stream cipher instantiated with a standard 256-bit block cipher, like },
author = {Skórski, Maciej},
booktitle = {Theory of Cryptography (TCC 2016-B)},
pages = {159--179},
publisher = {Springer},
title = {{Simulating auxiliary inputs, revisited}},
doi = {10.1007/978-3-662-53641-4_7},
volume = {9985},
year = {2017},
}
@article{1180,
abstract = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
author = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
issn = {00018708},
journal = {Advances in Mathematics},
pages = {627--644},
publisher = {Academic Press},
title = {{Algebraic vertices of non-convex polyhedra}},
doi = {10.1016/j.aim.2016.12.026},
volume = {308},
year = {2017},
}
@article{1199,
abstract = {Much of quantitative genetics is based on the ‘infinitesimal model’, under which selection has a negligible effect on the genetic variance. This is typically justified by assuming a very large number of loci with additive effects. However, it applies even when genes interact, provided that the number of loci is large enough that selection on each of them is weak relative to random drift. In the long term, directional selection will change allele frequencies, but even then, the effects of epistasis on the ultimate change in trait mean due to selection may be modest. Stabilising selection can maintain many traits close to their optima, even when the underlying alleles are weakly selected. However, the number of traits that can be optimised is apparently limited to ~4Ne by the ‘drift load’, and this is hard to reconcile with the apparent complexity of many organisms. Just as for the mutation load, this limit can be evaded by a particular form of negative epistasis. A more robust limit is set by the variance in reproductive success. This suggests that selection accumulates information most efficiently in the infinitesimal regime, when selection on individual alleles is weak, and comparable with random drift. A review of evidence on selection strength suggests that although most variance in fitness may be because of alleles with large Nes, substantial amounts of adaptation may be because of alleles in the infinitesimal regime, in which epistasis has modest effects.},
author = {Barton, Nicholas H},
journal = {Heredity},
pages = {96--109},
publisher = {Nature Publishing Group},
title = {{How does epistasis influence the response to selection?}},
doi = {10.1038/hdy.2016.109},
volume = {118},
year = {2017},
}
@inproceedings{1194,
abstract = {Termination is one of the basic liveness properties, and we study the termination problem for probabilistic programs with real-valued variables. Previous works focused on the qualitative problem that asks whether an input program terminates with probability~1 (almost-sure termination). A powerful approach for this qualitative problem is the notion of ranking supermartingales with respect to a given set of invariants. The quantitative problem (probabilistic termination) asks for bounds on the termination probability. A fundamental and conceptual drawback of the existing approaches to address probabilistic termination is that even though the supermartingales consider the probabilistic behavior of the programs, the invariants are obtained completely ignoring the probabilistic aspect. In this work we address the probabilistic termination problem for linear-arithmetic probabilistic programs with nondeterminism. We define the notion of {\em stochastic invariants}, which are constraints along with a probability bound that the constraints hold. We introduce a concept of {\em repulsing supermartingales}. First, we show that repulsing supermartingales can be used to obtain bounds on the probability of the stochastic invariants. Second, we show the effectiveness of repulsing supermartingales in the following three ways: (1)~With a combination of ranking and repulsing supermartingales we can compute lower bounds on the probability of termination; (2)~repulsing supermartingales provide witnesses for refutation of almost-sure termination; and (3)~with a combination of ranking and repulsing supermartingales we can establish persistence properties of probabilistic programs. We also present results on related computational problems and an experimental evaluation of our approach on academic examples. },
author = {Chatterjee, Krishnendu and Novotny, Petr and Zikelic, Djordje},
booktitle = {Proceedings of the 44th ACM SIGPLAN Symposium on Principles of Programming Languages (POPL 2017)},
issn = {07308566},
location = {Paris, France},
number = {1},
pages = {145--160},
publisher = {ACM},
title = {{Stochastic invariants for probabilistic termination}},
doi = {10.1145/3009837.3009873},
volume = {52},
year = {2017},
}
@article{1196,
abstract = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem. The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
author = {Henzinger, Thomas A and Otop, Jan},
journal = {Nonlinear Analysis: Hybrid Systems},
pages = {166 -- 190},
publisher = {Elsevier},
title = {{Model measuring for discrete and hybrid systems}},
doi = {10.1016/j.nahs.2016.09.001},
volume = {23},
year = {2017},
}
@article{1191,
abstract = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
author = {Kollár, Richard and Novak, Sebastian},
journal = {Bulletin of Mathematical Biology},
number = {3},
pages = {525--559},
publisher = {Springer},
title = {{Existence of traveling waves for the generalized F–KPP equation}},
doi = {10.1007/s11538-016-0244-3},
volume = {79},
year = {2017},
}
@article{1207,
abstract = {The eigenvalue distribution of the sum of two large Hermitian matrices, when one of them is conjugated by a Haar distributed unitary matrix, is asymptotically given by the free convolution of their spectral distributions. We prove that this convergence also holds locally in the bulk of the spectrum, down to the optimal scales larger than the eigenvalue spacing. The corresponding eigenvectors are fully delocalized. Similar results hold for the sum of two real symmetric matrices, when one is conjugated by Haar orthogonal matrix.},
author = {Bao, Zhigang and Erdős, László and Schnelli, Kevin},
issn = {00103616},
journal = {Communications in Mathematical Physics},
number = {3},
pages = {947 -- 990},
publisher = {Springer},
title = {{Local law of addition of random matrices on optimal scale}},
doi = {10.1007/s00220-016-2805-6},
volume = {349},
year = {2017},
}
@article{1208,
abstract = {We study parameter estimation in linear Gaussian covariance models, which are p-dimensional Gaussian models with linear constraints on the covariance matrix. Maximum likelihood estimation for this class of models leads to a non-convex optimization problem which typically has many local maxima. Using recent results on the asymptotic distribution of extreme eigenvalues of the Wishart distribution, we provide sufficient conditions for any hill climbing method to converge to the global maximum. Although we are primarily interested in the case in which n≫p, the proofs of our results utilize large sample asymptotic theory under the scheme n/p→γ>1. Remarkably, our numerical simulations indicate that our results remain valid for p as small as 2. An important consequence of this analysis is that, for sample sizes n≃14p, maximum likelihood estimation for linear Gaussian covariance models behaves as if it were a convex optimization problem. © 2016 The Royal Statistical Society and Blackwell Publishing Ltd.},
author = {Zwiernik, Piotr and Uhler, Caroline and Richards, Donald},
issn = {13697412},
journal = {Journal of the Royal Statistical Society. Series B: Statistical Methodology},
number = {4},
pages = {1269 -- 1292},
publisher = {Wiley-Blackwell},
title = {{Maximum likelihood estimation for linear Gaussian covariance models}},
doi = {10.1111/rssb.12217},
volume = {79},
year = {2017},
}
@article{1211,
abstract = {Systems such as fluid flows in channels and pipes or the complex Ginzburg–Landau system, defined over periodic domains, exhibit both continuous symmetries, translational and rotational, as well as discrete symmetries under spatial reflections or complex conjugation. The simplest, and very common symmetry of this type is the equivariance of the defining equations under the orthogonal group O(2). We formulate a novel symmetry reduction scheme for such systems by combining the method of slices with invariant polynomial methods, and show how it works by applying it to the Kuramoto–Sivashinsky system in one spatial dimension. As an example, we track a relative periodic orbit through a sequence of bifurcations to the onset of chaos. Within the symmetry-reduced state space we are able to compute and visualize the unstable manifolds of relative periodic orbits, their torus bifurcations, a transition to chaos via torus breakdown, and heteroclinic connections between various relative periodic orbits. It would be very hard to carry through such analysis in the full state space, without a symmetry reduction such as the one we present here.},
author = {Budanur, Nazmi B and Cvitanović, Predrag},
journal = {Journal of Statistical Physics},
number = {3-4},
pages = {636--655},
publisher = {Springer},
title = {{Unstable manifolds of relative periodic orbits in the symmetry reduced state space of the Kuramoto–Sivashinsky system}},
doi = {10.1007/s10955-016-1672-z},
volume = {167},
year = {2017},
}
@article{1213,
abstract = {Bacterial cytokinesis is commonly initiated by the Z-ring, a dynamic cytoskeletal structure that assembles at the site of division. Its primary component is FtsZ, a tubulin-like GTPase, that like its eukaryotic relative forms protein filaments in the presence of GTP. Since the discovery of the Z-ring 25 years ago, various models for the role of FtsZ have been suggested. However, important information about the architecture and dynamics of FtsZ filaments during cytokinesis is still missing. One reason for this lack of knowledge has been the small size of bacteria, which has made it difficult to resolve the orientation and dynamics of individual FtsZ filaments in the Z-ring. While superresolution microscopy experiments have helped to gain more information about the organization of the Z-ring in the dividing cell, they were not yet able to elucidate a mechanism of how FtsZ filaments reorganize during assembly and disassembly of the Z-ring. In this chapter, we explain how to use an in vitro reconstitution approach to investigate the self-organization of FtsZ filaments recruited to a biomimetic lipid bilayer by its membrane anchor FtsA. We show how to perform single-molecule experiments to study the behavior of individual FtsZ monomers during the constant reorganization of the FtsZ-FtsA filament network. We describe how to analyze the dynamics of single molecules and explain why this information can help to shed light onto possible mechanism of Z-ring constriction. We believe that similar experimental approaches will be useful to study the mechanism of membrane-based polymerization of other cytoskeletal systems, not only from prokaryotic but also eukaryotic origin.},
author = {Baranova, Natalia and Loose, Martin},
issn = {0091679X},
journal = {Methods in Cell Biology},
pages = {355 -- 370},
publisher = {Academic Press},
title = {{Single-molecule measurements to study polymerization dynamics of FtsZ-FtsA copolymers}},
doi = {10.1016/bs.mcb.2016.03.036},
volume = {137},
year = {2017},
}
@article{1228,
abstract = {Since 2006, reprogrammed cells have increasingly been used as a biomedical research technique in addition to neuro-psychiatric methods. These rapidly evolving techniques allow for the generation of neuronal sub-populations, and have sparked interest not only in monogenetic neuro-psychiatric diseases, but also in poly-genetic and poly-aetiological disorders such as schizophrenia (SCZ) and bipolar disorder (BPD). This review provides a summary of 19 publications on reprogrammed adult somatic cells derived from patients with SCZ, and five publications using this technique in patients with BPD. As both disorders are complex and heterogeneous, there is a plurality of hypotheses to be tested in vitro. In SCZ, data on alterations of dopaminergic transmission in vitro are sparse, despite the great explanatory power of the so-called DA hypothesis of SCZ. Some findings correspond to perturbations of cell energy metabolism, and observations in reprogrammed cells suggest neuro-developmental alterations. Some studies also report on the efficacy of medicinal compounds to revert alterations observed in cellular models. However, due to the paucity of replication studies, no comprehensive conclusions can be drawn from studies using reprogrammed cells at the present time. In the future, findings from cell culture methods need to be integrated with clinical, epidemiological, pharmacological and imaging data in order to generate a more comprehensive picture of SCZ and BPD.},
author = {Sauerzopf, Ulrich and Sacco, Roberto and Novarino, Gaia and Niello, Marco and Weidenauer, Ana and Praschak-Rieder, Nicole and Sitte, Harald and Willeit, Matthaeus},
journal = {European Journal of Neuroscience},
number = {1},
pages = {45 -- 57},
publisher = {Wiley-Blackwell},
title = {{Are reprogrammed cells a useful tool for studying dopamine dysfunction in psychotic disorders? A review of the current evidence}},
doi = {10.1111/ejn.13418},
volume = {45},
year = {2017},
}
@article{123,
abstract = {The Leidenfrost effect occurs when an object near a hot surface vaporizes rapidly enough to lift itself up and hover. Although well understood for liquids and stiff sublimable solids, nothing is known about the effect with materials whose stiffness lies between these extremes. Here we introduce a new phenomenon that occurs with vaporizable soft solids - the elastic Leidenfrost effect. By dropping hydrogel spheres onto hot surfaces we find that, rather than hovering, they energetically bounce several times their diameter for minutes at a time. With high-speed video during a single impact, we uncover high-frequency microscopic gap dynamics at the sphere/substrate interface. We show how these otherwise-hidden agitations constitute work cycles that harvest mechanical energy from the vapour and sustain the bouncing. Our findings suggest a new strategy for injecting mechanical energy into a widely used class of soft materials, with potential relevance to fields such as active matter, soft robotics and microfluidics.},
author = {Waitukaitis, Scott R and Zuiderwijk, Antal and Souslov, Anton and Coulais, Corentin and van Hecke, Martin},
journal = {Nature Physics},
number = {11},
pages = {1095 -- 1099},
publisher = {Nature Publishing Group},
title = {{Coupling the Leidenfrost effect and elastic deformations to power sustained bouncing}},
doi = {10.1038/nphys4194},
volume = {13},
year = {2017},
}
@article{1336,
abstract = {Evolutionary algorithms (EAs) form a popular optimisation paradigm inspired by natural evolution. In recent years the field of evolutionary computation has developed a rigorous analytical theory to analyse the runtimes of EAs on many illustrative problems. Here we apply this theory to a simple model of natural evolution. In the Strong Selection Weak Mutation (SSWM) evolutionary regime the time between occurrences of new mutations is much longer than the time it takes for a mutated genotype to take over the population. In this situation, the population only contains copies of one genotype and evolution can be modelled as a stochastic process evolving one genotype by means of mutation and selection between the resident and the mutated genotype. The probability of accepting the mutated genotype then depends on the change in fitness. We study this process, SSWM, from an algorithmic perspective, quantifying its expected optimisation time for various parameters and investigating differences to a similar evolutionary algorithm, the well-known (1+1) EA. We show that SSWM can have a moderate advantage over the (1+1) EA at crossing fitness valleys and study an example where SSWM outperforms the (1+1) EA by taking advantage of information on the fitness gradient.},
author = {Paixao, Tiago and Pérez Heredia, Jorge and Sudholt, Dirk and Trubenova, Barbora},
issn = {01784617},
journal = {Algorithmica},
number = {2},
pages = {681 -- 713},
publisher = {Springer},
title = {{Towards a runtime comparison of natural and artificial evolution}},
doi = {10.1007/s00453-016-0212-1},
volume = {78},
year = {2017},
}
@article{1337,
abstract = {We consider the local eigenvalue distribution of large self-adjoint N×N random matrices H=H∗ with centered independent entries. In contrast to previous works the matrix of variances sij=E|hij|2 is not assumed to be stochastic. Hence the density of states is not the Wigner semicircle law. Its possible shapes are described in the companion paper (Ajanki et al. in Quadratic Vector Equations on the Complex Upper Half Plane. arXiv:1506.05095). We show that as N grows, the resolvent, G(z)=(H−z)−1, converges to a diagonal matrix, diag(m(z)), where m(z)=(m1(z),…,mN(z)) solves the vector equation −1/mi(z)=z+∑jsijmj(z) that has been analyzed in Ajanki et al. (Quadratic Vector Equations on the Complex Upper Half Plane. arXiv:1506.05095). We prove a local law down to the smallest spectral resolution scale, and bulk universality for both real symmetric and complex hermitian symmetry classes.},
author = {Ajanki, Oskari H and Erdős, László and Krüger, Torben H},
issn = {01788051},
journal = {Probability Theory and Related Fields},
number = {3-4},
pages = {667 -- 727},
publisher = {Springer},
title = {{Universality for general Wigner-type matrices}},
doi = {10.1007/s00440-016-0740-2},
volume = {169},
year = {2017},
}
@article{1367,
abstract = {One of the major challenges in physically based modelling is making simulations efficient. Adaptive models provide an essential solution to these efficiency goals. These models are able to self-adapt in space and time, attempting to provide the best possible compromise between accuracy and speed. This survey reviews the adaptive solutions proposed so far in computer graphics. Models are classified according to the strategy they use for adaptation, from time-stepping and freezing techniques to geometric adaptivity in the form of structured grids, meshes and particles. Applications range from fluids, through deformable bodies, to articulated solids.},
author = {Manteaux, Pierre and Wojtan, Christopher J and Narain, Rahul and Redon, Stéphane and Faure, François and Cani, Marie},
issn = {01677055},
journal = {Computer Graphics Forum},
number = {6},
pages = {312 -- 337},
publisher = {Wiley-Blackwell},
title = {{Adaptive physically based models in computer graphics}},
doi = {10.1111/cgf.12941},
volume = {36},
year = {2017},
}
@article{1433,
abstract = {Phat is an open-source C++ library for the computation of persistent homology by matrix reduction, targeted towards developers of software for topological data analysis. We aim for a simple generic design that decouples algorithms from data structures without sacrificing efficiency or user-friendliness. We provide numerous different reduction strategies as well as data types to store and manipulate the boundary matrix. We compare the different combinations through extensive experimental evaluation and identify optimization techniques that work well in practical situations. We also compare our software with various other publicly available libraries for persistent homology.},
author = {Bauer, Ulrich and Kerber, Michael and Reininghaus, Jan and Wagner, Hubert},
issn = {07477171},
journal = {Journal of Symbolic Computation},
pages = {76 -- 90},
publisher = {Academic Press},
title = {{Phat - Persistent homology algorithms toolbox}},
doi = {10.1016/j.jsc.2016.03.008},
volume = {78},
year = {2017},
}
@article{1010,
abstract = {We prove a local law in the bulk of the spectrum for random Gram matrices XX∗, a generalization of sample covariance matrices, where X is a large matrix with independent, centered entries with arbitrary variances. The limiting eigenvalue density that generalizes the Marchenko-Pastur law is determined by solving a system of nonlinear equations. Our entrywise and averaged local laws are on the optimal scale with the optimal error bounds. They hold both in the square case (hard edge) and in the properly rectangular case (soft edge). In the latter case we also establish a macroscopic gap away from zero in the spectrum of XX∗. },
author = {Alt, Johannes and Erdős, László and Krüger, Torben H},
issn = {10836489},
journal = {Electronic Journal of Probability},
publisher = {Institute of Mathematical Statistics},
title = {{Local law for random Gram matrices}},
doi = {10.1214/17-EJP42},
volume = {22},
year = {2017},
}
@article{1528,
abstract = {We consider N×N Hermitian random matrices H consisting of blocks of size M≥N6/7. The matrix elements are i.i.d. within the blocks, close to a Gaussian in the four moment matching sense, but their distribution varies from block to block to form a block-band structure, with an essential band width M. We show that the entries of the Green’s function G(z)=(H−z)−1 satisfy the local semicircle law with spectral parameter z=E+iη down to the real axis for any η≫N−1, using a combination of the supersymmetry method inspired by Shcherbina (J Stat Phys 155(3): 466–499, 2014) and the Green’s function comparison strategy. Previous estimates were valid only for η≫M−1. The new estimate also implies that the eigenvectors in the middle of the spectrum are fully delocalized.},
author = {Bao, Zhigang and Erdős, László},
issn = {01788051},
journal = {Probability Theory and Related Fields},
number = {3-4},
pages = {673 -- 776},
publisher = {Springer},
title = {{Delocalization for a class of random block band matrices}},
doi = {10.1007/s00440-015-0692-y},
volume = {167},
year = {2017},
}
@article{1113,
abstract = {A drawing of a graph G is radial if the vertices of G are placed on concentric circles C1, ..., Ck with common center c, and edges are drawn radially: every edge intersects every circle centered at c at most once. G is radial planar if it has a radial embedding, that is, a crossing-free radial drawing. If the vertices of G are ordered or partitioned into ordered levels (as they are for leveled graphs), we require that the assignment of vertices to circles corresponds to the given ordering or leveling. We show that a graph G is radial planar if G has a radial drawing in which every two edges cross an even number of times; the radial embedding has the same leveling as the radial drawing. In other words, we establish the weak variant of the Hanani-Tutte theorem for radial planarity. This generalizes a result by Pach and Tóth.},
author = {Fulek, Radoslav and Pelsmajer, Michael and Schaefer, Marcus},
journal = {Journal of Graph Algorithms and Applications},
number = {1},
pages = {135 -- 154},
publisher = {Brown University},
title = {{Hanani-Tutte for radial planarity}},
doi = {10.7155/jgaa.00408},
volume = {21},
year = {2017},
}
@article{169,
abstract = {We show that a twisted variant of Linnik’s conjecture on sums of Kloosterman sums leads to an optimal covering exponent for S3.},
author = {Browning, Timothy D and Kumaraswamy, Vinay and Steiner, Raphael},
journal = {International Mathematics Research Notices},
publisher = {Oxford University Press},
title = {{Twisted Linnik implies optimal covering exponent for $S^3$}},
doi = {10.1093/imrn/rnx116},
year = {2017},
}
@article{172,
abstract = {We study strong approximation for some algebraic varieties over ℚ which are defined using norm forms. This allows us to confirm a special case of a conjecture due to Harpaz and Wittenberg.},
author = {Browning, Timothy D and Schindler, Damaris},
journal = {International Mathematics Research Notices},
publisher = {Oxford University Press},
title = {{Strong approximation and a conjecture of Harpaz and Wittenberg}},
doi = {10.1093/imrn/rnx252},
year = {2017},
}
@article{1407,
abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely). We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement, and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of all satisfying initial states. Moreover, for any (partial) solution our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. While the proposed algorithm guarantees progress and soundness in every iteration, it is computationally demanding. We offer an alternative, more efficient solution for the reachability properties that decomposes the problem into a series of smaller problems of the same type. All algorithms are demonstrated on an illustrative case study.},
author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Černá, Ivana and Belta, Călin},
journal = {Nonlinear Analysis: Hybrid Systems},
number = {2},
pages = {230 -- 253},
publisher = {Elsevier},
title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}},
doi = {10.1016/j.nahs.2016.04.006},
volume = {23},
year = {2017},
}
@article{1338,
abstract = {We present a computer-aided programming approach to concurrency. The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of sequences produced under a non-preemptive scheduler. We guarantee that our synthesis does not introduce deadlocks and that the synchronization inserted is optimal w.r.t. a given objective function. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and generation of a set of global constraints over synchronization placements. Each model of the global constraints set corresponds to a correctness-ensuring synchronization placement. The placement that is optimal w.r.t. the given objective function is chosen as the synchronization solution. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient. The implicit specification helped us find one concurrency bug previously missed when model-checking using an explicit, user-provided specification. We implemented objective functions for coarse-grained and fine-grained locking and observed that different synchronization placements are produced for our experiments, favoring a minimal number of synchronization operations or maximum concurrency, respectively.},
author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten},
journal = {Formal Methods in System Design},
number = {2-3},
pages = {97 -- 139},
publisher = {Springer},
title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}},
doi = {10.1007/s10703-016-0256-5},
volume = {50},
year = {2017},
}
@article{1351,
abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property. We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs—an important problem of interest in evolutionary biology—more efficiently than the classical simulation method. We specify the property in linear temporal logic. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.},
author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana},
issn = {00015903},
journal = {Acta Informatica},
number = {8},
pages = {765 -- 787},
publisher = {Springer},
title = {{Model checking the evolution of gene regulatory networks}},
doi = {10.1007/s00236-016-0278-x},
volume = {54},
year = {2017},
}
@article{1074,
abstract = {Recently it has become feasible to detect long blocks of nearly identical sequence shared between pairs of genomes. These IBD blocks are direct traces of recent coalescence events and, as such, contain ample signal to infer recent demography. Here, we examine sharing of such blocks in two-dimensional populations with local migration. Using a diffusion approximation to trace genetic ancestry, we derive analytical formulae for patterns of isolation by distance of IBD blocks, which can also incorporate recent population density changes. We introduce an inference scheme that uses a composite likelihood approach to fit these formulae. We then extensively evaluate our theory and inference method on a range of scenarios using simulated data. We first validate the diffusion approximation by showing that the theoretical results closely match the simulated block sharing patterns. We then demonstrate that our inference scheme can accurately and robustly infer dispersal rate and effective density, as well as bounds on recent dynamics of population density. To demonstrate an application, we use our estimation scheme to explore the fit of a diffusion model to Eastern European samples in the POPRES data set. We show that ancestry diffusing with a rate of σ ≈ 50–100 km/√gen during the last centuries, combined with accelerating population growth, can explain the observed exponential decay of block sharing with increasing pairwise sample distance.},
author = {Ringbauer, Harald and Coop, Graham and Barton, Nicholas H},
issn = {00166731},
journal = {Genetics},
number = {3},
pages = {1335 -- 1351},
publisher = {Genetics Society of America},
title = {{Inferring recent demography from isolation by distance of long shared sequence blocks}},
doi = {10.1534/genetics.116.196220},
volume = {205},
year = {2017},
}
@phdthesis{1155,
abstract = {This dissertation concerns the automatic verification of probabilistic systems and programs with arrays by statistical and logical methods. Although statistical and logical methods are different in nature, we show that they can be successfully combined for system analysis. In the first part of the dissertation we present a new statistical algorithm for the verification of probabilistic systems with respect to unbounded properties, including linear temporal logic. Our algorithm often performs faster than the previous approaches, and at the same time requires less information about the system. In addition, our method can be generalized to unbounded quantitative properties such as mean-payoff bounds. In the second part, we introduce two techniques for comparing probabilistic systems. Probabilistic systems are typically compared using the notion of equivalence, which requires the systems to have the equal probability of all behaviors. However, this notion is often too strict, since probabilities are typically only empirically estimated, and any imprecision may break the relation between processes. On the one hand, we propose to replace the Boolean notion of equivalence by a quantitative distance of similarity. For this purpose, we introduce a statistical framework for estimating distances between Markov chains based on their simulation runs, and we investigate which distances can be approximated in our framework. On the other hand, we propose to compare systems with respect to a new qualitative logic, which expresses that behaviors occur with probability one or a positive probability. This qualitative analysis is robust with respect to modeling errors and applicable to many domains. In the last part, we present a new quantifier-free logic for integer arrays, which allows us to express counting. Counting properties are prevalent in array-manipulating programs, however they cannot be expressed in the quantified fragments of the theory of arrays. 
We present a decision procedure for our logic, and provide several complexity results.},
author = {Daca, Przemyslaw},
pages = {163},
publisher = {IST Austria},
title = {{Statistical and logical methods for property checking}},
doi = {10.15479/AT:ISTA:TH_730},
year = {2017},
}
@article{1294,
abstract = {We study controller synthesis problems for finite-state Markov decision processes, where the objective is to optimize the expected mean-payoff performance and stability (also known as variability in the literature). We argue that the basic notion of expressing the stability using the statistical variance of the mean payoff is sometimes insufficient, and propose an alternative definition. We show that a strategy ensuring both the expected mean payoff and the variance below given bounds requires randomization and memory, under both the above definitions. We then show that the problem of finding such a strategy can be expressed as a set of constraints.},
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
journal = {Journal of Computer and System Sciences},
pages = {144--170},
publisher = {Elsevier},
title = {{Trading performance for stability in Markov decision processes}},
doi = {10.1016/j.jcss.2016.09.009},
volume = {84},
year = {2017},
}
@article{265,
abstract = {We establish the dimension and irreducibility of the moduli space of rational curves (of fixed degree) on arbitrary smooth hypersurfaces of sufficiently low degree. A spreading out argument reduces the problem to hypersurfaces defined over finite fields of large cardinality, which can then be tackled using a function field version of the Hardy-Littlewood circle method, in which particular care is taken to ensure uniformity in the size of the underlying finite field.},
author = {Browning, Timothy D. and Vishe, Pankaj},
journal = {Algebra \& Number Theory},
internal-note = {NOTE(review): journal was "Geometric Methods in Algebra and Number Theory"; DOI prefix 10.2140/ant identifies the MSP journal Algebra \& Number Theory -- verify},
number = {7},
pages = {1657--1675},
publisher = {Mathematical Sciences Publishers},
title = {{Rational curves on smooth hypersurfaces of low degree}},
doi = {10.2140/ant.2017.11.1657},
volume = {11},
year = {2017},
}
@article{266,
abstract = {We generalise Birch's seminal work on forms in many variables to handle a system of forms in which the degrees need not all be the same. This allows us to prove the Hasse principle, weak approximation, and the Manin-Peyre conjecture for a smooth and geometrically integral variety $X \subseteq \mathbb{P}^m$, provided only that its dimension is large enough in terms of its degree.},
author = {Browning, Timothy D. and Heath-Brown, Roger},
journal = {Journal of the European Mathematical Society},
number = {2},
pages = {357--394},
publisher = {European Mathematical Society Publishing House},
title = {{Forms in many variables and differing degrees}},
doi = {10.4171/JEMS/668},
volume = {19},
year = {2017},
}
@article{267,
abstract = {Building on recent work of Bhargava, Elkies and Schnidman and of Kriz and Li, we produce infinitely many smooth cubic surfaces defined over the field of rational numbers that contain rational points.},
author = {Browning, Timothy D.},
journal = {Mathematika},
number = {3},
pages = {818--839},
publisher = {Cambridge University Press},
title = {{Many cubic surfaces contain rational points}},
doi = {10.1112/S0025579317000195},
volume = {63},
year = {2017},
}
@article{268,
abstract = {We show that any subset of the squares of positive relative upper density contains nontrivial solutions to a translation-invariant linear equation in five or more variables, with explicit quantitative bounds. As a consequence, we establish the partition regularity of any diagonal quadric in five or more variables whose coefficients sum to zero. Unlike previous approaches, which are limited to equations in seven or more variables, we employ transference technology of Green to import bounds from the linear setting.},
author = {Browning, Timothy D. and Prendiville, Sean M.},
journal = {International Mathematics Research Notices},
number = {7},
pages = {2219--2248},
publisher = {Oxford University Press},
title = {{A transference approach to a Roth-type theorem in the squares}},
doi = {10.1093/imrn/rnw096},
volume = {2017},
year = {2017},
}
@article{272,
abstract = {Given a number field $K/\mathbb{Q}$ and a polynomial $P \in \mathbb{Q}[t]$, all of whose roots are in $\mathbb{Q}$, let $X$ be the variety defined by the equation $N_K(\mathbf{x}) = P(t)$. Combining additive combinatorics with descent we show that the Brauer-Manin obstruction is the only obstruction to the Hasse principle and weak approximation on any smooth and projective model of $X$.},
author = {Browning, Timothy D. and Matthiesen, Lilian},
journal = {Annales Scientifiques de l'École Normale Supérieure},
number = {6},
pages = {1383--1446},
publisher = {Société Mathématique de France},
title = {{Norm forms for arbitrary number fields as products of linear polynomials}},
doi = {10.24033/asens.2348},
volume = {50},
year = {2017},
}
@inproceedings{274,
abstract = {We consider the problem of estimating the partition function Z(β)=∑xexp(−β(H(x)) of a Gibbs distribution with a Hamilton H(⋅), or more precisely the logarithm of the ratio q=lnZ(0)/Z(β). It has been recently shown how to approximate q with high probability assuming the existence of an oracle that produces samples from the Gibbs distribution for a given parameter value in [0,β]. The current best known approach due to Huber [9] uses O(qlnn⋅[lnq+lnlnn+ε−2]) oracle calls on average where ε is the desired accuracy of approximation and H(⋅) is assumed to lie in {0}∪[1,n]. We improve the complexity to O(qlnn⋅ε−2) oracle calls. We also show that the same complexity can be achieved if exact oracles are replaced with approximate sampling oracles that are within O(ε2qlnn) variation distance from exact oracles. Finally, we prove a lower bound of Ω(q⋅ε−2) oracle calls under a natural model of computation.},
author = {Kolmogorov, Vladimir},
booktitle = {Proceedings of the 31st Conference On Learning Theory},
internal-note = {NOTE(review): PMLR volume 75 corresponds to the 31st COLT, held in 2018; year = 2017 looks inconsistent with booktitle/volume -- verify before changing. Entry also lacks a doi/url.},
pages = {228--249},
publisher = {PMLR},
title = {{A faster approximation algorithm for the Gibbs partition function}},
volume = {75},
year = {2017},
}
@article{269,
author = {Browning, Timothy D. and Loughran, Daniel},
journal = {Mathematische Zeitschrift},
number = {3-4},
pages = {1249--1267},
publisher = {Springer},
title = {{Varieties with too many rational points}},
doi = {10.1007/s00209-016-1746-2},
volume = {285},
year = {2017},
}
@article{270,
abstract = {Given a symmetric variety Y defined over Q and a non-zero polynomial with integer coefficients, we use techniques from homogeneous dynamics to establish conditions under which the polynomial can be made r-free for a Zariski dense set of integral points on Y . We also establish an asymptotic counting formula for this set. In the special case that Y is a quadric hypersurface, we give explicit bounds on the size of r by combining the argument with a uniform upper bound for the density of integral points on general affine quadrics defined over Q.},
author = {Browning, Timothy D. and Gorodnik, Alexander},
journal = {Proceedings of the London Mathematical Society},
number = {6},
pages = {1044--1080},
publisher = {Wiley Blackwell},
title = {{Power-free values of polynomials on symmetric varieties}},
doi = {10.1112/plms.12030},
volume = {114},
year = {2017},
}
@article{271,
abstract = {We show that a non-singular integral form of degree d is soluble non-trivially over the integers if and only if it is soluble non-trivially over the reals and the p-adic numbers, provided that the form has at least $(d-\sqrt{d}/2)2^d$ variables. This improves on a longstanding result of Birch.},
author = {Browning, Timothy D. and Prendiville, Sean M.},
journal = {Journal für die reine und angewandte Mathematik},
number = {731},
pages = {203--234},
publisher = {Walter de Gruyter},
title = {{Improvements in Birch's theorem on forms in many variables}},
doi = {10.1515/crelle-2014-0122},
volume = {2017},
year = {2017},
}