@article{1146,
abstract = {Aim: The present study was to compare the effects of nicotinic acid and nicotinamide on the plasma methyl donors, choline and betaine. Methods: Thirty adult subjects were randomly divided into three groups of equal size, and orally received purified water (C group), nicotinic acid (300 mg, NA group) or nicotinamide (300 mg, NM group). Plasma nicotinamide, N 1-methylnicotinamide, homocysteine, betaine and choline levels before and 1.5-h and 3-h post-dosing, plasma normetanephrine and metanephrine concentrations at 3-h post-dosing, and the urinary excretion of N 1-methyl-2-pyridone-5-carboxamide during the test period were examined. Results: The level of 3-h plasma nicotinamide, N 1-methylnicotinamide, homocysteine, the urinary excretion of N 1-methyl-2-pyridone-5-carboxamide and pulse pressure (PP) in the NM group was 221%, 3972%, 61%, 1728% and 21.2% higher than that of the control group (P < 0.01, except homocysteine and PP P < 0.05), while the 3-h plasma betaine, normetanephrine and metanephrine level in the NM group was 24.4%, 9.4% and 11.7% lower (P < 0.05, except betaine P < 0.01), without significant difference in choline levels. Similar but less pronounced changes were observed in the NA group, with a lower level of 3-h plasma N 1-methylnicotinamide (1.90 ± 0.20 μmol/l vs. 3.62 ± 0.27 μmol/l, P < 0.01) and homocysteine (12.85 ± 1.39 μmol/l vs. 18.08 ± 1.02 μmol/l, P < 0.05) but a higher level of betaine (27.44 ± 0.71 μmol/l vs. 23.52 ± 0.61 μmol/l, P < 0.05) than that of the NM group. Conclusion: The degradation of nicotinamide consumes more betaine than that of nicotinic acid at identical doses. This difference should be taken into consideration in niacin fortification. © 2016 Elsevier Ltd and European Society for Clinical Nutrition and Metabolism.},
author = {Sun, Wuping and Zhai, Ming-Zhu and Li, Da and Zhou, Yiming and Chen, Nana and Guo, Ming and Zhou, Shisheng},
journal = {Clinical Nutrition},
number = {4},
pages = {1136--1142},
publisher = {Churchill Livingstone},
title = {{Comparison of the effects of nicotinic acid and nicotinamide degradation on plasma betaine and choline levels}},
doi = {10.1016/j.clnu.2016.07.016},
volume = {36},
year = {2017},
}
@article{1152,
abstract = {We propose a new memetic strategy that can solve the multi-physics, complex inverse problems, formulated as the multi-objective optimization ones, in which objectives are misfits between the measured and simulated states of various governing processes. The multi-deme structure of the strategy allows for both, intensive, relatively cheap exploration with a moderate accuracy and more accurate search many regions of Pareto set in parallel. The special type of selection operator prefers the coherent alternative solutions, eliminating artifacts appearing in the particular processes. The additional accuracy increment is obtained by the parallel convex searches applied to the local scalarizations of the misfit vector. The strategy is dedicated for solving ill-conditioned problems, for which inverting the single physical process can lead to the ambiguous results. The skill of the selection in artifact elimination is shown on the benchmark problem, while the whole strategy was applied for identification of oil deposits, where the misfits are related to various frequencies of the magnetic and electric waves of the magnetotelluric measurements. 2016 Elsevier B.V.},
author = {Gajda-Zagorska, Ewa P and Schaefer, Robert and Smołka, Maciej and Pardo, David and Alvarez Aramberri, Julen},
issn = {18777503},
journal = {Journal of Computational Science},
pages = {85--94},
publisher = {Elsevier},
title = {{A multi-objective memetic inverse solver reinforced by local optimization methods}},
doi = {10.1016/j.jocs.2016.06.007},
volume = {18},
year = {2017},
}
@phdthesis{1155,
abstract = {This dissertation concerns the automatic verification of probabilistic systems and programs with arrays by statistical and logical methods. Although statistical and logical methods are different in nature, we show that they can be successfully combined for system analysis. In the first part of the dissertation we present a new statistical algorithm for the verification of probabilistic systems with respect to unbounded properties, including linear temporal logic. Our algorithm often performs faster than the previous approaches, and at the same time requires less information about the system. In addition, our method can be generalized to unbounded quantitative properties such as mean-payoff bounds. In the second part, we introduce two techniques for comparing probabilistic systems. Probabilistic systems are typically compared using the notion of equivalence, which requires the systems to have the equal probability of all behaviors. However, this notion is often too strict, since probabilities are typically only empirically estimated, and any imprecision may break the relation between processes. On the one hand, we propose to replace the Boolean notion of equivalence by a quantitative distance of similarity. For this purpose, we introduce a statistical framework for estimating distances between Markov chains based on their simulation runs, and we investigate which distances can be approximated in our framework. On the other hand, we propose to compare systems with respect to a new qualitative logic, which expresses that behaviors occur with probability one or a positive probability. This qualitative analysis is robust with respect to modeling errors and applicable to many domains. In the last part, we present a new quantifier-free logic for integer arrays, which allows us to express counting. Counting properties are prevalent in array-manipulating programs, however they cannot be expressed in the quantified fragments of the theory of arrays. 
We present a decision procedure for our logic, and provide several complexity results.},
author = {Daca, Przemyslaw},
pages = {163},
publisher = {IST Austria},
title = {{Statistical and logical methods for property checking}},
doi = {10.15479/AT:ISTA:TH_730},
year = {2017},
}
@article{1159,
abstract = {Auxin steers numerous physiological processes in plants, making the tight control of its endogenous levels and spatiotemporal distribution a necessity. This regulation is achieved by different mechanisms, including auxin biosynthesis, metabolic conversions, degradation, and transport. Here, we introduce cis-cinnamic acid (c-CA) as a novel and unique addition to a small group of endogenous molecules affecting in planta auxin concentrations. c-CA is the photo-isomerization product of the phenylpropanoid pathway intermediate trans-CA (t-CA). When grown on c-CA-containing medium, an evolutionary diverse set of plant species were shown to exhibit phenotypes characteristic for high auxin levels, including inhibition of primary root growth, induction of root hairs, and promotion of adventitious and lateral rooting. By molecular docking and receptor binding assays, we showed that c-CA itself is neither an auxin nor an anti-auxin, and auxin profiling data revealed that c-CA does not significantly interfere with auxin biosynthesis. Single cell-based auxin accumulation assays showed that c-CA, and not t-CA, is a potent inhibitor of auxin efflux. Auxin signaling reporters detected changes in spatiotemporal distribution of the auxin response along the root of c-CA-treated plants, and long-distance auxin transport assays showed no inhibition of rootward auxin transport. Overall, these results suggest that the phenotypes of c-CA-treated plants are the consequence of a local change in auxin accumulation, induced by the inhibition of auxin efflux. This work reveals a novel mechanism how plants may regulate auxin levels and adds a novel, naturally occurring molecule to the chemical toolbox for the studies of auxin homeostasis.},
author = {Steenackers, Ward and Klíma, Petr and Quareshy, Mussa and Cesarino, Igor and Kumpf, Robert and Corneillie, Sander and Araújo, Pedro and Viaene, Tom and Goeminne, Geert and Nowack, Moritz and Ljung, Karin and Friml, Jirí and Blakeslee, Joshua and Novák, Ondřej and Zažímalová, Eva and Napier, Richard and Boerjan, Wout and Vanholme, Bartel},
issn = {00320889},
journal = {Plant Physiology},
number = {1},
pages = {552--565},
publisher = {American Society of Plant Biologists},
title = {{Cis-cinnamic acid is a novel natural auxin efflux inhibitor that promotes lateral root formation}},
doi = {10.1104/pp.16.00943},
volume = {173},
year = {2017},
}
@article{1160,
abstract = {We investigate fundamental nonlinear dynamics of ferrofluidic Taylor-Couette flow - flow confined between two concentric independently rotating cylinders - consider small aspect ratio by solving the ferrohydrodynamical equations, carrying out systematic bifurcation analysis. Without magnetic field, we find steady flow patterns, previously observed with a simple fluid, such as those containing normal one- or two vortex cells, as well as anomalous one-cell and twin-cell flow states. However, when a symmetry-breaking transverse magnetic field is present, all flow states exhibit stimulated, finite two-fold mode. Various bifurcations between steady and unsteady states can occur, corresponding to the transitions between the two-cell and one-cell states. While unsteady, axially oscillating flow states can arise, we also detect the emergence of new unsteady flow states. In particular, we uncover two new states: one contains only the azimuthally oscillating solution in the configuration of the twin-cell flow state, and another a rotating flow state. Topologically, these flow states are a limit cycle and a quasiperiodic solution on a two-torus, respectively. Emergence of new flow states in addition to observed ones with classical fluid, indicates that richer but potentially more controllable dynamics in ferrofluidic flows, as such flow states depend on the external magnetic field.},
author = {Altmeyer, Sebastian and Do, Younghae and Lai, Ying},
issn = {20452322},
journal = {Scientific Reports},
publisher = {Nature Publishing Group},
title = {{Dynamics of ferrofluidic flow in the Taylor-Couette system with a small aspect ratio}},
doi = {10.1038/srep40012},
volume = {7},
year = {2017},
}
@article{1161,
abstract = {Coordinated changes of cell shape are often the result of the excitable, wave-like dynamics of the actin cytoskeleton. New work shows that, in migrating cells, protrusion waves arise from mechanochemical crosstalk between adhesion sites, membrane tension and the actin protrusive machinery.},
author = {Müller, Jan and Sixt, Michael K},
issn = {09609822},
journal = {Current Biology},
number = {1},
pages = {R24--R25},
publisher = {Cell Press},
title = {{Cell migration: Making the waves}},
doi = {10.1016/j.cub.2016.11.035},
volume = {27},
year = {2017},
}
@article{1162,
abstract = {Selected universal experimental properties of high-temperature superconducting (HTS) cuprates have been singled out in the last decade. One of the pivotal challenges in this field is the designation of a consistent interpretation framework within which we can describe quantitatively the universal features of those systems. Here we analyze in a detailed manner the principal experimental data and compare them quantitatively with the approach based on a single-band model of strongly correlated electrons supplemented with strong antiferromagnetic (super)exchange interaction (the so-called t−J−U model). The model rationale is provided by estimating its microscopic parameters on the basis of the three-band approach for the Cu-O plane. We use our original full Gutzwiller wave-function solution by going beyond the renormalized mean-field theory (RMFT) in a systematic manner. Our approach reproduces very well the observed hole doping (δ) dependence of the kinetic-energy gain in the superconducting phase, one of the principal non-Bardeen-Cooper-Schrieffer features of the cuprates. The calculated Fermi velocity in the nodal direction is practically δ-independent and its universal value agrees very well with that determined experimentally. Also, a weak doping dependence of the Fermi wave vector leads to an almost constant value of the effective mass in a pure superconducting phase which is both observed in experiment and reproduced within our approach. An assessment of the currently used models (t−J, Hubbard) is carried out and the results of the canonical RMFT as a zeroth-order solution are provided for comparison to illustrate the necessity of the introduced higher-order contributions.},
author = {Spałek, Józef and Zegrodnik, Michał and Kaczmarczyk, Jan},
issn = {24699950},
journal = {Physical Review B - Condensed Matter and Materials Physics},
number = {2},
publisher = {American Physical Society},
title = {{Universal properties of high temperature superconductors from real space pairing t-J-U model and its quantitative comparison with experiment}},
doi = {10.1103/PhysRevB.95.024506},
volume = {95},
year = {2017},
}
@article{1163,
abstract = {We investigate the effect of the electron-hole (e-h) symmetry breaking on d-wave superconductivity induced by non-local effects of correlations in the generalized Hubbard model. The symmetry breaking is introduced in a two-fold manner: by the next-to-nearest neighbor hopping of electrons and by the charge-bond interaction - the off-diagonal term of the Coulomb potential. Both terms lead to a pronounced asymmetry of the superconducting order parameter. The next-to-nearest neighbor hopping enhances superconductivity for h-doping, while diminishes it for e-doping. The charge-bond interaction alone leads to the opposite effect and, additionally, to the kinetic-energy gain upon condensation in the underdoped regime. With both terms included, with similar amplitudes, the height of the superconducting dome and the critical doping remain in favor of h-doping. The influence of the charge-bond interaction on deviations from symmetry of the shape of the gap at the Fermi surface in the momentum space is briefly discussed.},
author = {Wysokiński, Marcin and Kaczmarczyk, Jan},
issn = {09538984},
journal = {Journal of Physics: Condensed Matter},
number = {8},
publisher = {IOP Publishing Ltd.},
title = {{Unconventional superconductivity in generalized Hubbard model: role of electron–hole symmetry breaking terms}},
doi = {10.1088/1361-648X/aa532f},
volume = {29},
year = {2017},
}
@article{1168,
abstract = {Optimum experimental design theory has recently been extended for parameter estimation in copula models. The use of these models allows one to gain in flexibility by considering the model parameter set split into marginal and dependence parameters. However, this separation also leads to the natural issue of estimating only a subset of all model parameters. In this work, we treat this problem with the application of the $D_s$-optimality to copula models. First, we provide an extension of the corresponding equivalence theory. Then, we analyze a wide range of flexible copula models to highlight the usefulness of $D_s$-optimality in many possible scenarios. Finally, we discuss how the usage of the introduced design criterion also relates to the more general issue of copula selection and optimal design for model discrimination.},
author = {Perrone, Elisa and Rappold, Andreas and Müller, Werner},
journal = {Statistical Methods and Applications},
number = {3},
pages = {403--418},
publisher = {Springer},
title = {{$D_s$-optimality in copula models}},
doi = {10.1007/s10260-016-0375-6},
volume = {26},
year = {2017},
}
@article{1169,
abstract = {Dispersal is a crucial factor in natural evolution, since it determines the habitat experienced by any population and defines the spatial scale of interactions between individuals. There is compelling evidence for systematic differences in dispersal characteristics within the same population, i.e., genotype-dependent dispersal. The consequences of genotype-dependent dispersal on other evolutionary phenomena, however, are poorly understood. In this article we investigate the effect of genotype-dependent dispersal on spatial gene frequency patterns, using a generalization of the classical diffusion model of selection and dispersal. Dispersal is characterized by the variance of dispersal (diffusion coefficient) and the mean displacement (directional advection term). We demonstrate that genotype-dependent dispersal may change the qualitative behavior of Fisher waves, which change from being “pulled” to being “pushed” wave fronts as the discrepancy in dispersal between genotypes increases. The speed of any wave is partitioned into components due to selection, genotype-dependent variance of dispersal, and genotype-dependent mean displacement. We apply our findings to wave fronts maintained by selection against heterozygotes. Furthermore, we identify a benefit of increased variance of dispersal, quantify its effect on the speed of the wave, and discuss the implications for the evolution of dispersal strategies.},
author = {Novak, Sebastian and Kollár, Richard},
issn = {00166731},
journal = {Genetics},
number = {1},
pages = {367--374},
publisher = {Genetics Society of America},
title = {{Spatial gene frequency waves under genotype-dependent dispersal}},
doi = {10.1534/genetics.116.193946},
volume = {205},
year = {2017},
}
@article{1173,
abstract = {We introduce the Voronoi functional of a triangulation of a finite set of points in the Euclidean plane and prove that among all geometric triangulations of the point set, the Delaunay triangulation maximizes the functional. This result neither extends to topological triangulations in the plane nor to geometric triangulations in three and higher dimensions.},
author = {Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg and Nikitenko, Anton},
issn = {02099683},
journal = {Combinatorica},
number = {5},
pages = {887--910},
publisher = {Springer},
title = {{The Voronoi functional is maximized by the Delaunay triangulation in the plane}},
doi = {10.1007/s00493-016-3308-y},
volume = {37},
year = {2017},
}
@inproceedings{1174,
abstract = {Security of cryptographic applications is typically defined by security games. The adversary, within certain resources, cannot win with probability much better than 0 (for unpredictability applications, like one-way functions) or much better than 1/2 (indistinguishability applications for instance encryption schemes). In so called squared-friendly applications the winning probability of the adversary, for different values of the application secret randomness, is not only close to 0 or 1/2 on average, but also concentrated in the sense that its second central moment is small. The class of squared-friendly applications, which contains all unpredictability applications and many indistinguishability applications, is particularly important for key derivation. Barak et al. observed that for square-friendly applications one can beat the "RT-bound", extracting secure keys with significantly smaller entropy loss. In turn Dodis and Yu showed that in squared-friendly applications one can directly use a "weak" key, which has only high entropy, as a secure key. In this paper we give sharp lower bounds on square security assuming security for "weak" keys. We show that any application which is either (a) secure with weak keys or (b) allows for entropy savings for keys derived by universal hashing, must be square-friendly. Quantitatively, our lower bounds match the positive results of Dodis and Yu and Barak et al. (TCC'13, CRYPTO'11) Hence, they can be understood as a general characterization of squared-friendly applications. While the positive results on squared-friendly applications were derived by one clever application of the Cauchy-Schwarz Inequality, for tight lower bounds we need more machinery. In our approach we use convex optimization techniques and some theory of circular matrices.},
author = {Skórski, Maciej},
issn = {18688969},
location = {Hannover, Germany},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Lower bounds on key derivation for square-friendly applications}},
doi = {10.4230/LIPIcs.STACS.2017.57},
volume = {66},
year = {2017},
}
@inproceedings{1175,
abstract = {We study space complexity and time-space trade-offs with a focus not on peak memory usage but on overall memory consumption throughout the computation. Such a cumulative space measure was introduced for the computational model of parallel black pebbling by [Alwen and Serbinenko ’15] as a tool for obtaining results in cryptography. We consider instead the non-deterministic black-white pebble game and prove optimal cumulative space lower bounds and trade-offs, where in order to minimize pebbling time the space has to remain large during a significant fraction of the pebbling. We also initiate the study of cumulative space in proof complexity, an area where other space complexity measures have been extensively studied during the last 10–15 years. Using and extending the connection between proof complexity and pebble games in [Ben-Sasson and Nordström ’08, ’11] we obtain several strong cumulative space results for (even parallel versions of) the resolution proof system, and outline some possible future directions of study of this, in our opinion, natural and interesting space measure.},
author = {Alwen, Joel F and De Rezende, Susanna and Nordström, Jakob and Vinyals, Marc},
editor = {Papadimitriou, Christos},
issn = {18688969},
location = {Berkeley, CA, United States},
pages = {38:1--38:21},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Cumulative space in black-white pebbling and resolution}},
doi = {10.4230/LIPIcs.ITCS.2017.38},
volume = {67},
year = {2017},
}
@inproceedings{1176,
abstract = {The algorithm Argon2i-B of Biryukov, Dinu and Khovratovich is currently being considered by the IRTF (Internet Research Task Force) as a new de-facto standard for password hashing. An older version (Argon2i-A) of the same algorithm was chosen as the winner of the recent Password Hashing Competition. An important competitor to Argon2i-B is the recently introduced Balloon Hashing (BH) algorithm of Corrigan-Gibs, Boneh and Schechter. A key security desiderata for any such algorithm is that evaluating it (even using a custom device) requires a large amount of memory amortized across multiple instances. Alwen and Blocki (CRYPTO 2016) introduced a class of theoretical attacks against Argon2i-A and BH. While these attacks yield large asymptotic reductions in the amount of memory, it was not, a priori, clear if (1) they can be extended to the newer Argon2i-B, (2) the attacks are effective on any algorithm for practical parameter ranges (e.g., 1GB of memory) and (3) if they can be effectively instantiated against any algorithm under realistic hardware constrains. In this work we answer all three of these questions in the affirmative for all three algorithms. This is also the first work to analyze the security of Argon2i-B. In more detail, we extend the theoretical attacks of Alwen and Blocki (CRYPTO 2016) to the recent Argon2i-B proposal demonstrating severe asymptotic deficiencies in its security. Next we introduce several novel heuristics for improving the attack's concrete memory efficiency even when on-chip memory bandwidth is bounded. We then simulate our attacks on randomly sampled Argon2i-A, Argon2i-B and BH instances and measure the resulting memory consumption for various practical parameter ranges and for a variety of upperbounds on the amount of parallelism available to the attacker. Finally we describe, implement, and test a new heuristic for applying the Alwen-Blocki attack to functions employing a technique developed by Corrigan-Gibs et al. 
for improving concrete security of memory-hard functions. We analyze the collected data and show the effects various parameters have on the memory consumption of the attack. In particular, we can draw several interesting conclusions about the level of security provided by these functions. · For the Alwen-Blocki attack to fail against practical memory parameters, Argon2i-B must be instantiated with more than 10 passes on memory - beyond the "paranoid" parameter setting in the current IRTF proposal. · The technique of Corrigan-Gibs for improving security can also be overcome by the Alwen-Blocki attack under realistic hardware constraints. · On a positive note, both the asymptotic and concrete security of Argon2i-B seem to improve on that of Argon2i-A.},
author = {Alwen, Joel F and Blocki, Jeremiah},
isbn = {978-150905761-0},
location = {Paris, France},
publisher = {IEEE},
title = {{Towards practical attacks on Argon2i and balloon hashing}},
doi = {10.1109/EuroSP.2017.47},
year = {2017},
}
@article{1180,
abstract = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
author = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
issn = {00018708},
journal = {Advances in Mathematics},
pages = {627--644},
publisher = {Academic Press},
title = {{Algebraic vertices of non-convex polyhedra}},
doi = {10.1016/j.aim.2016.12.026},
volume = {308},
year = {2017},
}
@article{1187,
abstract = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the (Formula presented.) protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
journal = {Journal of Cryptology},
number = {4},
pages = {1238--1275},
publisher = {Springer},
title = {{Efficient authentication from hard learning problems}},
doi = {10.1007/s00145-016-9247-3},
volume = {30},
year = {2017},
}
@article{1191,
abstract = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
author = {Kollár, Richard and Novak, Sebastian},
journal = {Bulletin of Mathematical Biology},
number = {3},
pages = {525--559},
publisher = {Springer},
title = {{Existence of traveling waves for the generalized F–KPP equation}},
doi = {10.1007/s11538-016-0244-3},
volume = {79},
year = {2017},
}
@inproceedings{1192,
abstract = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
isbn = {978-161197478-2},
location = {Barcelona, Spain},
pages = {307--326},
publisher = {SIAM},
title = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
doi = {10.1137/1.9781611974782.20},
year = {2017},
}
@inproceedings{1194,
abstract = {Termination is one of the basic liveness properties, and we study the termination problem for probabilistic programs with real-valued variables. Previous works focused on the qualitative problem that asks whether an input program terminates with probability~1 (almost-sure termination). A powerful approach for this qualitative problem is the notion of ranking supermartingales with respect to a given set of invariants. The quantitative problem (probabilistic termination) asks for bounds on the termination probability. A fundamental and conceptual drawback of the existing approaches to address probabilistic termination is that even though the supermartingales consider the probabilistic behavior of the programs, the invariants are obtained completely ignoring the probabilistic aspect. In this work we address the probabilistic termination problem for linear-arithmetic probabilistic programs with nondeterminism. We define the notion of {\em stochastic invariants}, which are constraints along with a probability bound that the constraints hold. We introduce a concept of {\em repulsing supermartingales}. First, we show that repulsing supermartingales can be used to obtain bounds on the probability of the stochastic invariants. Second, we show the effectiveness of repulsing supermartingales in the following three ways: (1)~With a combination of ranking and repulsing supermartingales we can compute lower bounds on the probability of termination; (2)~repulsing supermartingales provide witnesses for refutation of almost-sure termination; and (3)~with a combination of ranking and repulsing supermartingales we can establish persistence properties of probabilistic programs. We also present results on related computational problems and an experimental evaluation of our approach on academic examples. },
author = {Chatterjee, Krishnendu and Novotny, Petr and Zikelic, Djordje},
issn = {07308566},
location = {Paris, France},
number = {1},
pages = {145--160},
publisher = {ACM},
title = {{Stochastic invariants for probabilistic termination}},
doi = {10.1145/3009837.3009873},
volume = {52},
year = {2017},
}
@article{1196,
abstract = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem.
The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
author = {Henzinger, Thomas A and Otop, Jan},
journal = {Nonlinear Analysis: Hybrid Systems},
pages = {166--190},
publisher = {Elsevier},
title = {{Model measuring for discrete and hybrid systems}},
doi = {10.1016/j.nahs.2016.09.001},
volume = {23},
year = {2017},
}