@inproceedings{609,
abstract = {Several cryptographic schemes and applications are based on functions that are both reasonably efficient to compute and moderately hard to invert, including client puzzles for Denial-of-Service protection, password protection via salted hashes, or recent proof-of-work blockchain systems. Despite their wide use, a definition of this concept has not yet been distilled and formalized explicitly. Instead, either the applications are proven directly based on the assumptions underlying the function, or some property of the function is proven, but the security of the application is argued only informally. The goal of this work is to provide a (universal) definition that decouples the efforts of designing new moderately hard functions and of building protocols based on them, serving as an interface between the two. On a technical level, beyond the mentioned definitions, we instantiate the model for four different notions of hardness. We extend the work of Alwen and Serbinenko (STOC 2015) by providing a general tool for proving security for the first notion of memory-hard functions that allows for provably secure applications. The tool allows us to recover all of the graph-theoretic techniques developed for proving security under the older, non-composable, notion of security used by Alwen and Serbinenko. As an application of our definition of moderately hard functions, we prove the security of two different schemes for proofs of effort (PoE). We also formalize and instantiate the concept of a non-interactive proof of effort (niPoE), in which the proof is not bound to a particular communication context but rather any bit-string chosen by the prover.},
author = {Alwen, Joel F and Tackmann, Björn},
editor = {Kalai, Yael and Reyzin, Leonid},
isbn = {978-3-319-70499-9},
location = {Baltimore, MD, United States},
pages = {493 -- 526},
publisher = {Springer},
title = {{Moderately hard functions: Definition, instantiations, and applications}},
doi = {10.1007/978-3-319-70500-2_17},
volume = {10677},
year = {2017},
}
@article{610,
abstract = {The fact that the complete graph K5 does not embed in the plane has been generalized in two independent directions. On the one hand, the solution of the classical Heawood problem for graphs on surfaces established that the complete graph Kn embeds in a closed surface M (other than the Klein bottle) if and only if (n−3)(n−4) ≤ 6b1(M), where b1(M) is the first Z2-Betti number of M. On the other hand, van Kampen and Flores proved that the k-skeleton of the n-dimensional simplex (the higher-dimensional analogue of Kn+1) embeds in R2k if and only if n ≤ 2k + 1. Two decades ago, Kühnel conjectured that the k-skeleton of the n-simplex embeds in a compact, (k − 1)-connected 2k-manifold with kth Z2-Betti number bk only if the following generalized Heawood inequality holds: (k+1 n−k−1) ≤ (k+1 2k+1)bk. This is a common generalization of the case of graphs on surfaces as well as the van Kampen–Flores theorem. In the spirit of Kühnel’s conjecture, we prove that if the k-skeleton of the n-simplex embeds in a compact 2k-manifold with kth Z2-Betti number bk, then n ≤ 2bk(k 2k+2)+2k+4. This bound is weaker than the generalized Heawood inequality, but does not require the assumption that M is (k−1)-connected. Our results generalize to maps without q-covered points, in the spirit of Tverberg’s theorem, for q a prime power. Our proof uses a result of Volovikov about maps that satisfy a certain homological triviality condition.},
author = {Goaoc, Xavier and Mabillard, Isaac and Paták, Pavel and Patáková, Zuzana and Tancer, Martin and Wagner, Uli},
journal = {Israel Journal of Mathematics},
number = {2},
pages = {841 -- 866},
publisher = {Springer},
title = {{On generalized Heawood inequalities for manifolds: A van Kampen–Flores type nonembeddability result}},
doi = {10.1007/s11856-017-1607-7},
volume = {222},
year = {2017},
}
@article{611,
abstract = {Small RNAs (sRNAs) regulate genes in plants and animals. Here, we show that population-wide differences in color patterns in snapdragon flowers are caused by an inverted duplication that generates sRNAs. The complexity and size of the transcripts indicate that the duplication represents an intermediate on the pathway to microRNA evolution. The sRNAs repress a pigment biosynthesis gene, creating a yellow highlight at the site of pollinator entry. The inverted duplication exhibits steep clines in allele frequency in a natural hybrid zone, showing that the allele is under selection. Thus, regulatory interactions of evolutionarily recent sRNAs can be acted upon by selection and contribute to the evolution of phenotypic diversity.},
author = {Bradley, Desmond and Xu, Ping and Mohorianu, Irina and Whibley, Annabel and Field, David and Tavares, Hugo and Couchman, Matthew and Copsey, Lucy and Carpenter, Rosemary and Li, Miaomiao and Li, Qun and Xue, Yongbiao and Dalmay, Tamas and Coen, Enrico},
issn = {00368075},
journal = {Science},
number = {6365},
pages = {925 -- 928},
publisher = {American Association for the Advancement of Science},
title = {{Evolution of flower color pattern through selection on regulatory small RNAs}},
doi = {10.1126/science.aao3526},
volume = {358},
year = {2017},
}
@article{6113,
author = {Oda, Shigekazu and Toyoshima, Yu and de Bono, Mario},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences},
number = {23},
pages = {E4658--E4665},
publisher = {National Academy of Sciences},
title = {{Modulation of sensory information processing by a neuroglobin in Caenorhabditis elegans}},
doi = {10.1073/pnas.1614596114},
volume = {114},
year = {2017},
}
@article{6115,
abstract = {Animals adjust their behavioral priorities according to momentary needs and prior experience. We show that Caenorhabditis elegans changes how it processes sensory information according to the oxygen environment it experienced recently. C. elegans acclimated to 7% O2 are aroused by CO2 and repelled by pheromones that attract animals acclimated to 21% O2. This behavioral plasticity arises from prolonged activity differences in a circuit that continuously signals O2 levels. A sustained change in the activity of O2-sensing neurons reprograms the properties of their postsynaptic partners, the RMG hub interneurons. RMG is gap-junctionally coupled to the ASK and ADL pheromone sensors that respectively drive pheromone attraction and repulsion. Prior O2 experience has opposite effects on the pheromone responsiveness of these neurons. These circuit changes provide a physiological correlate of altered pheromone valence. Our results suggest C. elegans stores a memory of recent O2 experience in the RMG circuit and illustrate how a circuit is flexibly sculpted to guide behavioral decisions in a context-dependent manner.},
author = {Fenk, Lorenz A. and de Bono, Mario},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences},
number = {16},
pages = {4195--4200},
publisher = {National Academy of Sciences},
title = {{Memory of recent oxygen experience switches pheromone valence in Caenorhabditis elegans}},
doi = {10.1073/pnas.1618934114},
volume = {114},
year = {2017},
}
@article{6117,
abstract = {Interleukin-17 (IL-17) is a major pro-inflammatory cytokine: it mediates responses to pathogens or tissue damage, and drives autoimmune diseases. Little is known about its role in the nervous system. Here we show that IL-17 has neuromodulator-like properties in Caenorhabditis elegans. IL-17 can act directly on neurons to alter their response properties and contribution to behaviour. Using unbiased genetic screens, we delineate an IL-17 signalling pathway and show that it acts in the RMG hub interneurons. Disrupting IL-17 signalling reduces RMG responsiveness to input from oxygen sensors, and renders sustained escape from 21% oxygen transient and contingent on additional stimuli. Over-activating IL-17 receptors abnormally heightens responses to 21% oxygen in RMG neurons and whole animals. IL-17 deficiency can be bypassed by optogenetic stimulation of RMG. Inducing IL-17 expression in adults can rescue mutant defects within 6 h. These findings reveal a non-immunological role of IL-17 modulating circuit function and behaviour.},
author = {Chen, Changchun and Itakura, Eisuke and Nelson, Geoffrey M. and Sheng, Ming and Laurent, Patrick and Fenk, Lorenz A. and Butcher, Rebecca A. and Hegde, Ramanujan S. and de Bono, Mario},
issn = {0028-0836},
journal = {Nature},
number = {7639},
pages = {43--48},
publisher = {Springer Nature},
title = {{IL-17 is a neuromodulator of Caenorhabditis elegans sensory responses}},
doi = {10.1038/nature20818},
volume = {542},
year = {2017},
}
@article{613,
abstract = {Bacteria in groups vary individually, and interact with other bacteria and the environment to produce population-level patterns of gene expression. Investigating such behavior in detail requires measuring and controlling populations at the single-cell level alongside precisely specified interactions and environmental characteristics. Here we present an automated, programmable platform that combines image-based gene expression and growth measurements with on-line optogenetic expression control for hundreds of individual Escherichia coli cells over days, in a dynamically adjustable environment. This integrated platform broadly enables experiments that bridge individual and population behaviors. We demonstrate: (i) population structuring by independent closed-loop control of gene expression in many individual cells, (ii) cell-cell variation control during antibiotic perturbation, (iii) hybrid bio-digital circuits in single cells, and freely specifiable digital communication between individual bacteria. These examples showcase the potential for real-time integration of theoretical models with measurement and control of many individual cells to investigate and engineer microbial population behavior.},
author = {Chait, Remy P and Ruess, Jakob and Bergmiller, Tobias and Tkacik, Gasper and Guet, Calin C},
issn = {20411723},
journal = {Nature Communications},
number = {1},
publisher = {Nature Publishing Group},
title = {{Shaping bacterial population behavior through computer interfaced control of individual cells}},
doi = {10.1038/s41467-017-01683-1},
volume = {8},
year = {2017},
}
@article{614,
abstract = {Moths and butterflies (Lepidoptera) usually have a pair of differentiated WZ sex chromosomes. However, in most lineages outside of the division Ditrysia, as well as in the sister order Trichoptera, females lack a W chromosome. The W is therefore thought to have been acquired secondarily. Here we compare the genomes of three Lepidoptera species (one Ditrysia and two non-Ditrysia) to test three models accounting for the origin of the W: (1) a Z-autosome fusion; (2) a sex chromosome turnover; and (3) a non-canonical mechanism (e.g., through the recruitment of a B chromosome). We show that the gene content of the Z is highly conserved across Lepidoptera (rejecting a sex chromosome turnover) and that very few genes moved onto the Z in the common ancestor of the Ditrysia (arguing against a Z-autosome fusion). Our comparative genomics analysis therefore supports the secondary acquisition of the Lepidoptera W by a non-canonical mechanism, and it confirms the extreme stability of well-differentiated sex chromosomes.},
author = {Fraisse, Christelle and Picard, Marion A and Vicoso, Beatriz},
issn = {20411723},
journal = {Nature Communications},
number = {1},
publisher = {Nature Publishing Group},
title = {{The deep conservation of the Lepidoptera Z chromosome suggests a non-canonical origin of the W}},
doi = {10.1038/s41467-017-01663-5},
volume = {8},
year = {2017},
}
@article{615,
abstract = {We show that the Dyson Brownian Motion exhibits local universality after a very short time assuming that local rigidity and level repulsion of the eigenvalues hold. These conditions are verified, hence bulk spectral universality is proven, for a large class of Wigner-like matrices, including deformed Wigner ensembles and ensembles with non-stochastic variance matrices whose limiting densities differ from Wigner's semicircle law.},
author = {Erdös, László and Schnelli, Kevin},
issn = {02460203},
journal = {Annales de l'institut Henri Poincaré (B) Probability and Statistics},
number = {4},
pages = {1606 -- 1656},
publisher = {Institute of Mathematical Statistics},
title = {{Universality for random matrix flows with time dependent density}},
doi = {10.1214/16-AIHP765},
volume = {53},
year = {2017},
}
@article{618,
abstract = {Background: Increasing temperatures are predicted to strongly impact host-parasite interactions, but empirical tests are rare. Host species that are naturally exposed to a broad temperature spectrum offer the possibility to investigate the effects of elevated temperatures on hosts and parasites. Using three-spined sticklebacks, Gasterosteus aculeatus L., and tapeworms, Schistocephalus solidus (Müller, 1776), originating from a cold and a warm water site of a volcanic lake, we subjected sympatric and allopatric host-parasite combinations to cold and warm conditions in a fully crossed design. We predicted that warm temperatures would promote the development of the parasites, while the hosts might benefit from cooler temperatures. We further expected adaptations to the local temperature and mutual adaptations of local host-parasite pairs. Results: Overall, S. solidus parasites grew faster at warm temperatures and stickleback hosts at cold temperatures. On a finer scale, we observed that parasites were able to exploit their hosts more efficiently at the parasite’s temperature of origin. In contrast, host tolerance towards parasite infection was higher when sticklebacks were infected with parasites at the parasite’s ‘foreign’ temperature. Cold-origin sticklebacks tended to grow faster and parasite infection induced a stronger immune response. Conclusions: Our results suggest that increasing environmental temperatures promote the parasite rather than the host and that host tolerance is dependent on the interaction between parasite infection and temperature. Sticklebacks might use tolerance mechanisms towards parasite infection in combination with their high plasticity towards temperature changes to cope with increasing parasite infection pressures and rising temperatures.},
author = {Franke, Frederik and Armitage, Sophie and Kutzer, Megan and Kurtz, Joachim and Scharsack, Jörn},
issn = {17563305},
journal = {Parasites \& Vectors},
number = {252},
publisher = {BioMed Central},
title = {{Environmental temperature variation influences fitness trade-offs in a fish-tapeworm association}},
doi = {10.1186/s13071-017-2192-7},
volume = {10},
year = {2017},
}
@article{6196,
abstract = {PMAC is a simple and parallel block-cipher mode of operation, which was introduced by Black and Rogaway at Eurocrypt 2002. If instantiated with a (pseudo)random permutation over n-bit strings, PMAC constitutes a provably secure variable input-length (pseudo)random function. For adversaries making q queries, each of length at most l (in n-bit blocks), and of total length σ ≤ ql, the original paper proves an upper bound on the distinguishing advantage of Ο(σ2/2n), while the currently best bound is Ο (qσ/2n).In this work we show that this bound is tight by giving an attack with advantage Ω (q2l/2n). In the PMAC construction one initially XORs a mask to every message block, where the mask for the ith block is computed as τi := γi·L, where L is a (secret) random value, and γi is the i-th codeword of the Gray code. Our attack applies more generally to any sequence of γi’s which contains a large coset of a subgroup of GF(2n). We then investigate if the security of PMAC can be further improved by using τi’s that are k-wise independent, for k > 1 (the original distribution is only 1-wise independent). We observe that the security of PMAC will not increase in general, even if the masks are chosen from a 2-wise independent distribution, and then prove that the security increases to O(q2/2n), if the τi are 4-wise independent. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether 3-wise independence is already sufficient to get this level of security is left as an open problem.},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
issn = {2519-173X},
journal = {IACR Transactions on Symmetric Cryptology},
number = {2},
pages = {145--161},
publisher = {Ruhr University Bochum},
title = {{The exact security of PMAC}},
doi = {10.13154/TOSC.V2016.I2.145-161},
volume = {2016},
year = {2017},
}
@article{621,
abstract = {The mammalian cerebral cortex is responsible for higher cognitive functions such as perception, consciousness, and acquiring and processing information. The neocortex is organized into six distinct laminae, each composed of a rich diversity of cell types which assemble into highly complex cortical circuits. Radial glia progenitors (RGPs) are responsible for producing all neocortical neurons and certain glia lineages. Here, we discuss recent discoveries emerging from clonal lineage analysis at the single RGP cell level that provide us with an inaugural quantitative framework of RGP lineage progression. We further discuss the importance of the relative contribution of intrinsic gene functions and non-cell-autonomous or community effects in regulating RGP proliferation behavior and lineage progression.},
author = {Beattie, Robert J and Hippenmeyer, Simon},
issn = {00145793},
journal = {FEBS letters},
number = {24},
pages = {3993 -- 4008},
publisher = {Wiley-Blackwell},
title = {{Mechanisms of radial glia progenitor cell lineage progression}},
doi = {10.1002/1873-3468.12906},
volume = {591},
year = {2017},
}
@inbook{623,
abstract = {Genetic factors might be largely responsible for the development of autism spectrum disorder (ASD) that alone or in combination with specific environmental risk factors trigger the pathology. Multiple mutations identified in ASD patients that impair synaptic function in the central nervous system are well studied in animal models. How these mutations might interact with other risk factors is not fully understood though. Additionally, how systems outside of the brain are altered in the context of ASD is an emerging area of research. Extracerebral influences on the physiology could begin in utero and contribute to changes in the brain and in the development of other body systems and further lead to epigenetic changes. Therefore, multiple recent studies have aimed at elucidating the role of gene-environment interactions in ASD. Here we provide an overview on the extracerebral systems that might play an important associative role in ASD and review evidence regarding the potential roles of inflammation, trace metals, metabolism, genetic susceptibility, enteric nervous system function and the microbiota of the gastrointestinal (GI) tract on the development of endophenotypes in animal models of ASD. By influencing environmental conditions, it might be possible to reduce or limit the severity of ASD pathology.},
author = {Hill-Yardin, Elisa and Mckeown, Sonja and Novarino, Gaia and Grabrucker, Andreas},
booktitle = {Translational Anatomy and Cell Biology of Autism Spectrum Disorder},
editor = {Schmeisser, Michael and Boeckers, Tobias},
isbn = {978-3-319-52496-2},
issn = {03015556},
pages = {159 -- 187},
publisher = {Springer},
title = {{Extracerebral dysfunction in animal models of autism spectrum disorder}},
doi = {10.1007/978-3-319-52498-6_9},
volume = {224},
year = {2017},
}
@article{624,
abstract = {Bacteria adapt to adverse environmental conditions by altering gene expression patterns. Recently, a novel stress adaptation mechanism has been described that allows Escherichia coli to alter gene expression at the post-transcriptional level. The key player in this regulatory pathway is the endoribonuclease MazF, the toxin component of the toxin-antitoxin module mazEF that is triggered by various stressful conditions. In general, MazF degrades the majority of transcripts by cleaving at ACA sites, which results in the retardation of bacterial growth. Furthermore, MazF can process a small subset of mRNAs and render them leaderless by removing their ribosome binding site. MazF concomitantly modifies ribosomes, making them selective for the translation of leaderless mRNAs. In this study, we employed fluorescent reporter-systems to investigate mazEF expression during stressful conditions, and to infer consequences of the mRNA processing mediated by MazF on gene expression at the single-cell level. Our results suggest that mazEF transcription is maintained at low levels in single cells encountering adverse conditions, such as antibiotic stress or amino acid starvation. Moreover, using the grcA mRNA as a model for MazF-mediated mRNA processing, we found that MazF activation promotes heterogeneity in the grcA reporter expression, resulting in a subpopulation of cells with increased levels of GrcA reporter protein.},
author = {Nikolic, Nela and Didara, Zrinka and Moll, Isabella},
issn = {21678359},
journal = {PeerJ},
number = {9},
publisher = {PeerJ},
title = {{MazF activation promotes translational heterogeneity of the grcA mRNA in Escherichia coli populations}},
doi = {10.7717/peerj.3830},
volume = {2017},
year = {2017},
}
@inbook{625,
abstract = {In the analysis of reactive systems a quantitative objective assigns a real value to every trace of the system. The value decision problem for a quantitative objective requires a trace whose value is at least a given threshold, and the exact value decision problem requires a trace whose value is exactly the threshold. We compare the computational complexity of the value and exact value decision problems for classical quantitative objectives, such as sum, discounted sum, energy, and mean-payoff for two standard models of reactive systems, namely, graphs and graph games.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Henzinger, Thomas A},
booktitle = {Models, Algorithms, Logics and Tools},
editor = {Aceto, Luca and Bacci, Giorgio and Ingólfsdóttir, Anna and Legay, Axel and Mardare, Radu},
issn = {03029743},
pages = {367 -- 381},
publisher = {Springer},
title = {{The cost of exactness in quantitative reachability}},
doi = {10.1007/978-3-319-63121-9_18},
volume = {10460},
year = {2017},
}
@article{626,
abstract = {Our focus here is on the infinitesimal model. In this model, one or several quantitative traits are described as the sum of a genetic and a non-genetic component, the first being distributed within families as a normal random variable centred at the average of the parental genetic components, and with a variance independent of the parental traits. Thus, the variance that segregates within families is not perturbed by selection, and can be predicted from the variance components. This does not necessarily imply that the trait distribution across the whole population should be Gaussian, and indeed selection or population structure may have a substantial effect on the overall trait distribution. One of our main aims is to identify some general conditions on the allelic effects for the infinitesimal model to be accurate. We first review the long history of the infinitesimal model in quantitative genetics. Then we formulate the model at the phenotypic level in terms of individual trait values and relationships between individuals, but including different evolutionary processes: genetic drift, recombination, selection, mutation, population structure, …. We give a range of examples of its application to evolutionary questions related to stabilising selection, assortative mating, effective population size and response to selection, habitat preference and speciation. We provide a mathematical justification of the model as the limit as the number M of underlying loci tends to infinity of a model with Mendelian inheritance, mutation and environmental noise, when the genetic component of the trait is purely additive. We also show how the model generalises to include epistatic effects. We prove in particular that, within each family, the genetic components of the individual trait values in the current generation are indeed normally distributed with a variance independent of ancestral traits, up to an error of order 1∕M. 
Simulations suggest that in some cases the convergence may be as fast as 1∕M.},
author = {Barton, Nicholas H and Etheridge, Alison and Véber, Amandine},
issn = {00405809},
journal = {Theoretical Population Biology},
pages = {50 -- 73},
publisher = {Academic Press},
title = {{The infinitesimal model: Definition, derivation, and implications}},
doi = {10.1016/j.tpb.2017.06.001},
volume = {118},
year = {2017},
}
@article{627,
abstract = {Beige adipocytes are a new type of recruitable brownish adipocytes, with highly mitochondrial membrane uncoupling protein 1 expression and thermogenesis. Beige adipocytes were found among white adipocytes, especially in subcutaneous white adipose tissue (sWAT). Therefore, beige adipocytes may be involved in the regulation of energy metabolism and fat deposition. Transient receptor potential melastatin 8 (TRPM8), a Ca2+-permeable non-selective cation channel, plays vital roles in the regulation of various cellular functions. It has been reported that TRPM8 activation enhanced the thermogenic function of brown adipocytes. However, the involvement of TRPM8 in the thermogenic function of WAT remains unexplored. Our data revealed that TRPM8 was expressed in mouse white adipocytes at mRNA, protein and functional levels. The mRNA expression of Trpm8 was significantly increased in the differentiated white adipocytes than pre-adipocytes. Moreover, activation of TRPM8 by menthol enhanced the expression of thermogenic genes in cultured white adipocytes. And menthol-induced increases of the thermogenic genes in white adipocytes was inhibited by either KT5720 (a protein kinase A inhibitor) or BAPTA-AM. In addition, high fat diet (HFD)-induced obesity in mice was significantly recovered by co-treatment with menthol. Dietary menthol enhanced WAT "browning" and improved glucose metabolism in HFD-induced obesity mice as well. Therefore, we concluded that TRPM8 might be involved in WAT "browning" by increasing the expression levels of genes related to thermogenesis and energy metabolism. And dietary menthol could be a novel approach for combating human obesity and related metabolic diseases.},
author = {Jiang, Changyu and Zhai, Ming-Zhu and Yan, Dong and Li, Da and Li, Chen and Zhang, Yonghong and Xiao, Lizu and Xiong, Donglin and Deng, Qiwen and Sun, Wuping},
issn = {19492553},
journal = {Oncotarget},
number = {43},
pages = {75114 -- 75126},
publisher = {Impact Journals LLC},
title = {{Dietary menthol-induced TRPM8 activation enhances WAT “browning” and ameliorates diet-induced obesity}},
doi = {10.18632/oncotarget.20540},
volume = {8},
year = {2017},
}
@inproceedings{628,
abstract = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort), or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient (simple linear-time algorithm) sound approach for deriving such expected-runtime bounds for the analysis of recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, Coupon-Collector, where the worst-case bounds are either inefficient (such as linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity), or ineffective.},
author = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket},
editor = {Majumdar, Rupak and Kunčak, Viktor},
isbn = {978-3-319-63386-2},
location = {Heidelberg, Germany},
pages = {118 -- 139},
publisher = {Springer},
title = {{Automated recurrence analysis for almost linear expected runtime bounds}},
doi = {10.1007/978-3-319-63387-9_6},
volume = {10426},
year = {2017},
}
@phdthesis{6287,
abstract = {The main objects considered in the present work are simplicial and CW-complexes with vertices forming a random point cloud. In particular, we consider a Poisson point process in R^n and study Delaunay and Voronoi complexes of the first and higher orders and weighted Delaunay complexes obtained as sections of Delaunay complexes, as well as the Čech complex. Further, we examine the Delaunay complex of a Poisson point process on the sphere S^n, as well as of a uniform point cloud, which is equivalent to the convex hull, providing a connection to the theory of random polytopes. Each of the complexes in question can be endowed with a radius function, which maps its cells to the radii of appropriately chosen circumspheres, called the radius of the cell. Applying and developing discrete Morse theory for these functions, joining it together with probabilistic and sometimes analytic machinery, and developing several integral geometric tools, we aim at getting the distributions of circumradii of typical cells. For all considered complexes, we are able to generalize and obtain up to constants the distribution of radii of typical intervals of all types. In low dimensions the constants can be computed explicitly, thus providing the explicit expressions for the expected numbers of cells. In particular, it allows to find the expected density of simplices of every dimension for a Poisson point process in R^4, whereas the result for R^3 was known already in 1970's.},
author = {Nikitenko, Anton},
pages = {86},
publisher = {IST Austria},
title = {{Discrete Morse theory for random complexes}},
doi = {10.15479/AT:ISTA:th_873},
year = {2017},
}
@inbook{629,
abstract = {Even simple cells like bacteria have precisely regulated cellular anatomies, which allow them to grow, divide and to respond to internal or external cues with high fidelity. How spatial and temporal intracellular organization in prokaryotic cells is achieved and maintained on the basis of locally interacting proteins still remains largely a mystery. Bulk biochemical assays with purified components and in vivo experiments help us to approach key cellular processes from two opposite ends, in terms of minimal and maximal complexity. However, to understand how cellular phenomena emerge, that are more than the sum of their parts, we have to assemble cellular subsystems step by step from the bottom up. Here, we review recent in vitro reconstitution experiments with proteins of the bacterial cell division machinery and illustrate how they help to shed light on fundamental cellular mechanisms that constitute spatiotemporal order and regulate cell division.},
author = {Loose, Martin and Zieske, Katja and Schwille, Petra},
booktitle = {Prokaryotic Cytoskeletons},
pages = {419 -- 444},
publisher = {Springer},
title = {{Reconstitution of protein dynamics involved in bacterial cell division}},
doi = {10.1007/978-3-319-53047-5_15},
volume = {84},
year = {2017},
}
@inproceedings{630,
abstract = {Background: Standards have become available to share semantically encoded vital parameters from medical devices, as required for example by personal healthcare records. Standardised sharing of biosignal data largely remains open. Objectives: The goal of this work is to explore available biosignal file format and data exchange standards and profiles, and to conceptualise end-to-end solutions. Methods: The authors reviewed and discussed available biosignal file format standards with other members of international standards development organisations (SDOs). Results: A raw concept for standards based acquisition, storage, archiving and sharing of biosignals was developed. The GDF format may serve for storing biosignals. Signals can then be shared using FHIR resources and may be stored on FHIR servers or in DICOM archives, with DICOM waveforms as one possible format. Conclusion: Currently a group of international SDOs (e.g. HL7, IHE, DICOM, IEEE) is engaged in intensive discussions. This discussion extends existing work that already was adopted by large implementer communities. The concept presented here only reports the current status of the discussion in Austria. The discussion will continue internationally, with results to be expected over the coming years.},
author = {Sauermann, Stefan and David, Veronika and Schlögl, Alois and Egelkraut, Reinhard and Frohner, Matthias and Pohn, Birgit and Urbauer, Philipp and Mense, Alexander},
booktitle = {Health Informatics Meets eHealth},
isbn = {978-161499758-0},
location = {Vienna, Austria},
pages = {356--362},
publisher = {IOS Press},
title = {{Biosignals standards and FHIR: The way to go}},
doi = {10.3233/978-1-61499-759-7-356},
volume = {236},
year = {2017},
}
@inproceedings{631,
abstract = {Template polyhedra generalize intervals and octagons to polyhedra whose facets are orthogonal to a given set of arbitrary directions. They have been employed in the abstract interpretation of programs and, with particular success, in the reachability analysis of hybrid automata. While previously, the choice of directions has been left to the user or a heuristic, we present a method for the automatic discovery of directions that generalize and eliminate spurious counterexamples. We show that for the class of convex hybrid automata, i.e., hybrid automata with (possibly nonlinear) convex constraints on derivatives, such directions always exist and can be found using convex optimization. We embed our method inside a CEGAR loop, thus enabling the time-unbounded reachability analysis of an important and richer class of hybrid automata than was previously possible. We evaluate our method on several benchmarks, demonstrating also its superior efficiency for the special case of linear hybrid automata.},
author = {Bogomolov, Sergiy and Frehse, Goran and Giacobbe, Mirco and Henzinger, Thomas A},
booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
isbn = {978-366254576-8},
location = {Uppsala, Sweden},
pages = {589--606},
publisher = {Springer},
title = {{Counterexample guided refinement of template polyhedra}},
doi = {10.1007/978-3-662-54577-5_34},
volume = {10205},
year = {2017},
}
@article{632,
abstract = {We consider a 2D quantum system of N bosons in a trapping potential $|x|^s$, interacting via a pair potential of the form $N^{2\beta-1} w(N^\beta x)$. We show that for all $0 < \beta < (s+1)/(s+2)$, the leading order behavior of ground states of the many-body system is described in the large N limit by the corresponding cubic nonlinear Schrödinger energy functional. Our result covers the focusing case (w < 0) where even the stability of the many-body system is not obvious. This answers an open question mentioned by X. Chen and J. Holmer for harmonic traps (s = 2). Together with the BBGKY hierarchy approach used by these authors, our result implies the convergence of the many-body quantum dynamics to the focusing NLS equation with harmonic trap for all $0 < \beta < 3/4$.},
author = {Lewin, Mathieu and Nam, Phan and Rougerie, Nicolas},
journal = {Proceedings of the American Mathematical Society},
number = {6},
pages = {2441--2454},
publisher = {American Mathematical Society},
title = {{A note on 2D focusing many boson systems}},
doi = {10.1090/proc/13468},
volume = {145},
year = {2017},
}
@inproceedings{633,
abstract = {A Rapidly-exploring Random Tree (RRT) is an algorithm which can search a non-convex region of space by incrementally building a space-filling tree. The tree is constructed from random points drawn from system’s state space and is biased to grow towards large unexplored areas in the system. RRT can provide better coverage of a system’s possible behaviors compared with random simulations, but is more lightweight than full reachability analysis. In this paper, we explore some of the design decisions encountered while implementing a hybrid extension of the RRT algorithm, which have not been elaborated on before. In particular, we focus on handling non-determinism, which arises due to discrete transitions. We introduce the notion of important points to account for this phenomenon. We showcase our ideas using heater and navigation benchmarks.},
author = {Bak, Stanley and Bogomolov, Sergiy and Henzinger, Thomas A and Kumar, Aviral},
booktitle = {Numerical Software Verification},
editor = {Abate, Alessandro and Boldo, Sylvie},
isbn = {978-331963500-2},
location = {Heidelberg, Germany},
pages = {83--89},
publisher = {Springer},
title = {{Challenges and tool implementation of hybrid rapidly exploring random trees}},
doi = {10.1007/978-3-319-63501-9_6},
volume = {10381},
year = {2017},
}
@inbook{634,
abstract = {As autism spectrum disorder (ASD) is largely regarded as a neurodevelopmental condition, long-time consensus was that its hallmark features are irreversible. However, several studies from recent years using defined mouse models of ASD have provided clear evidence that in mice neurobiological and behavioural alterations can be ameliorated or even reversed by genetic restoration or pharmacological treatment either before or after symptom onset. Here, we review findings on genetic and pharmacological reversibility of phenotypes in mouse models of ASD. Our review should give a comprehensive overview on both aspects and encourage future studies to better understand the underlying molecular mechanisms that might be translatable from animals to humans.},
author = {Schroeder, Jan and Deliu, Elena and Novarino, Gaia and Schmeisser, Michael},
booktitle = {Translational Anatomy and Cell Biology of Autism Spectrum Disorder},
editor = {Schmeisser, Michael and Böckers, Tobias},
pages = {189--211},
publisher = {Springer},
title = {{Genetic and pharmacological reversibility of phenotypes in mouse models of autism spectrum disorder}},
doi = {10.1007/978-3-319-52498-6_10},
volume = {224},
year = {2017},
}
@inproceedings{635,
abstract = {Memory-hard functions (MHFs) are hash algorithms whose evaluation cost is dominated by memory cost. As memory, unlike computation, costs about the same across different platforms, MHFs cannot be evaluated at significantly lower cost on dedicated hardware like ASICs. MHFs have found widespread applications including password hashing, key derivation, and proofs-of-work. This paper focuses on scrypt, a simple candidate MHF designed by Percival, and described in RFC 7914. It has been used within a number of cryptocurrencies (e.g., Litecoin and Dogecoin) and has been an inspiration for Argon2d, one of the winners of the recent password-hashing competition. Despite its popularity, no rigorous lower bounds on its memory complexity are known. We prove that scrypt is optimally memory-hard, i.e., its cumulative memory complexity (cmc) in the parallel random oracle model is $\Omega(n^2 w)$, where w and n are the output length and number of invocations of the underlying hash function, respectively. High cmc is a strong security target for MHFs introduced by Alwen and Serbinenko (STOC’15) which implies high memory cost even for adversaries who can amortize the cost over many evaluations and evaluate the underlying hash functions many times in parallel. Our proof is the first showing optimal memory-hardness for any MHF. Our result improves both quantitatively and qualitatively upon the recent work by Alwen et al. (EUROCRYPT’16) who proved a weaker lower bound of $\Omega(n^2 w/\log^2 n)$ for a restricted class of adversaries.},
author = {Alwen, Joel F and Chen, Binyi and Pietrzak, Krzysztof Z and Reyzin, Leonid and Tessaro, Stefano},
booktitle = {Advances in Cryptology -- EUROCRYPT 2017},
editor = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
isbn = {978-331956616-0},
location = {Paris, France},
pages = {33--62},
publisher = {Springer},
title = {{Scrypt is maximally memory hard}},
doi = {10.1007/978-3-319-56617-7_2},
volume = {10212},
year = {2017},
}
@inproceedings{636,
abstract = {Signal regular expressions can specify sequential properties of real-valued signals based on threshold conditions, regular operations, and duration constraints. In this paper we endow them with a quantitative semantics which indicates how robustly a signal matches or does not match a given expression. First, we show that this semantics is a safe approximation of a distance between the signal and the language defined by the expression. Then, we consider the robust matching problem, that is, computing the quantitative semantics of every segment of a given signal relative to an expression. We present an algorithm that solves this problem for piecewise-constant and piecewise-linear signals and show that for such signals the robustness map is a piecewise-linear function. The availability of an indicator describing how robustly a signal segment matches some regular pattern provides a general framework for quantitative monitoring of cyber-physical systems.},
author = {Bakhirkin, Alexey and Ferrère, Thomas and Maler, Oded and Ulus, Dogan},
booktitle = {Formal Modeling and Analysis of Timed Systems},
editor = {Abate, Alessandro and Geeraerts, Gilles},
isbn = {978-331965764-6},
location = {Berlin, Germany},
pages = {189--206},
publisher = {Springer},
title = {{On the quantitative semantics of regular expressions over real-valued signals}},
doi = {10.1007/978-3-319-65765-3_11},
volume = {10419},
year = {2017},
}
@inproceedings{637,
abstract = {For many cryptographic primitives, it is relatively easy to achieve selective security (where the adversary commits a-priori to some of the choices to be made later in the attack) but appears difficult to achieve the more natural notion of adaptive security (where the adversary can make all choices on the go as the attack progresses). A series of several recent works shows how to cleverly achieve adaptive security in several such scenarios including generalized selective decryption (Panjwani, TCC ’07 and Fuchsbauer et al., CRYPTO ’15), constrained PRFs (Fuchsbauer et al., ASIACRYPT ’14), and Yao garbled circuits (Jafargholi and Wichs, TCC ’16b). Although the above works expressed vague intuition that they share a common technique, the connection was never made precise. In this work we present a new framework that connects all of these works and allows us to present them in a unified and simplified fashion. Moreover, we use the framework to derive a new result for adaptively secure secret sharing over access structures defined via monotone circuits. We envision that further applications will follow in the future. Underlying our framework is the following simple idea. It is well known that selective security, where the adversary commits to n-bits of information about his future choices, automatically implies adaptive security at the cost of amplifying the adversary’s advantage by a factor of up to $2^n$. However, in some cases the proof of selective security proceeds via a sequence of hybrids, where each pair of adjacent hybrids locally only requires some smaller partial information consisting of m ≪ n bits. The partial information needed might be completely different between different pairs of hybrids, and if we look across all the hybrids we might rely on the entire n-bit commitment. Nevertheless, the above is sufficient to prove adaptive security, at the cost of amplifying the adversary’s advantage by a factor of only $2^m \ll 2^n$. 
In all of our examples using the above framework, the different hybrids are captured by some sort of a graph pebbling game and the amount of information that the adversary needs to commit to in each pair of hybrids is bounded by the maximum number of pebbles in play at any point in time. Therefore, coming up with better strategies for proving adaptive security translates to various pebbling strategies for different types of graphs.},
author = {Jafargholi, Zahra and Kamath Hosdurg, Chethan and Klein, Karen and Komargodski, Ilan and Pietrzak, Krzysztof Z and Wichs, Daniel},
booktitle = {Advances in Cryptology -- CRYPTO 2017},
editor = {Katz, Jonathan and Shacham, Hovav},
isbn = {978-331963687-0},
location = {Santa Barbara, CA, United States},
pages = {133--163},
publisher = {Springer},
title = {{Be adaptive, avoid overcommitting}},
doi = {10.1007/978-3-319-63688-7_5},
volume = {10401},
year = {2017},
}
@proceedings{638,
  editor    = {Bogomolov, Sergiy and Martel, Matthieu and Prabhakar, Pavithra},
  publisher = {Springer},
  title     = {{Numerical Software Verification}},
  doi       = {10.1007/978-3-319-54292-8},
  volume    = {10152},
  year      = {2017},
}
@inproceedings{640,
abstract = {Data-independent Memory Hard Functions (iMHFS) are finding a growing number of applications in security; especially in the domain of password hashing. An important property of a concrete iMHF is specified by fixing a directed acyclic graph (DAG) Gn on n nodes. The quality of that iMHF is then captured by the following two pebbling complexities of Gn: – The parallel cumulative pebbling complexity Π∥cc(Gn) must be as high as possible (to ensure that the amortized cost of computing the function on dedicated hardware is dominated by the cost of memory). – The sequential space-time pebbling complexity Πst(Gn) should be as close as possible to Π∥cc(Gn) (to ensure that using many cores in parallel and amortizing over many instances does not give much of an advantage). In this paper we construct a family of DAGs with best possible parameters in an asymptotic sense, i.e., where Π∥cc(Gn) = Ω(n2/ log(n)) (which matches a known upper bound) and Πst(Gn) is within a constant factor of Π∥cc(Gn). Our analysis relies on a new connection between the pebbling complexity of a DAG and its depth-robustness (DR) – a well studied combinatorial property. We show that high DR is sufficient for high Π∥cc. Alwen and Blocki (CRYPTO’16) showed that high DR is necessary and so, together, these results fully characterize DAGs with high Π∥cc in terms of DR. Complementing these results, we provide new upper and lower bounds on the Π∥cc of several important candidate iMHFs from the literature. We give the first lower bounds on the memory hardness of the Catena and Balloon Hashing functions in a parallel model of computation and we give the first lower bounds of any kind for (a version) of Argon2i. Finally we describe a new class of pebbling attacks improving on those of Alwen and Blocki (CRYPTO’16). By instantiating these attacks we upperbound the Π∥cc of the Password Hashing Competition winner Argon2i and one of the Balloon Hashing functions by O (n1.71). 
We also show an upper bound of O(n1.625) for the Catena functions and the two remaining Balloon Hashing functions.},
author = {Alwen, Joel F and Blocki, Jeremiah and Pietrzak, Krzysztof Z},
booktitle = {Advances in Cryptology -- EUROCRYPT 2017},
editor = {Coron, Jean-Sébastien and Buus Nielsen, Jesper},
isbn = {978-331956616-0},
location = {Paris, France},
pages = {3--32},
publisher = {Springer},
title = {{Depth-robust graphs and their cumulative memory complexity}},
doi = {10.1007/978-3-319-56617-7_1},
volume = {10212},
year = {2017},
}
@inproceedings{641,
abstract = {We introduce two novel methods for learning parameters of graphical models for image labelling. The following two tasks underline both methods: (i) perturb model parameters based on given features and ground truth labelings, so as to exactly reproduce these labelings as optima of the local polytope relaxation of the labelling problem; (ii) train a predictor for the perturbed model parameters so that improved model parameters can be applied to the labelling of novel data. Our first method implements task (i) by inverse linear programming and task (ii) using a regressor e.g. a Gaussian process. Our second approach simultaneously solves tasks (i) and (ii) in a joint manner, while being restricted to linearly parameterised predictors. Experiments demonstrate the merits of both approaches.},
author = {Trajkovska, Vera and Swoboda, Paul and Åström, Freddie and Petra, Stefanie},
booktitle = {Scale Space and Variational Methods in Computer Vision},
editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
isbn = {978-331958770-7},
location = {Kolding, Denmark},
pages = {323--334},
publisher = {Springer},
title = {{Graphical model parameter learning by inverse linear programming}},
doi = {10.1007/978-3-319-58771-4_26},
volume = {10302},
year = {2017},
}
@article{642,
abstract = {Cauchy problems with SPDEs on the whole space are localized to Cauchy problems on a ball of radius R. This localization reduces various kinds of spatial approximation schemes to finite dimensional problems. The error is shown to be exponentially small. As an application, a numerical scheme is presented which combines the localization and the space and time discretization, and thus is fully implementable.},
author = {Gerencsér, Máté and Gyöngy, István},
issn = {0025-5718},
journal = {Mathematics of Computation},
number = {307},
pages = {2373--2397},
publisher = {American Mathematical Society},
title = {{Localization errors in solving stochastic partial differential equations in the whole space}},
doi = {10.1090/mcom/3201},
volume = {86},
year = {2017},
}
@techreport{6426,
abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.},
author = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz},
institution = {IST Austria},
issn = {2664-1690},
pages = {28},
title = {{Synchronizing the asynchronous}},
doi = {10.15479/AT:IST-2018-853-v2-2},
year = {2017},
}
@article{643,
abstract = {It has been reported that nicotinamide-overload induces oxidative stress associated with insulin resistance, the key feature of type 2 diabetes mellitus (T2DM). This study aimed to investigate the effects of B vitamins in T2DM. Glucose tolerance tests (GTT) were carried out in adult Sprague-Dawley rats treated with or without cumulative doses of B vitamins. More specifically, insulin tolerance tests (ITT) were also carried out in adult Sprague-Dawley rats treated with or without cumulative doses of Vitamin B3. We found that cumulative Vitamin B1 and Vitamin B3 administration significantly increased the plasma H2O2 levels associated with high insulin levels. Only Vitamin B3 reduced muscular and hepatic glycogen contents. Cumulative administration of nicotinic acid, another form of Vitamin B3, also significantly increased plasma insulin level and H2O2 generation. Moreover, cumulative administration of nicotinic acid or nicotinamide impaired glucose metabolism. This study suggested that excess Vitamin B1 and Vitamin B3 caused oxidative stress and insulin resistance.},
author = {Sun, Wuping and Zhai, Ming-Zhu and Zhou, Qian and Qian, Chengrui and Jiang, Changyu},
issn = {0304-4920},
journal = {Chinese Journal of Physiology},
number = {4},
pages = {207--214},
publisher = {Chinese Physiological Society},
title = {{Effects of B vitamins overload on plasma insulin level and hydrogen peroxide generation in rats}},
doi = {10.4077/CJP.2017.BAF469},
volume = {60},
year = {2017},
}
@article{644,
abstract = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P $\neq$ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in $\{0,1\}$ corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
journal = {SIAM Journal on Computing},
number = {3},
pages = {1087--1110},
publisher = {SIAM},
title = {{The complexity of general-valued CSPs}},
doi = {10.1137/16M1091836},
volume = {46},
year = {2017},
}
@inproceedings{645,
abstract = {Markov decision processes (MDPs) are standard models for probabilistic systems with non-deterministic behaviours. Long-run average rewards provide a mathematically elegant formalism for expressing long term performance. Value iteration (VI) is one of the simplest and most efficient algorithmic approaches to MDPs with other properties, such as reachability objectives. Unfortunately, a naive extension of VI does not work for MDPs with long-run average rewards, as there is no known stopping criterion. In this work our contributions are threefold. (1) We refute a conjecture related to stopping criteria for MDPs with long-run average rewards. (2) We present two practical algorithms for MDPs with long-run average rewards based on VI. First, we show that a combination of applying VI locally for each maximal end-component (MEC) and VI for reachability objectives can provide approximation guarantees. Second, extending the above approach with a simulation-guided on-demand variant of VI, we present an anytime algorithm that is able to deal with very large models. (3) Finally, we present experimental results showing that our methods significantly outperform the standard approaches on several benchmarks.},
author = {Ashok, Pranav and Chatterjee, Krishnendu and Daca, Przemyslaw and Kretinsky, Jan and Meggendorfer, Tobias},
booktitle = {Computer Aided Verification},
editor = {Majumdar, Rupak and Kunčak, Viktor},
isbn = {978-331963386-2},
location = {Heidelberg, Germany},
pages = {201--221},
publisher = {Springer},
title = {{Value iteration for long run average reward in Markov decision processes}},
doi = {10.1007/978-3-319-63387-9_10},
volume = {10426},
year = {2017},
}
@inproceedings{646,
abstract = {We present a novel convex relaxation and a corresponding inference algorithm for the non-binary discrete tomography problem, that is, reconstructing discrete-valued images from few linear measurements. In contrast to state of the art approaches that split the problem into a continuous reconstruction problem for the linear measurement constraints and a discrete labeling problem to enforce discrete-valued reconstructions, we propose a joint formulation that addresses both problems simultaneously, resulting in a tighter convex relaxation. For this purpose a constrained graphical model is set up and evaluated using a novel relaxation optimized by dual decomposition. We evaluate our approach experimentally and show superior solutions both mathematically (tighter relaxation) and experimentally in comparison to previously proposed relaxations.},
author = {Kuske, Jan and Swoboda, Paul and Petra, Stefanie},
booktitle = {Scale Space and Variational Methods in Computer Vision},
editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
isbn = {978-331958770-7},
location = {Kolding, Denmark},
pages = {235--246},
publisher = {Springer},
title = {{A novel convex relaxation for non-binary discrete tomography}},
doi = {10.1007/978-3-319-58771-4_19},
volume = {10302},
year = {2017},
}
@inproceedings{647,
abstract = {Despite researchers’ efforts in the last couple of decades, reachability analysis is still a challenging problem even for linear hybrid systems. Among the existing approaches, the most practical ones are mainly based on bounded-time reachable set over-approximations. For the purpose of unbounded-time analysis, one important strategy is to abstract the original system and find an invariant for the abstraction. In this paper, we propose an approach to constructing a new kind of abstraction called conic abstraction for affine hybrid systems, and to computing reachable sets based on this abstraction. The essential feature of a conic abstraction is that it partitions the state space of a system into a set of convex polyhedral cones which is derived from a uniform conic partition of the derivative space. Such a set of polyhedral cones is able to cut all trajectories of the system into almost straight segments so that every segment of a reach pipe in a polyhedral cone tends to be straight as well, and hence can be over-approximated tightly by polyhedra using similar techniques as HyTech or PHAVer. In particular, for diagonalizable affine systems, our approach can guarantee to find an invariant for unbounded reachable sets, which is beyond the capability of bounded-time reachability analysis tools. We implemented the approach in a tool and experiments on benchmarks show that our approach is more powerful than SpaceEx and PHAVer in dealing with diagonalizable systems.},
author = {Bogomolov, Sergiy and Giacobbe, Mirco and Henzinger, Thomas A and Kong, Hui},
booktitle = {Formal Modeling and Analysis of Timed Systems},
isbn = {978-331965764-6},
location = {Berlin, Germany},
pages = {116--132},
publisher = {Springer},
title = {{Conic abstractions for hybrid systems}},
doi = {10.1007/978-3-319-65765-3_7},
volume = {10419},
year = {2017},
}
@inproceedings{648,
abstract = {Pseudoentropy has found a lot of important applications to cryptography and complexity theory. In this paper we focus on the foundational problem that has not been investigated so far, namely by how much pseudoentropy (the amount seen by computationally bounded attackers) differs from its information-theoretic counterpart (seen by unbounded observers), given certain limits on attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around the size exponential in the entropy amount:– If the attacker size (s) and advantage (ε) satisfy s (formula presented) where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented) then pseudoentropy could be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posted question, we show an elegant application of our result to the complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger). In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.},
author = {Skórski, Maciej},
booktitle = {Theory and Applications of Models of Computation},
editor = {Jäger, Gerhard and Steila, Silvia},
isbn = {978-331955910-0},
location = {Bern, Switzerland},
pages = {600--613},
publisher = {Springer},
title = {{On the complexity of breaking pseudoentropy}},
doi = {10.1007/978-3-319-55911-7_43},
volume = {10185},
year = {2017},
}
@inbook{649,
abstract = {We give a short overview on a recently developed notion of Ricci curvature for discrete spaces. This notion relies on geodesic convexity properties of the relative entropy along geodesics in the space of probability densities, for a metric which is similar to (but different from) the 2-Wasserstein metric. The theory can be considered as a discrete counterpart to the theory of Ricci curvature for geodesic measure spaces developed by Lott–Sturm–Villani.},
author = {Maas, Jan},
booktitle = {Modern Approaches to Discrete Curvature},
editor = {Najman, Laurent and Romon, Pascal},
isbn = {978-3-319-58002-9},
pages = {159--174},
publisher = {Springer},
title = {{Entropic Ricci curvature for discrete spaces}},
doi = {10.1007/978-3-319-58002-9_5},
volume = {2184},
year = {2017},
}
@inproceedings{650,
abstract = {In this work we present a short and unified proof for the Strong and Weak Regularity Lemma, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to a task of finding constructively an approximation for a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle-indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving for any graph with density d and any approximation parameter the partition size. The novelty in our proof is: (a) a simple approach which yields both strong and weaker variant, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.},
author = {Skórski, Maciej},
booktitle = {Theory and Applications of Models of Computation},
editor = {Jäger, Gerhard and Steila, Silvia},
issn = {03029743},
location = {Bern, Switzerland},
pages = {586--599},
publisher = {Springer},
title = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}},
doi = {10.1007/978-3-319-55911-7_42},
volume = {10185},
year = {2017},
}
@article{651,
abstract = {Superhydrophobic surfaces reduce the frictional drag between water and solid materials, but this effect is often temporary. The realization of sustained drag reduction has applications for water vehicles and pipeline flows.},
author = {Hof, Björn},
issn = {0028-0836},
journal = {Nature},
number = {7636},
pages = {161--162},
publisher = {Nature Publishing Group},
title = {{Fluid dynamics: Water flows out of touch}},
doi = {10.1038/541161a},
volume = {541},
year = {2017},
}
@inproceedings{6517,
abstract = {A (possibly degenerate) drawing of a graph G in the plane is approximable by an embedding if it can be turned into an embedding by an arbitrarily small perturbation. We show that testing, whether a drawing of a planar graph G in the plane is approximable by an embedding, can be carried out in polynomial time, if a desired embedding of G belongs to a fixed isotopy class, i.e., the rotation system (or equivalently the faces) of the embedding of G and the choice of outer face are fixed. In other words, we show that c-planarity with embedded pipes is tractable for graphs with fixed embeddings. To the best of our knowledge an analogous result was previously known essentially only when G is a cycle.},
author = {Fulek, Radoslav},
location = {Phuket, Thailand},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum für Informatik},
title = {{Embedding graphs into embedded graphs}},
doi = {10.4230/LIPIcs.ISAAC.2017.34},
volume = {92},
year = {2017},
}
@inproceedings{6519,
abstract = {Graph games with omega-regular winning conditions provide a mathematical framework to analyze a wide range of problems in the analysis of reactive systems and programs (such as the synthesis of reactive systems, program repair, and the verification of branching time properties). Parity conditions are canonical forms to specify omega-regular winning conditions. Graph games with parity conditions are equivalent to mu-calculus model checking, and thus a very important algorithmic problem. Symbolic algorithms are of great significance because they provide scalable algorithms for the analysis of large finite-state systems, as well as algorithms for the analysis of infinite-state systems with finite quotient. A set-based symbolic algorithm uses the basic set operations and the one-step predecessor operators. We consider graph games with n vertices and parity conditions with c priorities (equivalently, a mu-calculus formula with c alternations of least and greatest fixed points). While many explicit algorithms exist for graph games with parity conditions, for set-based symbolic algorithms there are only two algorithms (notice that we use space to refer to the number of sets stored by a symbolic algorithm): (a) the basic algorithm that requires O(n^c) symbolic operations and linear space; and (b) an improved algorithm that requires O(n^{c/2+1}) symbolic operations but also O(n^{c/2+1}) space (i.e., exponential space). In this work we present two set-based symbolic algorithms for parity games: (a) our first algorithm requires O(n^{c/2+1}) symbolic operations and only requires linear space; and (b) developing on our first algorithm, we present an algorithm that requires O(n^{c/3+1}) symbolic operations and only linear space. We also present the first linear space set-based symbolic algorithm for parity games that requires at most a sub-exponential number of symbolic operations. },
author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika and Loitzenbauer, Veronika},
location = {Stockholm, Sweden},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum für Informatik},
title = {{Improved set-based symbolic algorithms for parity games}},
doi = {10.4230/LIPIcs.CSL.2017.18},
volume = {82},
year = {2017},
}
@inproceedings{652,
abstract = {We present an approach that enables robots to self-organize their sensorimotor behavior from scratch without providing specific information about neither the robot nor its environment. This is achieved by a simple neural control law that increases the consistency between external sensor dynamics and internal neural dynamics of the utterly simple controller. In this way, the embodiment and the agent-environment coupling are the only source of individual development. We show how an anthropomorphic tendon driven arm-shoulder system develops different behaviors depending on that coupling. For instance: Given a bottle half-filled with water, the arm starts to shake it, driven by the physical response of the water. When attaching a brush, the arm can be manipulated into wiping a table, and when connected to a revolvable wheel it finds out how to rotate it. Thus, the robot may be said to discover the affordances of the world. When allowing two (simulated) humanoid robots to interact physically, they engage into a joint behavior development leading to, for instance, spontaneous cooperation. More social effects are observed if the robots can visually perceive each other. Although, as an observer, it is tempting to attribute an apparent intentionality, there is nothing of the kind put in. As a conclusion, we argue that emergent behavior may be much less rooted in explicit intentions, internal motivations, or specific reward systems than is commonly believed.},
author = {Der, Ralf and Martius, Georg S},
isbn = {978-1-5090-5069-7},
location = {Cergy-Pontoise, France},
publisher = {IEEE},
title = {{Dynamical self consistency leads to behavioral development and emergent social interactions in robots}},
doi = {10.1109/DEVLRN.2016.7846789},
year = {2017},
}
@inproceedings{6526,
abstract = {This paper studies the complexity of estimating Rényi divergences of discrete distributions: p observed from samples and the baseline distribution q known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, contrarily to estimating Rényi entropy where a sublinear (in the alphabet size) number of samples suffices, the sample complexity is heavily dependent on events occurring unlikely in q, and is unbounded in general (no matter what an estimation technique is used). For any divergence of integer order bigger than 1, we provide upper and lower bounds on the number of samples dependent on probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insights into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that they should be handled with care not only because of numerical issues, but also because of a blow up in the sample complexity.},
author = {Skórski, Maciej},
booktitle = {2017 IEEE International Symposium on Information Theory (ISIT)},
isbn = {978-1-5090-4096-4},
location = {Aachen, Germany},
publisher = {IEEE},
title = {{On the complexity of estimating Rényi divergences}},
doi = {10.1109/isit.2017.8006529},
year = {2017},
}
@inproceedings{6527,
abstract = {A memory-hard function (MHF) ƒn with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms where rate limits help mitigate off-line dictionary attacks. As the honest users' inputs to these functions are often (low-entropy) passwords special attention is given to a class of side-channel resistant MHFs called iMHFs.
Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG has been identified (called "depth-robustness") which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, up till now, all known very depth-robust DAGs are impractically complicated and little is known about their exact (i.e. non-asymptotic) depth-robustness both in theory and in practice.
In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient to navigate practical DAGs for use in iMHFs and other applications. For each DAG we:
*Prove that their depth-robustness is asymptotically maximal.
*Prove bounds of at least 3 orders of magnitude better on their exact depth-robustness compared to known bounds for other practical iMHF.
*Implement and empirically evaluate their depth-robustness and aAT against a variety of state-of-the-art (and several new) depth-reduction and low aAT attacks.
We find that, against all attacks, the new DAGs perform significantly better in practice than Argon2i, the most widely deployed iMHF in practice.
Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate practicality of our constructions by modifying the Argon2i code base to use one of the new high aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).},
author = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben},
booktitle = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security},
isbn = {978-1-4503-4946-8},
location = {Dallas, TX, USA},
pages = {1001--1017},
publisher = {ACM Press},
title = {{Practical graphs for optimal side-channel resistant memory-hard functions}},
doi = {10.1145/3133956.3134031},
year = {2017},
}
@article{653,
abstract = {The extent of heterogeneity among driver gene mutations present in naturally occurring metastases - that is, treatment-naive metastatic disease - is largely unknown. To address this issue, we carried out 60× whole-genome sequencing of 26 metastases from four patients with pancreatic cancer. We found that identical mutations in known driver genes were present in every metastatic lesion for each patient studied. Passenger gene mutations, which do not have known or predicted functional consequences, accounted for all intratumoral heterogeneity. Even with respect to these passenger mutations, our analysis suggests that the genetic similarity among the founding cells of metastases was higher than that expected for any two cells randomly taken from a normal tissue. The uniformity of known driver gene mutations among metastases in the same patient has critical and encouraging implications for the success of future targeted therapies in advanced-stage disease.},
author = {Makohon-Moore, Alvin and Zhang, Ming and Reiter, Johannes and Božić, Ivana and Allen, Benjamin and Kundu, Deepanjan and Chatterjee, Krishnendu and Wong, Fay and Jiao, Yuchen and Kohutek, Zachary and Hong, Jungeui and Attiyeh, Marc and Javier, Breanna and Wood, Laura and Hruban, Ralph and Nowak, Martin and Papadopoulos, Nickolas and Kinzler, Kenneth and Vogelstein, Bert and Iacobuzio-Donahue, Christine},
issn = {1061-4036},
journal = {Nature Genetics},
number = {3},
pages = {358--366},
publisher = {Nature Publishing Group},
title = {{Limited heterogeneity of known driver gene mutations among the metastases of individual patients with pancreatic cancer}},
doi = {10.1038/ng.3764},
volume = {49},
year = {2017},
}
@article{654,
abstract = {In November 2016, developmental biologists, synthetic biologists and engineers gathered in Paris for a meeting called ‘Engineering the embryo’. The participants shared an interest in exploring how synthetic systems can reveal new principles of embryonic development, and how the in vitro manipulation and modeling of development using stem cells can be used to integrate ideas and expertise from physics, developmental biology and tissue engineering. As we review here, the conference pinpointed some of the challenges arising at the intersection of these fields, along with great enthusiasm for finding new approaches and collaborations.},
author = {Kicheva, Anna and Rivron, Nicolas},
issn = {0950-1991},
journal = {Development},
number = {5},
pages = {733--736},
publisher = {Company of Biologists},
title = {{Creating to understand – developmental biology meets engineering in Paris}},
doi = {10.1242/dev.144915},
volume = {144},
year = {2017},
}
@article{655,
abstract = {The bacterial flagellum is a self-assembling nanomachine. The external flagellar filament, several times longer than a bacterial cell body, is made of a few tens of thousands subunits of a single protein: flagellin. A fundamental problem concerns the molecular mechanism of how the flagellum grows outside the cell, where no discernible energy source is available. Here, we monitored the dynamic assembly of individual flagella using in situ labelling and real-time immunostaining of elongating flagellar filaments. We report that the rate of flagellum growth, initially ~1,700 amino acids per second, decreases with length and that the previously proposed chain mechanism does not contribute to the filament elongation dynamics. Inhibition of the proton motive force-dependent export apparatus revealed a major contribution of substrate injection in driving filament elongation. The combination of experimental and mathematical evidence demonstrates that a simple, injection-diffusion mechanism controls bacterial flagella growth outside the cell.},
author = {Renault, Thibaud and Abraham, Anthony and Bergmiller, Tobias and Paradis, Guillaume and Rainville, Simon and Charpentier, Emmanuelle and Guet, Calin C and Tu, Yuhai and Namba, Keiichi and Keener, James and Minamino, Tohru and Erhardt, Marc},
issn = {2050-084X},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Bacterial flagella grow through an injection diffusion mechanism}},
doi = {10.7554/eLife.23136},
volume = {6},
year = {2017},
}