@article{13397, abstract = {Self-assembly of inorganic nanoparticles has been studied extensively for particles having different sizes and compositions. However, relatively little attention has been devoted to how the shape and surface chemistry of magnetic nanoparticles affects their self-assembly properties. Here, we undertook a combined experiment–theory study aimed at better understanding of the self-assembly of cubic magnetite (Fe3O4) particles. We demonstrated that, depending on the experimental parameters, such as the direction of the magnetic field and nanoparticle density, a variety of superstructures can be obtained, including one-dimensional filaments and helices, as well as C-shaped assemblies described here for the first time. Furthermore, we functionalized the surfaces of the magnetic nanocubes with light-sensitive ligands. Using these modified nanoparticles, we were able to achieve orthogonal control of self-assembly using a magnetic field and light.}, author = {Singh, Gurvinder and Chan, Henry and Udayabhaskararao, T. and Gelman, Elijah and Peddis, Davide and Baskin, Artem and Leitus, Gregory and Král, Petr and Klajn, Rafal}, issn = {1364-5498}, journal = {Faraday Discussions}, keywords = {Physical and Theoretical Chemistry}, pages = {403--421}, publisher = {Royal Society of Chemistry}, title = {{Magnetic field-induced self-assembly of iron oxide nanocubes}}, doi = {10.1039/c4fd00265b}, volume = {181}, year = {2015}, } @article{13398, author = {Sun, Yugang and Scarabelli, Leonardo and Kotov, Nicholas and Tebbe, Moritz and Lin, Xiao-Min and Brullot, Ward and Isa, Lucio and Schurtenberger, Peter and Moehwald, Helmuth and Fedin, Igor and Velev, Orlin and Faivre, Damien and Sorensen, Christopher and Perzynski, Régine and Chanana, Munish and Li, Zhihai and Bresme, Fernando and Král, Petr and Firlar, Emre and Schiffrin, David and Souza Junior, Joao Batista and Fery, Andreas and Shevchenko, Elena and Tarhan, Ozgur and Alivisatos, Armand Paul and Disch, Sabrina and Klajn, Rafal and Ghosh, Suvojit}, issn = {1364-5498}, journal = {Faraday Discussions}, keywords = {Physical and Theoretical Chemistry}, pages = {463--479}, publisher = {Royal Society of Chemistry}, title = {{Field-assisted self-assembly process: General discussion}}, doi = {10.1039/c5fd90041g}, volume = {181}, year = {2015}, } @article{14017, abstract = {The detection of electron motion and electronic wave-packet dynamics is one of the core goals of attosecond science. Recently, choosing the nitric oxide molecule as an example, we have introduced and demonstrated an experimental approach to measure coupled valence electronic and rotational wave packets using high-order-harmonic-generation (HHG) spectroscopy [Kraus et al., Phys. Rev. Lett. 111, 243005 (2013)]. A short outline of the theory to describe the combination of the pump and HHG probe process was published together with an extensive discussion of experimental results [Baykusheva et al., Faraday Discuss. 171, 113 (2014)]. The comparison of theory and experiment showed good agreement on a quantitative level. Here, we present the theory in detail, which is based on a generalized density-matrix approach that describes the pump process and the subsequent probing of the wave packets by a semiclassical quantitative rescattering approach. An in-depth analysis of the different Raman scattering contributions to the creation of the coupled rotational and electronic spin-orbit wave packets is made. 
We present results for parallel and perpendicular linear polarizations of the pump and probe laser pulses. Furthermore, an analysis of the combined rotational-electronic density matrix in terms of irreducible components is presented that facilitates interpretation of the results.}, author = {Zhang, Song Bin and Baykusheva, Denitsa Rangelova and Kraus, Peter M. and Wörner, Hans Jakob and Rohringer, Nina}, issn = {1094-1622}, journal = {Physical Review A}, keywords = {Atomic and Molecular Physics, and Optics}, number = {2}, publisher = {American Physical Society}, title = {{Theoretical study of molecular electronic and rotational coherences by high-order-harmonic generation}}, doi = {10.1103/physreva.91.023421}, volume = {91}, year = {2015}, } @article{14016, abstract = {All attosecond time-resolved measurements have so far relied on the use of intense near-infrared laser pulses. In particular, attosecond streaking, laser-induced electron diffraction and high-harmonic generation all make use of non-perturbative light–matter interactions. Remarkably, the effect of the strong laser field on the studied sample has often been neglected in previous studies. Here we use high-harmonic spectroscopy to measure laser-induced modifications of the electronic structure of molecules. We study high-harmonic spectra of spatially oriented CH3F and CH3Br as generic examples of polar polyatomic molecules. We accurately measure intensity ratios of even and odd-harmonic orders, and of the emission from aligned and unaligned molecules. We show that these robust observables reveal a substantial modification of the molecular electronic structure by the external laser field. Our insights offer new challenges and opportunities for a range of emerging strong-field attosecond spectroscopies.}, author = {Kraus, P. M. and Tolstikhin, O. I. and Baykusheva, Denitsa Rangelova and Rupenyan, A. and Schneider, J. and Bisgaard, C. Z. and Morishita, T. and Jensen, F. and Madsen, L. B. and Wörner, H. J.}, issn = {2041-1723}, journal = {Nature Communications}, keywords = {General Physics and Astronomy, General Biochemistry, Genetics and Molecular Biology, General Chemistry, Multidisciplinary}, publisher = {Springer Nature}, title = {{Observation of laser-induced electronic structure in oriented polyatomic molecules}}, doi = {10.1038/ncomms8039}, volume = {6}, year = {2015}, } @article{14013, abstract = {The ultrafast motion of electrons and holes after light-matter interaction is fundamental to a broad range of chemical and biophysical processes. We advanced high-harmonic spectroscopy to resolve spatially and temporally the migration of an electron hole immediately after ionization of iodoacetylene while simultaneously demonstrating extensive control over the process. A multidimensional approach, based on the measurement and accurate theoretical description of both even and odd harmonic orders, enabled us to reconstruct both quantum amplitudes and phases of the electronic states with a resolution of ~100 attoseconds. We separately reconstructed quasi-field-free and laser-controlled charge migration as a function of the spatial orientation of the molecule and determined the shape of the hole created by ionization. Our technique opens the prospect of laser control over electronic primary processes.}, author = {Kraus, P. M. and Mignolet, B. and Baykusheva, Denitsa Rangelova and Rupenyan, A. and Horný, L. and Penka, E. F. and Grassi, G. and Tolstikhin, O. I. and Schneider, J. and Jensen, F. and Madsen, L. B. and Bandrauk, A. D. 
and Remacle, F. and Wörner, H. J.}, issn = {1095-9203}, journal = {Science}, keywords = {Multidisciplinary}, number = {6262}, pages = {790--795}, publisher = {American Association for the Advancement of Science}, title = {{Measurement and laser control of attosecond charge migration in ionized iodoacetylene}}, doi = {10.1126/science.aab2160}, volume = {350}, year = {2015}, } @article{14015, abstract = {We advance high-harmonic spectroscopy to resolve molecular charge migration in time and space and simultaneously demonstrate extensive control over the process. A multidimensional approach enables us to reconstruct both quantum amplitudes and phases with a resolution of better than 100 attoseconds and to separately reconstruct field-free and laser-driven charge migration. Our techniques make charge migration in molecules measurable on the attosecond time scale and open new avenues for laser control of electronic primary processes.}, author = {Kraus, P M and Mignolet, B and Baykusheva, Denitsa Rangelova and Rupenyan, A and Horný, L and Penka, E F and Tolstikhin, O I and Schneider, J and Jensen, F and Madsen, L B and Bandrauk, A D and Remacle, F and Wörner, H J}, issn = {1742-6596}, journal = {Journal of Physics: Conference Series}, keywords = {General Physics and Astronomy}, number = {11}, publisher = {IOP Publishing}, title = {{Attosecond charge migration and its laser control}}, doi = {10.1088/1742-6596/635/11/112136}, volume = {635}, year = {2015}, } @article{14014, abstract = {We have studied a coupled electronic-nuclear wave packet in nitric oxide using time-resolved strong-field photoelectron holography and rescattering. We show that the electronic dynamics mainly appears in the holographic structures whereas nuclear motion strongly modulates the angular distribution of the rescattered photoelectrons.}, author = {Walt, Samuel G and Ram, N Bhargava and von Conta, Aaron and Baykusheva, Denitsa Rangelova and Atala, Marcos and Wörner, Hans Jakob}, issn = {1742-6596}, journal = {Journal of Physics: Conference Series}, keywords = {General Physics and Astronomy}, number = {11}, publisher = {IOP Publishing}, title = {{Resolving the dynamics of valence-shell electrons and nuclei through laser-induced diffraction and holography}}, doi = {10.1088/1742-6596/635/11/112135}, volume = {635}, year = {2015}, } @misc{9719, abstract = {Parasitism creates selection for resistance mechanisms in host populations and is hypothesized to promote increased host evolvability. However, the influence of these traits on host evolution when parasites are no longer present is unclear. We used experimental evolution and whole-genome sequencing of Escherichia coli to determine the effects of past and present exposure to parasitic viruses (phages) on the spread of mutator alleles, resistance, and bacterial competitive fitness. We found that mutator alleles spread rapidly during adaptation to any of four different phage species, and this pattern was even more pronounced with multiple phages present simultaneously. However, hypermutability did not detectably accelerate adaptation in the absence of phages and recovery of fitness costs associated with resistance. Several lineages evolved phage resistance through elevated mucoidy, and during subsequent evolution in phage-free conditions they rapidly reverted to nonmucoid, phage-susceptible phenotypes. Genome sequencing revealed that this phenotypic reversion was achieved by additional genetic changes rather than by genotypic reversion of the initial resistance mutations. 
Insertion sequence (IS) elements played a key role in both the acquisition of resistance and adaptation in the absence of parasites; unlike single nucleotide polymorphisms, IS insertions were not more frequent in mutator lineages. Our results provide a genetic explanation for rapid reversion of mucoidy, a phenotype observed in other bacterial species including human pathogens. Moreover, this demonstrates that the types of genetic change underlying adaptation to fitness costs, and consequently the impact of evolvability mechanisms such as increased point-mutation rates, depend critically on the mechanism of resistance.}, author = {Wielgoss, Sébastien and Bergmiller, Tobias and Bischofberger, Anna M. and Hall, Alex R.}, publisher = {Dryad}, title = {{Data from: Adaptation to parasites and costs of parasite resistance in mutator and non-mutator bacteria}}, doi = {10.5061/dryad.cj910}, year = {2015}, } @phdthesis{1401, abstract = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of object recognition task: learning augmented attributes as mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.}, author = {Sharmanska, Viktoriia}, issn = {2663-337X}, pages = {144}, publisher = {Institute of Science and Technology Austria}, title = {{Learning with attributes for object recognition: Parametric and non-parametrics views}}, doi = {10.15479/at:ista:1401}, year = {2015}, } @article{1709, abstract = {The competition for resources among cells, individuals or species is a fundamental characteristic of evolution. Biological all-pay auctions have been used to model situations where multiple individuals compete for a single resource. However, in many situations multiple resources with various values exist and single reward auctions are not applicable. We generalize the model to multiple rewards and study the evolution of strategies. In biological all-pay auctions the bid of an individual corresponds to its strategy and is equivalent to its payment in the auction. The decreasingly ordered rewards are distributed according to the decreasingly ordered bids of the participating individuals. 
The reproductive success of an individual is proportional to its fitness given by the sum of the rewards won minus its payments. Hence, successful bidding strategies spread in the population. We find that the results for the multiple reward case are very different from the single reward case. While the mixed strategy equilibrium in the single reward case with more than two players consists of mostly low-bidding individuals, we show that the equilibrium can convert to many high-bidding individuals and a few low-bidding individuals in the multiple reward case. Some reward values lead to a specialization among the individuals where one subpopulation competes for the rewards and the other subpopulation largely avoids costly competitions. Whether the mixed strategy equilibrium is an evolutionarily stable strategy (ESS) depends on the specific values of the rewards.}, author = {Reiter, Johannes and Kanodia, Ayush and Gupta, Raghav and Nowak, Martin and Chatterjee, Krishnendu}, journal = {Proceedings of the Royal Society of London Series B Biological Sciences}, number = {1812}, publisher = {Royal Society}, title = {{Biological auctions with multiple rewards}}, doi = {10.1098/rspb.2015.1041}, volume = {282}, year = {2015}, } @phdthesis{1400, abstract = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.}, author = {Reiter, Johannes}, issn = {2663-337X}, pages = {183}, publisher = {Institute of Science and Technology Austria}, title = {{The subclonal evolution of cancer}}, year = {2015}, } @article{1792, abstract = {Motivated by recent ideas of Harman (Unif. Distrib. Theory, 2010) we develop a new concept of variation of multivariate functions on a compact Hausdorff space with respect to a collection D of subsets. We prove a general version of the Koksma-Hlawka theorem that holds for this notion of variation and discrepancy with respect to D. As special cases, we obtain Koksma-Hlawka inequalities for classical notions, such as extreme or isotropic discrepancy. For extreme discrepancy, our result coincides with the usual Koksma-Hlawka theorem. We show that the space of functions of bounded D-variation contains important discontinuous functions and is closed under natural algebraic operations. Finally, we illustrate the results on concrete integration problems from integral geometry and stereology.}, author = {Pausinger, Florian and Svane, Anne}, journal = {Journal of Complexity}, number = {6}, pages = {773 -- 797}, publisher = {Academic Press}, title = {{A Koksma-Hlawka inequality for general discrepancy systems}}, doi = {10.1016/j.jco.2015.06.002}, volume = {31}, year = {2015}, } @phdthesis{1399, abstract = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.}, author = {Pausinger, Florian}, issn = {2663-337X}, pages = {144}, publisher = {Institute of Science and Technology Austria}, title = {{On the approximation of intrinsic volumes}}, year = {2015}, } @article{1666, abstract = {Evolution of gene regulation is crucial for our understanding of the phenotypic differences between species, populations and individuals. Sequence-specific binding of transcription factors to the regulatory regions on the DNA is a key regulatory mechanism that determines gene expression and hence heritable phenotypic variation. We use a biophysical model for directional selection on gene expression to estimate the rates of gain and loss of transcription factor binding sites (TFBS) in finite populations under both point and insertion/deletion mutations. Our results show that these rates are typically slow for a single TFBS in an isolated DNA region, unless the selection is extremely strong. These rates decrease drastically with increasing TFBS length or increasingly specific protein-DNA interactions, making the evolution of sites longer than ∼ 10 bp unlikely on typical eukaryotic speciation timescales. 
Similarly, evolution converges to the stationary distribution of binding sequences very slowly, making the equilibrium assumption questionable. The availability of longer regulatory sequences in which multiple binding sites can evolve simultaneously, the presence of “pre-sites” or partially decayed old sites in the initial sequence, and biophysical cooperativity between transcription factors, can all facilitate gain of TFBS and reconcile theoretical calculations with timescales inferred from comparative genomics.}, author = {Tugrul, Murat and Paixao, Tiago and Barton, Nicholas H and Tkacik, Gasper}, journal = {PLoS Genetics}, number = {11}, publisher = {Public Library of Science}, title = {{Dynamics of transcription factor binding site evolution}}, doi = {10.1371/journal.pgen.1005639}, volume = {11}, year = {2015}, } @inproceedings{1502, abstract = {We extend the theory of input-output conformance with operators for merge and quotient. The former is useful when testing against multiple requirements or views. The latter can be used to generate tests for patches of an already tested system. Both operators can combine systems with different action alphabets, which is usually the case when constructing complex systems and specifications from parts, for instance different views as well as newly defined functionality of a~previous version of the system.}, author = {Beneš, Nikola and Daca, Przemyslaw and Henzinger, Thomas A and Kretinsky, Jan and Nickovic, Dejan}, isbn = {978-1-4503-3471-6}, location = {Montreal, QC, Canada}, pages = {101 -- 110}, publisher = {ACM}, title = {{Complete composition operators for IOCO-testing theory}}, doi = {10.1145/2737166.2737175}, year = {2015}, } @article{1501, abstract = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of two-player games by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We show a tight link between two-player games and MDPs, and as a consequence the results for games are lifted to MDPs with qualitative properties. We have implemented our algorithms and show that the compositional analysis leads to significant improvements. }, author = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw}, journal = {Formal Methods in System Design}, number = {2}, pages = {230 -- 264}, publisher = {Springer}, title = {{CEGAR for compositional analysis of qualitative properties in Markov decision processes}}, doi = {10.1007/s10703-015-0235-2}, volume = {47}, year = {2015}, } @article{1602, abstract = {Interprocedural analysis is at the heart of numerous applications in programming languages, such as alias analysis, constant propagation, etc. Recursive state machines (RSMs) are standard models for interprocedural analysis. We consider a general framework with RSMs where the transitions are labeled from a semiring, and path properties are algebraic with semiring operations. 
RSMs with algebraic path properties can model interprocedural dataflow analysis problems, the shortest path problem, the most probable path problem, etc. The traditional algorithms for interprocedural analysis focus on path properties where the starting point is fixed as the entry point of a specific method. In this work, we consider possible multiple queries as required in many applications such as in alias analysis. The study of multiple queries allows us to bring in a very important algorithmic distinction between the resource usage of the one-time preprocessing vs. for each individual query. The second aspect that we consider is that the control flow graphs for most programs have constant treewidth. Our main contributions are simple and implementable algorithms that support multiple queries for algebraic path properties for RSMs that have constant treewidth. Our theoretical results show that our algorithms have small additional one-time preprocessing, but can answer subsequent queries significantly faster as compared to the current best-known solutions for several important problems, such as interprocedural reachability and shortest path. We provide a prototype implementation for interprocedural reachability and intraprocedural shortest path that gives a significant speed-up on several benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas and Goyal, Prateesh}, journal = {ACM SIGPLAN Notices}, location = {Mumbai, India}, number = {1}, pages = {97 -- 109}, publisher = {ACM}, title = {{Faster algorithms for algebraic path properties in recursive state machines with constant treewidth}}, doi = {10.1145/2676726.2676979}, volume = {50}, year = {2015}, } @article{1604, abstract = {We consider the quantitative analysis problem for interprocedural control-flow graphs (ICFGs). The input consists of an ICFG, a positive weight function that assigns every transition a positive integer-valued number, and a labelling of the transitions (events) as good, bad, and neutral events. The weight function assigns to each transition a numerical value that represents a measure of how good or bad an event is. The quantitative analysis problem asks whether there is a run of the ICFG where the ratio of the sum of the numerical weights of good events versus the sum of weights of bad events in the long-run is at least a given threshold (or equivalently, to compute the maximal ratio among all valid paths in the ICFG). The quantitative analysis problem for ICFGs can be solved in polynomial time, and we present an efficient and practical algorithm for the problem. We show that several problems relevant for static program analysis, such as estimating the worst-case execution time of a program or the average energy consumption of a mobile application, can be modeled in our framework. We have implemented our algorithm as a tool in the Java Soot framework. We demonstrate the effectiveness of our approach with two case studies. First, we show that our framework provides a sound approach (no false positives) for the analysis of inefficiently-used containers. Second, we show that our approach can also be used for static profiling of programs which reasons about methods that are frequently invoked. 
Our experimental results show that our tool scales to relatively large benchmarks, and discovers relevant and useful information that can be used to optimize performance of the programs.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Velner, Yaron}, isbn = {978-1-4503-3300-9}, journal = {Proceedings of the 42nd Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages}, location = {Mumbai, India}, number = {1}, pages = {539 -- 551}, publisher = {ACM}, title = {{Quantitative interprocedural analysis}}, doi = {10.1145/2676726.2676968}, volume = {50}, year = {2015}, } @inproceedings{1607, abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff property, the ratio property, and the minimum initial credit for energy property. The algorithmic problem given a graph and a quantitative property asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with constant treewidth, and it is well-known that the control-flow graphs of most programs have constant treewidth. Let n denote the number of nodes of a graph, m the number of edges (for constant treewidth graphs m=O(n)) and W the largest absolute value of the weights. Our main theoretical results are as follows. First, for constant treewidth graphs we present an algorithm that approximates the mean-payoff value within a multiplicative factor of ϵ in time O(n⋅log(n/ϵ)) and linear space, as compared to the classical algorithms that require quadratic time. Second, for the ratio property we present an algorithm that for constant treewidth graphs works in time O(n⋅log(|a⋅b|))=O(n⋅log(n⋅W)), when the output is a/b, as compared to the previously best known algorithm with running time O(n^2⋅log(n⋅W)). Third, for the minimum initial credit problem we show that (i) for general graphs the problem can be solved in O(n^2⋅m) time and the associated decision problem can be solved in O(n⋅m) time, improving the previous known O(n^3⋅m⋅log(n⋅W)) and O(n^2⋅m) bounds, respectively; and (ii) for constant treewidth graphs we present an algorithm that requires O(n⋅log n) time, improving the previous known O(n^4⋅log(n⋅W)) bound. We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, location = {San Francisco, CA, USA}, pages = {140 -- 157}, publisher = {Springer}, title = {{Faster algorithms for quantitative verification in constant treewidth graphs}}, doi = {10.1007/978-3-319-21690-4_9}, volume = {9206}, year = {2015}, } @inproceedings{1714, abstract = {We present a flexible framework for the automated competitive analysis of on-line scheduling algorithms for firm-deadline real-time tasks based on multi-objective graphs: Given a task set and an on-line scheduling algorithm specified as a labeled transition system, along with some optional safety, liveness, and/or limit-average constraints for the adversary, we automatically compute the competitive ratio of the algorithm w.r.t. a clairvoyant scheduler. We demonstrate the flexibility and power of our approach by comparing the competitive ratio of several on-line algorithms, including D^over, that have been proposed in the past, for various task sets. Our experimental results reveal that none of these algorithms is universally optimal, in the sense that there are task sets where other schedulers provide better performance. 
Our framework is hence a very useful design tool for selecting optimal algorithms for a given application.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich}, booktitle = {Real-Time Systems Symposium}, location = {Rome, Italy}, number = {January}, pages = {118 -- 127}, publisher = {IEEE}, title = {{A framework for automated competitive analysis of on-line scheduling of firm-deadline tasks}}, doi = {10.1109/RTSS.2014.9}, volume = {2015}, year = {2015}, } @inproceedings{1633, abstract = {We present a method for simulating brittle fracture under the assumptions of quasi-static linear elastic fracture mechanics (LEFM). Using the boundary element method (BEM) and Lagrangian crack-fronts, we produce highly detailed fracture surfaces. The computational cost of the BEM is alleviated by using a low-resolution mesh and interpolating the resulting stress intensity factors when propagating the high-resolution crack-front. Our system produces physics-based fracture surfaces with high spatial and temporal resolution, taking spatial variation of material toughness and/or strength into account. It also allows for crack initiation to be handled separately from crack propagation, which is not only more reasonable from a physics perspective, but can also be used to control the simulation. Separating the resolution of the crack-front from the resolution of the computational mesh increases the efficiency and therefore the amount of visual detail on the resulting fracture surfaces. The BEM also allows us to re-use previously computed blocks of the system matrix.}, author = {Hahn, David and Wojtan, Christopher J}, location = {Los Angeles, CA, United States}, number = {4}, publisher = {ACM}, title = {{High-resolution brittle fracture simulation with boundary elements}}, doi = {10.1145/2766896}, volume = {34}, year = {2015}, } @article{1537, abstract = {3D amoeboid cell migration is central to many developmental and disease-related processes such as cancer metastasis. Here, we identify a unique prototypic amoeboid cell migration mode in early zebrafish embryos, termed stable-bleb migration. Stable-bleb cells display an invariant polarized balloon-like shape with exceptional migration speed and persistence. Progenitor cells can be reversibly transformed into stable-bleb cells irrespective of their primary fate and motile characteristics by increasing myosin II activity through biochemical or mechanical stimuli. Using a combination of theory and experiments, we show that, in stable-bleb cells, cortical contractility fluctuations trigger a stochastic switch into amoeboid motility, and a positive feedback between cortical flows and gradients in contractility maintains stable-bleb cell polarization. 
We further show that rearward cortical flows drive stable-bleb cell migration in various adhesive and non-adhesive environments, unraveling a highly versatile amoeboid migration phenotype.}, author = {Ruprecht, Verena and Wieser, Stefan and Callan Jones, Andrew and Smutny, Michael and Morita, Hitoshi and Sako, Keisuke and Barone, Vanessa and Ritsch Marte, Monika and Sixt, Michael K and Voituriez, Raphaël and Heisenberg, Carl-Philipp J}, journal = {Cell}, number = {4}, pages = {673 -- 685}, publisher = {Cell Press}, title = {{Cortical contractility triggers a stochastic switch to fast amoeboid cell motility}}, doi = {10.1016/j.cell.2015.01.008}, volume = {160}, year = {2015}, } @article{1591, abstract = {Auxin participates in a multitude of developmental processes, as well as responses to environmental cues. Compared with other plant hormones, auxin exhibits a unique property, as it undergoes directional, cell-to-cell transport facilitated by plasma membrane-localized transport proteins. Among them, a prominent role has been ascribed to the PIN family of auxin efflux facilitators. PIN proteins direct polar auxin transport on account of their asymmetric subcellular localizations. In this review, we provide an overview of the multiple developmental roles of PIN proteins, including the atypical endoplasmic reticulum-localized members of the family, and look at the family from an evolutionary perspective. Next, we cover the cell biological and molecular aspects of PIN function, in particular the establishment of their polar subcellular localization. Hormonal and environmental inputs into the regulation of PIN action are summarized as well.}, author = {Adamowski, Maciek and Friml, Jirí}, journal = {Plant Cell}, number = {1}, pages = {20 -- 32}, publisher = {American Society of Plant Biologists}, title = {{PIN-dependent auxin transport: Action, regulation, and evolution}}, doi = {10.1105/tpc.114.134874}, volume = {27}, year = {2015}, } @article{1677, abstract = {We consider real symmetric and complex Hermitian random matrices with the additional symmetry $h_{xy} = h_{N-y,N-x}$. The matrix elements are independent (up to the fourfold symmetry) and not necessarily identically distributed. This ensemble naturally arises as the Fourier transform of a Gaussian orthogonal ensemble. It also occurs as the flip matrix model - an approximation of the two-dimensional Anderson model at small disorder. We show that the density of states converges to the Wigner semicircle law despite the new symmetry type. We also prove the local version of the semicircle law on the optimal scale.}, author = {Alt, Johannes}, journal = {Journal of Mathematical Physics}, number = {10}, publisher = {American Institute of Physics}, title = {{The local semicircle law for random matrices with a fourfold symmetry}}, doi = {10.1063/1.4932606}, volume = {56}, year = {2015}, } @article{1678, abstract = {High-throughput live-cell screens are intricate elements of systems biology studies and drug discovery pipelines. Here, we demonstrate an optogenetics-assisted method that avoids the need for chemical activators and reporters, reduces the number of operational steps and increases information content in a cell-based small-molecule screen against human protein kinases, including an orphan receptor tyrosine kinase. 
This blueprint for all-optical screening can be adapted to many drug targets and cellular processes.}, author = {Inglés Prieto, Álvaro and Gschaider-Reichhart, Eva and Muellner, Markus and Nowak, Matthias and Nijman, Sebastian and Grusch, Michael and Janovjak, Harald L}, journal = {Nature Chemical Biology}, number = {12}, pages = {952 -- 954}, publisher = {Nature Publishing Group}, title = {{Light-assisted small-molecule screening against protein kinases}}, doi = {10.1038/nchembio.1933}, volume = {11}, year = {2015}, } @article{1576, abstract = {Gene expression is controlled primarily by interactions between transcription factor proteins (TFs) and the regulatory DNA sequence, a process that can be captured well by thermodynamic models of regulation. These models, however, neglect regulatory crosstalk: the possibility that noncognate TFs could initiate transcription, with potentially disastrous effects for the cell. Here, we estimate the importance of crosstalk, suggest that its avoidance strongly constrains equilibrium models of TF binding, and propose an alternative nonequilibrium scheme that implements kinetic proofreading to suppress erroneous initiation. This proposal is consistent with the observed covalent modifications of the transcriptional apparatus and predicts increased noise in gene expression as a trade-off for improved specificity. Using information theory, we quantify this trade-off to find when optimal proofreading architectures are favored over their equilibrium counterparts. Such architectures exhibit significant super-Poisson noise at low expression in steady state.}, author = {Cepeda Humerez, Sarah A and Rieckh, Georg and Tkacik, Gasper}, journal = {Physical Review Letters}, number = {24}, publisher = {American Physical Society}, title = {{Stochastic proofreading mechanism alleviates crosstalk in transcriptional regulation}}, doi = {10.1103/PhysRevLett.115.248101}, volume = {115}, year = {2015}, } @unpublished{8183, abstract = {We study conditions under which a finite simplicial complex $K$ can be mapped to $\mathbb R^d$ without higher-multiplicity intersections. An almost $r$-embedding is a map $f: K\to \mathbb R^d$ such that the images of any $r$ pairwise disjoint simplices of $K$ do not have a common point. We show that if $r$ is not a prime power and $d\geq 2r+1$, then there is a counterexample to the topological Tverberg conjecture, i.e., there is an almost $r$-embedding of the $(d+1)(r-1)$-simplex in $\mathbb R^d$. This improves on previous constructions of counterexamples (for $d\geq 3r$) based on a series of papers by M. \"Ozaydin, M. Gromov, P. Blagojevi\'c, F. Frick, G. Ziegler, and the second and fourth present authors. The counterexamples are obtained by proving the following algebraic criterion in codimension 2: If $r\ge3$ and if $K$ is a finite $2(r-1)$-complex then there exists an almost $r$-embedding $K\to \mathbb R^{2r}$ if and only if there exists a general position PL map $f:K\to \mathbb R^{2r}$ such that the algebraic intersection number of the $f$-images of any $r$ pairwise disjoint simplices of $K$ is zero. This result can be restated in terms of cohomological obstructions or equivariant maps, and extends an analogous codimension 3 criterion by the second and fourth authors. As another application we classify ornaments $f:S^3 \sqcup S^3\sqcup S^3\to \mathbb R^5$ up to ornament concordance. It follows from work of M. Freedman, V. Krushkal and P. Teichner that the analogous criterion for $r=2$ is false. 
We prove a lemma on singular higher-dimensional Borromean rings, yielding an elementary proof of the counterexample.}, author = {Avvakumov, Sergey and Mabillard, Isaac and Skopenkov, A. and Wagner, Uli}, booktitle = {arXiv}, title = {{Eliminating higher-multiplicity intersections, III. Codimension 2}}, year = {2015}, } @misc{5441, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural problems that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, a property satisfied by the controlflow graphs of most programs. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis. The study of multiple queries allows us to consider the tradeoff between the resource usage of the one-time preprocessing and for each individual query. The traditional approach constructs the product graph of all components and applies the best-known graph algorithm on the product. In this approach, even the answer to a single query requires the transitive closure (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results showing that the worst-case running time of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (i.e., improving the worst-case bound for the shortest path problem in general graphs). Preliminary experimental results show that our algorithms perform favorably on several benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Goharshady, Amir and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {24}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, doi = {10.15479/AT:IST-2015-340-v1-1}, year = {2015}, } @misc{5442, abstract = {We study algorithmic questions for concurrent systems where the transitions are labeled from a complete, closed semiring, and path properties are algebraic with semiring operations. The algebraic path properties can model dataflow analysis problems, the shortest path problem, and many other natural properties that arise in program analysis. We consider that each component of the concurrent system is a graph with constant treewidth, and it is known that the controlflow graphs of most programs have constant treewidth. We allow for multiple possible queries, which arise naturally in demand driven dataflow analysis problems (e.g., alias analysis). 
The study of multiple queries allows us to consider the tradeoff between the resource usage of the \emph{one-time} preprocessing and for \emph{each individual} query. The traditional approaches construct the product graph of all components and apply the best-known graph algorithm on the product. In the traditional approach, even the answer to a single query requires the transitive closure computation (i.e., the results of all possible queries), which provides no room for tradeoff between preprocessing and query time. Our main contributions are algorithms that significantly improve the worst-case running time of the traditional approach, and provide various tradeoffs depending on the number of queries. For example, in a concurrent system of two components, the traditional approach requires hexic time in the worst case for answering one query as well as computing the transitive closure, whereas we show that with one-time preprocessing in almost cubic time, each subsequent query can be answered in at most linear time, and even the transitive closure can be computed in almost quartic time. Furthermore, we establish conditional optimality results that show that the worst-case running times of our algorithms cannot be improved without achieving major breakthroughs in graph algorithms (such as improving the worst-case bounds for the shortest path problem in general graphs whose current best-known bound has not been improved in five decades). Finally, we provide a prototype implementation of our algorithms which significantly outperforms the existing algorithmic methods on several benchmarks.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4}, issn = {2664-1690}, pages = {22}, publisher = {IST Austria}, title = {{Algorithms for algebraic path properties in concurrent systems of constant treewidth components}}, year = {2015}, } @inproceedings{1689, abstract = {We consider the problem of computing the set of initial states of a dynamical system such that there exists a control strategy to ensure that the trajectories satisfy a temporal logic specification with probability 1 (almost-surely). We focus on discrete-time, stochastic linear dynamics and specifications given as formulas of the Generalized Reactivity(1) fragment of Linear Temporal Logic over linear predicates in the states of the system. We propose a solution based on iterative abstraction-refinement, and turn-based 2-player probabilistic games. While the theoretical guarantee of our algorithm after any finite number of iterations is only a partial solution, we show that if our algorithm terminates, then the result is the set of satisfying initial states. Moreover, for any (partial) solution our algorithm synthesizes witness control strategies to ensure almost-sure satisfaction of the temporal logic specification. We demonstrate our approach on an illustrative case study.}, author = {Svoreňová, Mária and Kretinsky, Jan and Chmelik, Martin and Chatterjee, Krishnendu and Cěrná, Ivana and Belta, Cǎlin}, booktitle = {Proceedings of the 18th International Conference on Hybrid Systems: Computation and Control}, location = {Seattle, WA, United States}, pages = {259 -- 268}, publisher = {ACM}, title = {{Temporal logic control for stochastic linear systems using abstraction refinement of probabilistic games}}, doi = {10.1145/2728606.2728608}, year = {2015}, } @inproceedings{1729, abstract = {We present a computer-aided programming approach to concurrency. 
The approach allows programmers to program assuming a friendly, non-preemptive scheduler, and our synthesis procedure inserts synchronization to ensure that the final program works even with a preemptive scheduler. The correctness specification is implicit, inferred from the non-preemptive behavior. Let us consider sequences of calls that the program makes to an external interface. The specification requires that any such sequence produced under a preemptive scheduler should be included in the set of such sequences produced under a non-preemptive scheduler. The solution is based on a finitary abstraction, an algorithm for bounded language inclusion modulo an independence relation, and rules for inserting synchronization. We apply the approach to device-driver programming, where the driver threads call the software interface of the device and the API provided by the operating system. Our experiments demonstrate that our synthesis method is precise and efficient, and, since it does not require explicit specifications, is more practical than the conventional approach based on user-provided assertions.}, author = {Cerny, Pavol and Clarke, Edmund and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Samanta, Roopsha and Tarrach, Thorsten}, location = {San Francisco, CA, United States}, pages = {180 -- 197}, publisher = {Springer}, title = {{From non-preemptive to preemptive scheduling using synchronization synthesis}}, doi = {10.1007/978-3-319-21668-3_11}, volume = {9207}, year = {2015}, } @inproceedings{1835, abstract = {The behaviour of gene regulatory networks (GRNs) is typically analysed using simulation-based statistical testing-like methods. In this paper, we demonstrate that we can replace this approach by a formal verification-like method that gives higher assurance and scalability. We focus on Wagner’s weighted GRN model with varying weights, which is used in evolutionary biology. In the model, weight parameters represent the gene interaction strength that may change due to genetic mutations. For a property of interest, we synthesise the constraints over the parameter space that represent the set of GRNs satisfying the property. We experimentally show that our parameter synthesis procedure computes the mutational robustness of GRNs –an important problem of interest in evolutionary biology– more efficiently than the classical simulation method. We specify the property in linear temporal logics. We employ symbolic bounded model checking and SMT solving to compute the space of GRNs that satisfy the property, which amounts to synthesizing a set of linear constraints on the weights.}, author = {Giacobbe, Mirco and Guet, Calin C and Gupta, Ashutosh and Henzinger, Thomas A and Paixao, Tiago and Petrov, Tatjana}, location = {London, United Kingdom}, pages = {469 -- 483}, publisher = {Springer}, title = {{Model checking gene regulatory networks}}, doi = {10.1007/978-3-662-46681-0_47}, volume = {9035}, year = {2015}, } @article{1509, abstract = {The Auxin Binding Protein1 (ABP1) has been identified based on its ability to bind auxin with high affinity and studied for a long time as a prime candidate for the extracellular auxin receptor responsible for mediating in particular the fast non-transcriptional auxin responses. 
However, the contradiction between the embryo-lethal phenotypes of the originally described Arabidopsis T-DNA insertional knock-out alleles (abp1-1 and abp1-1s) and the wild type-like phenotypes of other recently described loss-of-function alleles (abp1-c1 and abp1-TD1) questions the biological importance of ABP1 and relevance of the previous genetic studies. Here we show that there is no hidden copy of the ABP1 gene in the Arabidopsis genome but the embryo-lethal phenotypes of abp1-1 and abp1-1s alleles are very similar to the knock-out phenotypes of the neighboring gene, BELAYA SMERT (BSM). Furthermore, the allelic complementation test between bsm and abp1 alleles shows that the embryo-lethality in the abp1-1 and abp1-1s alleles is caused by the off-target disruption of the BSM locus by the T-DNA insertions. This clarifies the controversy of different phenotypes among published abp1 knock-out alleles and asks for reflections on the developmental role of ABP1.}, author = {Michalko, Jaroslav and Dravecka, Marta and Bollenbach, Tobias and Friml, Jirí}, journal = {F1000 Research }, publisher = {F1000 Research}, title = {{Embryo-lethal phenotypes in early abp1 mutants are due to disruption of the neighboring BSM gene}}, doi = {10.12688/f1000research.7143.1}, volume = {4}, year = {2015}, } @article{1681, abstract = {In many social situations, individuals endeavor to find the single best possible partner, but are constrained to evaluate the candidates in sequence. Examples include the search for mates, economic partnerships, or any other long-term ties where the choice to interact involves two parties. Surprisingly, however, previous theoretical work on mutual choice problems focuses on finding equilibrium solutions, while ignoring the evolutionary dynamics of decisions. Empirically, this may be of high importance, as some equilibrium solutions can never be reached unless the population undergoes radical changes and a sufficient number of individuals change their decisions simultaneously. To address this question, we apply a mutual choice sequential search problem in an evolutionary game-theoretical model that allows one to find solutions that are favored by evolution. As an example, we study the influence of sequential search on the evolutionary dynamics of cooperation. For this, we focus on the classic snowdrift game and the prisoner’s dilemma game.}, author = {Priklopil, Tadeas and Chatterjee, Krishnendu}, issn = {2073-4336}, journal = {Games}, number = {4}, pages = {413 -- 437}, publisher = {MDPI}, title = {{Evolution of decisions in population games with sequentially searching individuals}}, doi = {10.3390/g6040413}, volume = {6}, year = {2015}, } @article{1655, abstract = {Quantifying behaviors of robots which were generated autonomously from task-independent objective functions is an important prerequisite for objective comparisons of algorithms and movements of animals. The temporal sequence of such a behavior can be considered as a time series and hence complexity measures developed for time series are natural candidates for its quantification. The predictive information and the excess entropy are such complexity measures. They measure the amount of information the past contains about the future and thus quantify the nonrandom structure in the temporal sequence. However, when using these measures for systems with continuous states one has to deal with the fact that their values will depend on the resolution with which the systems states are observed. 
For deterministic systems both measures will diverge with increasing resolution. We therefore propose a new decomposition of the excess entropy in resolution dependent and resolution independent parts and discuss how they depend on the dimensionality of the dynamics, correlations and the noise level. For the practical estimation we propose to use estimates based on the correlation integral instead of the direct estimation of the mutual information based on next neighbor statistics because the latter allows less control of the scale dependencies. Using our algorithm we are able to show how autonomous learning generates behavior of increasing complexity with increasing learning duration.}, author = {Martius, Georg S and Olbrich, Eckehard}, journal = {Entropy}, number = {10}, pages = {7266 -- 7297}, publisher = {MDPI}, title = {{Quantifying emergent behavior of autonomous robots}}, doi = {10.3390/e17107266}, volume = {17}, year = {2015}, } @article{1834, abstract = {Huge body of evidences demonstrated that volatile anesthetics affect the hippocampal neurogenesis and neurocognitive functions, and most of them showed impairment at anesthetic dose. Here, we investigated the effect of low dose (1.8%) sevoflurane on hippocampal neurogenesis and dentate gyrus-dependent learning. Neonatal rats at postnatal day 4 to 6 (P4-6) were treated with 1.8% sevoflurane for 6 hours. Neurogenesis was quantified by bromodeoxyuridine labeling and electrophysiology recording. Four and seven weeks after treatment, the Morris water maze and contextual-fear discrimination learning tests were performed to determine the influence on spatial learning and pattern separation. A 6-hour treatment with 1.8% sevoflurane promoted hippocampal neurogenesis and increased the survival of newborn cells and the proportion of immature granular cells in the dentate gyrus of neonatal rats. Sevoflurane-treated rats performed better during the training days of the Morris water maze test and in contextual-fear discrimination learning test. These results suggest that a subanesthetic dose of sevoflurane promotes hippocampal neurogenesis in neonatal rats and facilitates their performance in dentate gyrus-dependent learning tasks.}, author = {Chen, Chong and Wang, Chao and Zhao, Xuan and Zhou, Tao and Xu, Dao and Wang, Zhi and Wang, Ying}, journal = {ASN Neuro}, number = {2}, publisher = {SAGE Publications}, title = {{Low-dose sevoflurane promotes hippocampal neurogenesis and facilitates the development of dentate gyrus-dependent learning in neonatal rats}}, doi = {10.1177/1759091415575845}, volume = {7}, year = {2015}, } @article{1635, abstract = {We calculate a Ricci curvature lower bound for some classical examples of random walks, namely, a chain on a slice of the n-dimensional discrete cube (the so-called Bernoulli-Laplace model) and the random transposition shuffle of the symmetric group of permutations on n letters.}, author = {Erbar, Matthias and Maas, Jan and Tetali, Prasad}, journal = {Annales de la faculté des sciences de Toulouse}, number = {4}, pages = {781 -- 800}, publisher = {Faculté des sciences de Toulouse}, title = {{Discrete Ricci curvature bounds for Bernoulli-Laplace and random transposition models}}, doi = {10.5802/afst.1464}, volume = {24}, year = {2015}, } @article{14303, abstract = {Scaffolded DNA origami enables the fabrication of a variety of complex nanostructures that promise utility in diverse fields of application, ranging from biosensing over advanced therapeutics to metamaterials. 
The broad applicability of DNA origami as a material beyond the level of proof-of-concept studies critically depends, among other factors, on the availability of large amounts of pure single-stranded scaffold DNA. Here, we present a method for the efficient production of M13 bacteriophage-derived genomic DNA using high-cell-density fermentation of Escherichia coli in stirred-tank bioreactors. We achieve phage titers of up to 1.6 × 10^14 plaque-forming units per mL. Downstream processing yields up to 410 mg of high-quality single-stranded DNA per liter of reaction volume, thus upgrading DNA origami-based nanotechnology from the milligram to the gram scale.}, author = {Kick, B and Praetorius, Florian M and Dietz, H and Weuster-Botz, D}, issn = {1530-6992}, journal = {Nano Letters}, number = {7}, pages = {4672--4676}, publisher = {ACS Publications}, title = {{Efficient production of single-stranded phage DNA as scaffolds for DNA origami}}, doi = {10.1021/acs.nanolett.5b01461}, volume = {15}, year = {2015}, } @inproceedings{1603, abstract = {For deterministic systems, a counterexample to a property can simply be an error trace, whereas counterexamples in probabilistic systems are necessarily more complex. For instance, a set of erroneous traces with a sufficient cumulative probability mass can be used. Since these objects are too large to understand and manipulate, compact representations such as subchains have been considered. In the case of probabilistic systems with non-determinism, the situation is even more complex. While a subchain for a given strategy (or scheduler, resolving non-determinism) is a straightforward choice, we take a different approach. Instead, we focus on the strategy itself, extract the most important decisions it makes, and present a succinct representation of it. The key tools we employ to achieve this are (1) introducing a concept of importance of a state w.r.t. the strategy, and (2) learning using decision trees. There are three main consequent advantages of our approach. Firstly, it exploits the quantitative information on states, stressing the more important decisions. Secondly, it leads to a greater variability and degree of freedom in representing the strategies. Thirdly, the representation uses a self-explanatory data structure. In summary, our approach produces more succinct and more explainable strategies, as opposed to e.g. binary decision diagrams. Finally, our experimental results show that we can extract several rules describing the strategy even for very large systems that do not fit in memory, and based on the rules explain the erroneous behaviour.}, author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Fellner, Andreas and Kretinsky, Jan}, location = {San Francisco, CA, United States}, pages = {158 -- 177}, publisher = {Springer}, title = {{Counterexample explanation by learning small strategies in Markov decision processes}}, doi = {10.1007/978-3-319-21690-4_10}, volume = {9206}, year = {2015}, } @misc{5549, abstract = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees. The archive contains a Java executable version of the extended tool (prism_dectree.jar) together with a few examples of the PRISM benchmark library.
To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive. The archive contains scripts that (if run often enough) reproduces the data presented in the publication.}, author = {Fellner, Andreas}, keywords = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation}, publisher = {Institute of Science and Technology Austria}, title = {{Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes}}, doi = {10.15479/AT:ISTA:28}, year = {2015}, } @inproceedings{1512, abstract = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers b and d there exists an integer h(b,d) such that the following holds. If F is a finite family of subsets of R^d such that the ith reduced Betti number (with Z_2 coefficients in singular homology) of the intersection of any proper subfamily G of F is at most b for every non-negative integer i less or equal to (d-1)/2, then F has Helly number at most h(b,d). These topological conditions are sharp: not controlling any of these first Betti numbers allow for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex K, some well-behaved chain map from C_*(K) to C_*(R^d). Both techniques are of independent interest.}, author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli}, location = {Eindhoven, Netherlands}, pages = {507 -- 521}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Bounding Helly numbers via Betti numbers}}, doi = {10.4230/LIPIcs.SOCG.2015.507}, volume = {34}, year = {2015}, } @article{271, abstract = {We show that a non-singular integral form of degree d is soluble non-trivially over the integers if and only if it is soluble non-trivially over the reals and the p-adic numbers, provided that the form has at least (d-\sqrt{d}/2)2^d variables. This improves on a longstanding result of Birch.}, author = {Browning, Timothy D and Prendiville, Sean}, issn = {0075-4102}, journal = {Journal fur die Reine und Angewandte Mathematik}, number = {731}, pages = {203 -- 234}, publisher = {Walter de Gruyter}, title = {{Improvements in Birch's theorem on forms in many variables}}, doi = {10.1515/crelle-2014-0122}, volume = {2017}, year = {2015}, } @inproceedings{1675, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs - so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. 
We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW like in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log logN) such that even if one places Θ(N) pebbles on the nodes of the graph, there’s a constant fraction of nodes that needs Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, booktitle = {35th Annual Cryptology Conference}, isbn = {9783662479995}, issn = {0302-9743}, location = {Santa Barbara, CA, United States}, pages = {585 -- 605}, publisher = {Springer}, title = {{Proofs of space}}, doi = {10.1007/978-3-662-48000-7_29}, volume = {9216}, year = {2015}, } @article{15160, abstract = {The circadian clock orchestrates global changes in transcriptional regulation on a daily basis via the bHLH-PAS transcription factor CLOCK:BMAL1. Pathways driven by other bHLH-PAS transcription factors have a homologous repressor that modulates activity on a tissue-specific basis, but none have been identified for CLOCK:BMAL1. We show here that the cancer/testis antigen PASD1 fulfills this role to suppress circadian rhythms. PASD1 is evolutionarily related to CLOCK and interacts with the CLOCK:BMAL1 complex to repress transcriptional activation. Expression of PASD1 is restricted to germline tissues in healthy individuals but can be induced in cells of somatic origin upon oncogenic transformation. Reducing PASD1 in human cancer cells significantly increases the amplitude of transcriptional oscillations to generate more robust circadian rhythms. Our results describe a function for a germline-specific protein in regulation of the circadian clock and provide a molecular link from oncogenic transformation to suppression of circadian rhythms.}, author = {Michael, Alicia Kathleen and Harvey, Stacy L. and Sammons, Patrick J. and Anderson, Amanda P. and Kopalle, Hema M. and Banham, Alison H. and Partch, Carrie L.}, issn = {1097-2765}, journal = {Molecular Cell}, keywords = {Cell Biology, Molecular Biology}, number = {5}, pages = {743--754}, publisher = {Elsevier}, title = {{Cancer/Testis antigen PASD1 silences the circadian clock}}, doi = {10.1016/j.molcel.2015.03.031}, volume = {58}, year = {2015}, } @article{15159, abstract = {It is widely recognized that BMAL1 is an essential subunit of the primary transcription factor that drives rhythmic circadian transcription in the nucleus. In a surprising turn, Lipton et al. now show that BMAL1 rhythmically interacts with translational machinery in the cytosol to stimulate protein synthesis in response to mTOR signaling.}, author = {Michael, Alicia Kathleen and Asimgil, Hande and Partch, Carrie L.}, issn = {0968-0004}, journal = {Trends in Biochemical Sciences}, keywords = {Molecular Biology, Biochemistry}, number = {9}, pages = {489--490}, publisher = {Elsevier}, title = {{Cytosolic BMAL1 moonlights as a translation factor}}, doi = {10.1016/j.tibs.2015.07.006}, volume = {40}, year = {2015}, } @article{1619, abstract = {The emergence of drug resistant pathogens is a serious public health problem. It is a long-standing goal to predict rates of resistance evolution and design optimal treatment strategies accordingly. 
To this end, it is crucial to reveal the underlying causes of drug-specific differences in the evolutionary dynamics leading to resistance. However, it remains largely unknown why the rates of resistance evolution via spontaneous mutations and the diversity of mutational paths vary substantially between drugs. Here we comprehensively quantify the distribution of fitness effects (DFE) of mutations, a key determinant of evolutionary dynamics, in the presence of eight antibiotics representing the main modes of action. Using precise high-throughput fitness measurements for genome-wide Escherichia coli gene deletion strains, we find that the width of the DFE varies dramatically between antibiotics and, contrary to conventional wisdom, for some drugs the DFE width is lower than in the absence of stress. We show that this previously underappreciated divergence in DFE width among antibiotics is largely caused by their distinct drug-specific dose-response characteristics. Unlike the DFE, the magnitude of the changes in tolerated drug concentration resulting from genome-wide mutations is similar for most drugs but exceptionally small for the antibiotic nitrofurantoin, i.e., mutations generally have considerably smaller resistance effects for nitrofurantoin than for other drugs. A population genetics model predicts that resistance evolution for drugs with this property is severely limited and confined to reproducible mutational paths. We tested this prediction in laboratory evolution experiments using the “morbidostat”, a device for evolving bacteria in well-controlled drug environments. Nitrofurantoin resistance indeed evolved extremely slowly via reproducible mutations—an almost paradoxical behavior since this drug causes DNA damage and increases the mutation rate. Overall, we identified novel quantitative characteristics of the evolutionary landscape that provide the conceptual foundation for predicting the dynamics of drug resistance evolution.}, author = {Chevereau, Guillaume and Dravecka, Marta and Batur, Tugce and Guvenek, Aysegul and Ayhan, Dilay and Toprak, Erdal and Bollenbach, Mark Tobias}, journal = {PLoS Biology}, number = {11}, publisher = {Public Library of Science}, title = {{Quantifying the determinants of evolutionary dynamics leading to drug resistance}}, doi = {10.1371/journal.pbio.1002299}, volume = {13}, year = {2015}, } @article{10382, abstract = {Protein oligomers have been implicated as toxic agents in a wide range of amyloid-related diseases. However, it has remained unsolved whether the oligomers are a necessary step in the formation of amyloid fibrils or just a dangerous byproduct. Analogously, it has not been resolved if the amyloid nucleation process is a classical one-step nucleation process or a two-step process involving prenucleation clusters. We use coarse-grained computer simulations to study the effect of nonspecific attractions between peptides on the primary nucleation process underlying amyloid fibrillization. We find that, for peptides that do not attract, the classical one-step nucleation mechanism is possible but only at nonphysiologically high peptide concentrations. At low peptide concentrations, which mimic the physiologically relevant regime, attractive interpeptide interactions are essential for fibril formation. Nucleation then inevitably takes place through a two-step mechanism involving prefibrillar oligomers. 
We show that oligomers not only help peptides meet each other but also, create an environment that facilitates the conversion of monomers into the β-sheet–rich form characteristic of fibrils. Nucleation typically does not proceed through the most prevalent oligomers but through an oligomer size that is only observed in rare fluctuations, which is why such aggregates might be hard to capture experimentally. Finally, we find that the nucleation of amyloid fibrils cannot be described by classical nucleation theory: in the two-step mechanism, the critical nucleus size increases with increases in both concentration and interpeptide interactions, which is in direct contrast with predictions from classical nucleation theory.}, author = {Šarić, Anđela and Chebaro, Yassmine C. and Knowles, Tuomas P. J. and Frenkel, Daan}, issn = {1091-6490}, journal = {Proceedings of the National Academy of Sciences}, keywords = {multidisciplinary}, number = {50}, pages = {17869--17874}, publisher = {National Academy of Sciences}, title = {{Crucial role of nonspecific interactions in amyloid nucleation}}, doi = {10.1073/pnas.1410159111}, volume = {111}, year = {2014}, } @article{10383, abstract = {We use numerical simulations to compute the equation of state of a suspension of spherical self-propelled nanoparticles in two and three dimensions. We study in detail the effect of excluded volume interactions and confinement as a function of the system's temperature, concentration, and strength of the propulsion. We find a striking nonmonotonic dependence of the pressure on the temperature and provide simple scaling arguments to predict and explain the occurrence of such anomalous behavior. We explicitly show how our results have important implications for the effective forces on passive components suspended in a bath of active particles.}, author = {Mallory, S. A. and Šarić, Anđela and Valeriani, C. and Cacciuto, A.}, issn = {1550-2376}, journal = {Physical Review E}, number = {5}, publisher = {American Physical Society}, title = {{Anomalous thermomechanical properties of a self-propelled colloidal fluid}}, doi = {10.1103/physreve.89.052303}, volume = {89}, year = {2014}, } @article{1058, abstract = {Diffraction-unlimited far-field super-resolution fluorescence (nanoscopy) methods typically rely on transiently transferring fluorophores between two states, whereby this transfer is usually laid out as a switch. However, depending on whether this is induced in a spatially controlled manner using a pattern of light (coordinate-targeted) or stochastically on a single-molecule basis, specific requirements on the fluorophores are imposed. Therefore, the fluorophores are usually utilized just for one class of methods only. In this study we demonstrate that the reversibly switchable fluorescent protein Dreiklang enables live-cell recordings in both spatially controlled and stochastic modes. We show that the Dreiklang chromophore entails three different light-induced switching mechanisms, namely a reversible photochemical one, off-switching by stimulated emission, and a reversible transfer to a long-lived dark state from the S1 state, all of which can be utilized to overcome the diffraction barrier. We also find that for the single-molecule- based stochastic GSDIM approach (ground-state depletion followed by individual molecule return), Dreiklang provides a larger number of on-off localization events as compared to its progenitor Citrine. 
Altogether, Dreiklang is a versatile probe for essentially all popular forms of live-cell fluorescence nanoscopy.}, author = {Jensen, Nickels and Danzl, Johann G and Willig, Katrin and Lavoie Cardinal, Flavie and Brakemann, Tanja and Hell, Stefan and Jakobs, Stefan}, journal = {ChemPhysChem}, number = {4}, pages = {756 -- 762}, publisher = {Wiley-Blackwell}, title = {{Coordinate-targeted and coordinate-stochastic super-resolution microscopy with the reversibly switchable fluorescent protein Dreiklang}}, doi = {10.1002/cphc.201301034}, volume = {15}, year = {2014}, } @article{10815, abstract = {In the last several decades, developmental biology has clarified the molecular mechanisms of embryogenesis and organogenesis. In particular, it has demonstrated that the “tool-kit genes” essential for regulating developmental processes are not only highly conserved among species, but are also used as systems at various times and places in an organism to control distinct developmental events. Therefore, mutations in many of these tool-kit genes may cause congenital diseases involving morphological abnormalities. This link between genes and abnormal morphological phenotypes underscores the importance of understanding how cells behave and contribute to morphogenesis as a result of gene function. Recent improvements in live imaging and in quantitative analyses of cellular dynamics will advance our understanding of the cellular pathogenesis of congenital diseases associated with aberrant morphologies. In these studies, it is critical to select an appropriate model organism for the particular phenomenon of interest.}, author = {Hashimoto, Masakazu and Morita, Hitoshi and Ueno, Naoto}, issn = {0914-3505}, journal = {Congenital Anomalies}, keywords = {Developmental Biology, Embryology, General Medicine, Pediatrics, Perinatology, and Child Health}, number = {1}, pages = {1--7}, publisher = {Wiley}, title = {{Molecular and cellular mechanisms of development underlying congenital diseases}}, doi = {10.1111/cga.12039}, volume = {54}, year = {2014}, } @book{10811, abstract = {Auxin is an important signaling compound in plants and vital for plant development and growth. The present book, Auxin and its Role in Plant Development, provides the reader with detailed and comprehensive insight into the functioning of the molecule on the whole and specifically in plant development. In the first part, the functioning, metabolism and signaling pathways of auxin in plants are explained; the second part depicts the specific role of auxin in plant development; and the third part describes the interaction and functioning of the signaling compound upon stimuli of the environment. Each chapter is written by international experts in the respective field and designed for scientists and researchers in plant biology, plant development and cell biology to summarize the recent progress in understanding the role of auxin and suggest future perspectives for auxin research.}, editor = {Zažímalová, Eva and Petrášek, Jan and Benková, Eva}, isbn = {9783709115251}, pages = {444}, publisher = {Springer Nature}, title = {{Auxin and Its Role in Plant Development}}, doi = {10.1007/978-3-7091-1526-8}, year = {2014}, } @inproceedings{10884, abstract = {We revisit the parameterized model checking problem for token-passing systems and specifications in indexed CTL*\X.
Emerson and Namjoshi (1995, 2003) have shown that parameterized model checking of indexed CTL*\X in uni-directional token rings can be reduced to checking rings up to some cutoff size. Clarke et al. (2004) have shown a similar result for general topologies and indexed LTL \X, provided processes cannot choose the directions for sending or receiving the token. We unify and substantially extend these results by systematically exploring fragments of indexed CTL*\X with respect to general topologies. For each fragment we establish whether a cutoff exists, and for some concrete topologies, such as rings, cliques and stars, we infer small cutoffs. Finally, we show that the problem becomes undecidable, and thus no cutoffs exist, if processes are allowed to choose the directions in which they send or from which they receive the token.}, author = {Aminof, Benjamin and Jacobs, Swen and Khalimov, Ayrat and Rubin, Sasha}, booktitle = {Verification, Model Checking, and Abstract Interpretation}, isbn = {9783642540127}, issn = {1611-3349}, location = {San Diego, CA, United States}, pages = {262--281}, publisher = {Springer Nature}, title = {{Parameterized model checking of token-passing systems}}, doi = {10.1007/978-3-642-54013-4_15}, volume = {8318}, year = {2014}, } @inbook{10893, abstract = {Saddle periodic orbits are an essential and stable part of the topological skeleton of a 3D vector field. Nevertheless, there is currently no efficient algorithm to robustly extract these features. In this chapter, we present a novel technique to extract saddle periodic orbits. Exploiting the analytic properties of such an orbit, we propose a scalar measure based on the finite-time Lyapunov exponent (FTLE) that indicates its presence. Using persistent homology, we can then extract the robust cycles of this field. These cycles thereby represent the saddle periodic orbits of the given vector field. We discuss the different existing FTLE approximation schemes regarding their applicability to this specific problem and propose an adapted version of FTLE called Normalized Velocity Separation. Finally, we evaluate our method using simple analytic vector field data.}, author = {Kasten, Jens and Reininghaus, Jan and Reich, Wieland and Scheuermann, Gerik}, booktitle = {Topological Methods in Data Analysis and Visualization III}, editor = {Bremer, Peer-Timo and Hotz, Ingrid and Pascucci, Valerio and Peikert, Ronald}, isbn = {9783319040981}, issn = {2197-666X}, pages = {55--69}, publisher = {Springer}, title = {{Toward the extraction of saddle periodic orbits}}, doi = {10.1007/978-3-319-04099-8_4}, volume = {1}, year = {2014}, } @article{11080, abstract = {The spindle assembly checkpoint prevents separation of sister chromatids until each kinetochore is attached to the mitotic spindle. Rodriguez-Bravo et al.
report that the nuclear pore complex scaffolds spindle assembly checkpoint signaling in interphase, providing a store of inhibitory signals that limits the speed of the subsequent mitosis.}, author = {Buchwalter, Abigail and HETZER, Martin W}, issn = {0092-8674}, journal = {Cell}, keywords = {General Biochemistry, Genetics and Molecular Biology}, number = {5}, pages = {868--869}, publisher = {Elsevier}, title = {{Nuclear pores set the speed limit for mitosis}}, doi = {10.1016/j.cell.2014.02.004}, volume = {156}, year = {2014}, } @article{11082, abstract = {The nuclear pore complex (NPC) plays a critical role in gene expression by mediating import of transcription regulators into the nucleus and export of RNA transcripts to the cytoplasm. Emerging evidence suggests that in addition to mediating transport, a subset of nucleoporins (Nups) engage in transcriptional activation and elongation at genomic loci that are not associated with NPCs. The underlying mechanism and regulation of Nup mobility on and off nuclear pores remain unclear. Here we show that Nup50 is a mobile Nup with a pronounced presence both at the NPC and in the nucleoplasm that can move between these different localizations. Strikingly, the dynamic behavior of Nup50 in both locations is dependent on active transcription by RNA polymerase II and requires the N-terminal half of the protein, which contains importin α– and Nup153-binding domains. However, Nup50 dynamics are independent of importin α, Nup153, and Nup98, even though the latter two proteins also exhibit transcription-dependent mobility. Of interest, depletion of Nup50 from C2C12 myoblasts does not affect cell proliferation but inhibits differentiation into myotubes. Taken together, our results suggest a transport-independent role for Nup50 in chromatin biology that occurs away from the NPC.}, author = {Buchwalter, Abigail L. and Liang, Yun and HETZER, Martin W}, issn = {1059-1524}, journal = {Molecular Biology of the Cell}, keywords = {Cell Biology, Molecular Biology}, number = {16}, pages = {2472--2484}, publisher = {American Society for Cell Biology}, title = {{Nup50 is required for cell differentiation and exhibits transcription-dependent dynamics}}, doi = {10.1091/mbc.e14-04-0865}, volume = {25}, year = {2014}, } @article{11081, abstract = {In eukaryotic cells the nuclear genome is enclosed by the nuclear envelope (NE). In metazoans, the NE breaks down in mitosis and it has been assumed that the physical barrier separating nucleoplasm and cytoplasm remains intact during the rest of the cell cycle and cell differentiation. However, recent studies suggest that nonmitotic NE remodeling plays a critical role in development, virus infection, laminopathies, and cancer. Although the mechanisms underlying these NE restructuring events are currently being defined, one common theme is activation of protein kinase C family members in the interphase nucleus to disrupt the nuclear lamina, demonstrating the importance of the lamina in maintaining nuclear integrity.}, author = {Hatch, Emily and HETZER, Martin W}, issn = {1540-8140}, journal = {Journal of Cell Biology}, keywords = {Cell Biology}, number = {2}, pages = {133--141}, publisher = {Rockefeller University Press}, title = {{Breaching the nuclear envelope in development and disease}}, doi = {10.1083/jcb.201402003}, volume = {205}, year = {2014}, } @article{11583, abstract = {Candidate galaxies at redshifts of z ∼ 10 are now being found in extremely deep surveys, probing very small areas. 
As a consequence, candidates are very faint, making spectroscopic confirmation practically impossible. In order to overcome such limitations, we have undertaken the CF-HiZELS survey, which is a large-area, medium-depth near-infrared narrow-band survey targeted at z = 8.8 Lyman α (Lyα) emitters (LAEs) and covering 10 deg^2 in part of the SSA22 field with the Canada–France–Hawaii Telescope (CFHT). We surveyed a comoving volume of 4.7 × 10^6 Mpc^3 to a Lyα luminosity limit of 6.3 × 10^43 erg s^-1. We look for Lyα candidates by applying the following criteria: (i) clear emission-line source, (ii) no optical detections (ugriz from CFHTLS), (iii) no visible detection in the optical stack (ugriz > 27), (iv) visually checked reliable NBJ and J detections, and (v) J − K ≤ 0. We compute photometric redshifts and remove a significant number of dusty lower-redshift line-emitters at z ∼ 1.4 or 2.2. A total of 13 Lyα candidates were found, of which two are marked as strong candidates, but the majority have very weak constraints on their spectral energy distributions. Using follow-up observations with SINFONI/VLT, we are able to exclude the most robust candidates as LAEs. We put a strong constraint on the Lyα luminosity function at z ∼ 9 and make realistic predictions for ongoing and future surveys. Our results show that surveys for the highest-redshift LAEs are susceptible to multiple sources of contamination and that spectroscopic follow-up is absolutely necessary.}, author = {Matthee, Jorryt J and Sobral, David and Swinbank, A. M. and Smail, Ian and Best, P. N. and Kim, Jae-Woo and Franx, Marijn and Milvang-Jensen, Bo and Fynbo, Johan}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: evolution, galaxies: high-redshift, cosmology: observations, dark ages, reionization, first stars}, number = {3}, pages = {2375--2387}, publisher = {Oxford University Press}, title = {{A 10 deg^2 Lyman α survey at z=8.8 with spectroscopic follow-up: Strong constraints on the luminosity function and implications for other surveys}}, doi = {10.1093/mnras/stu392}, volume = {440}, year = {2014}, } @article{11582, abstract = {We have observed a sample of typical z ∼ 1 star-forming galaxies, selected from the HiZELS survey, with the new K-band Multi-Object Spectrograph (KMOS) near-infrared, multi-integral field unit instrument on the Very Large Telescope (VLT), in order to obtain their dynamics and metallicity gradients. The majority of our galaxies have a metallicity gradient consistent with being flat or negative (i.e. higher metallicity cores than outskirts). Intriguingly, we find a trend between metallicity gradient and specific star formation rate (sSFR), such that galaxies with a high sSFR tend to have relatively metal poor centres, a result which is strengthened when combined with data sets from the literature. This result appears to explain the discrepancies reported between different high-redshift studies and varying claims for evolution. From a galaxy evolution perspective, the trend we see would mean that a galaxy's sSFR is governed by the amount of metal-poor gas that can be funnelled into its core, triggered either by merging or through efficient accretion. In fact, merging may play a significant role as it is the starburst galaxies at all epochs which have the more positive metallicity gradients.
Our results may help to explain the origin of the fundamental metallicity relation, in which galaxies at a fixed mass are observed to have lower metallicities at higher star formation rates, especially if the metallicity is measured in an aperture encompassing only the central regions of the galaxy. Finally, we note that this study demonstrates the power of KMOS as an efficient instrument for large-scale resolved galaxy surveys.}, author = {Stott, John P. and Sobral, David and Swinbank, A. M. and Smail, Ian and Bower, Richard and Best, Philip N. and Sharples, Ray M. and Geach, James E. and Matthee, Jorryt J}, issn = {1365-2966}, journal = {Monthly Notices of the Royal Astronomical Society}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, galaxies: abundances, galaxies: evolution, galaxies: kinematics and dynamics}, number = {3}, pages = {2695--2704}, publisher = {Oxford University Press}, title = {{A relationship between specific star formation rate and metallicity gradient within z ∼ 1 galaxies from KMOS-HiZELS}}, doi = {10.1093/mnras/stu1343}, volume = {443}, year = {2014}, } @article{11750, abstract = {We report on the magnetic properties of a hot-pressed FeSb 2 sample. We find a significant increase in the magnetic susceptibility in our sample when compared with the values previously reported for the polycrystalline sample. The pronounced Curie tail at low temperature corresponds to 0.2% of Fe 2+ impurities per mole. In the intrinsic conductivity region, the susceptibility due to free carriers shows thermally activated behavior and is consistent with the data reported for single crystal FeSb 2 . Based on our data and analysis, while the enhanced magnetic susceptibility in our sample comes mainly from a small amount of unreacted Fe, the contribution from the enhanced carrier density due to lattice and strain defects arising from the ball milling process is also significant. Existence of an unreacted Fe phase is evidenced by small coercivity values of ~100 observed at 50 and 300 K.}, author = {Pokharel, Mani and Zhao, Huaizhou and Modic, Kimberly A and Ren, Zhifeng and Opeil, Cyril}, issn = {1941-0069}, journal = {IEEE Transactions on Magnetics}, number = {5}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Magnetic properties of hot-pressed FeSb2}}, doi = {10.1109/TMAG.2013.2292607}, volume = {50}, year = {2014}, } @inproceedings{11789, abstract = {We study a weighted online bipartite matching problem: G(V 1, V 2, E) is a weighted bipartite graph where V 1 is known beforehand and the vertices of V 2 arrive online. The goal is to match vertices of V 2 as they arrive to vertices in V 1, so as to maximize the sum of weights of edges in the matching. If assignments to V 1 cannot be changed, no bounded competitive ratio is achievable. We study the weighted online matching problem with free disposal, where vertices in V 1 can be assigned multiple times, but only get credit for the maximum weight edge assigned to them over the course of the algorithm. For this problem, the greedy algorithm is 0.5-competitive and determining whether a better competitive ratio is achievable is a well known open problem. We identify an interesting special case where the edge weights are decomposable as the product of two factors, one corresponding to each end point of the edge. This is analogous to the well studied related machines model in the scheduling literature, although the objective functions are different. 
For this case of decomposable edge weights, we design a 0.5664-competitive randomized algorithm in complete bipartite graphs. We show that such instances with decomposable weights are non-trivial by establishing upper bounds of 0.618 for deterministic and 0.8 for randomized algorithms. A tight competitive ratio of 1 − 1/e ≈ 0.632 was known previously for both the 0-1 case as well as the case where edge weights depend on the offline vertices only, but for these cases, reassignments cannot change the quality of the solution. Beating 0.5 for weighted matching where reassignments are necessary has been a significant challenge. We thus give the first online algorithm with competitive ratio strictly better than 0.5 for a non-trivial case of weighted matching with free disposal.}, author = {Charikar, Moses and Henzinger, Monika H and Nguyễn, Huy L.}, booktitle = {22nd Annual European Symposium on Algorithms}, isbn = {978-366244776-5}, issn = {0302-9743}, location = {Wroclaw, Poland}, pages = {260 -- 271}, publisher = {Springer Nature}, title = {{Online bipartite matching with decomposable weights}}, doi = {10.1007/978-3-662-44777-2_22}, volume = {8737}, year = {2014}, } @inproceedings{11790, abstract = {Assume a seller wants to sell a digital product in a social network where a buyer’s valuation of the item has positive network externalities from her neighbors that already have the item. The goal of the seller is to maximize his revenue. Previous work on this problem [7] studies the case where clients are offered the item in sequence and have to pay personalized prices. This is highly infeasible in large scale networks such as the Facebook graph: (1) Offering items to the clients one after the other consumes a large amount of time, and (2) price-discrimination of clients could appear unfair to them and result in negative client reaction or could conflict with legal requirements. We study a setting dealing with these issues. Specifically, the item is offered in parallel to multiple clients at the same time and at the same price. This is called a round. We show that with O(log n) rounds, where n is the number of clients, a constant factor of the revenue with price discrimination can be achieved and that this is not possible with o(log n) rounds. Moreover we show that it is APX-hard to maximize the revenue and we give constant factor approximation algorithms for various further settings of limited price discrimination.}, author = {Cigler, Luděk and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin}, booktitle = {10th International Conference of Web and Internet Economics}, issn = {0302-9743}, location = {Beijing, China}, pages = {44 -- 57}, publisher = {Springer Nature}, title = {{Limiting price discrimination when selling products with positive network externalities}}, doi = {10.1007/978-3-319-13129-0_4}, volume = {8877}, year = {2014}, } @article{118, abstract = {While the penetration of objects into granular media is well-studied, there is little understanding of how objects settle in gravities, g_eff, different from that of Earth - a scenario potentially relevant to the geomorphology of planets and asteroids and also to their exploration using man-made devices. By conducting experiments in an accelerating frame, we explore g_eff ranging from 0.4 g to 1.2 g. Surprisingly, we find that the rest depth is independent of g_eff and also that the time required for the object to come to rest scales like g_eff^{-1/2}.
With discrete element modeling simulations, we reproduce the experimental results and extend the range of g_eff to objects as small as asteroids and as large as Jupiter. Our results shed light on the initial stage of sedimentation into dry granular media across a range of celestial bodies and also have implications for the design of man-made, extraterrestrial vehicles and structures. Key Points: the settling depth in granular media is independent of gravity; the settling time scales like g^{-1/2}; layering driven by granular sedimentation should be similar.}, author = {Altshuler, Ernesto and Torres, H and González-Pita, A and Sánchez, Colina G and Pérez Penichet, Carlos and Waitukaitis, Scott R and Hidalgo, Raúl}, journal = {Geophysical Research Letters}, number = {9}, pages = {3032 -- 3037}, publisher = {Wiley-Blackwell}, title = {{Settling into dry granular media in different gravities}}, doi = {10.1002/2014GL059229}, volume = {41}, year = {2014}, } @inproceedings{11855, abstract = {The decremental single-source shortest paths (SSSP) problem concerns maintaining the distances from a given source node s to every node in an n-node m-edge graph G undergoing edge deletions. While its static counterpart can be easily solved in near-linear time, this decremental problem is much more challenging even in the undirected unweighted case. In this case, the classic O(mn) total update time of Even and Shiloach (JACM 1981) has been the fastest known algorithm for three decades. With the loss of a (1 + ε)-approximation factor, the running time was recently improved to O(n^{2+o(1)}) by Bernstein and Roditty (SODA 2011), and more recently to O(n^{1.8+o(1)} + m^{1+o(1)}) by Henzinger, Krinninger, and Nanongkai (SODA 2014). In this paper, we finally bring the running time of this case down to near-linear: We give a (1 + ε)-approximation algorithm with O(m^{1+o(1)}) total update time, thus obtaining near-linear time. Moreover, we obtain O(m^{1+o(1)} log W) time for the weighted case, where the edge weights are integers from 1 to W. The only prior work on weighted graphs in o(mn log W) time is the O(mn^{0.986} log W)-time algorithm by Henzinger, Krinninger, and Nanongkai (STOC 2014) which works for the general weighted directed case. In contrast to the previous results which rely on maintaining a sparse emulator, our algorithm relies on maintaining a so-called sparse (d, ε)-hop set introduced by Cohen (JACM 2000) in the PRAM literature. A (d, ε)-hop set of a graph G = (V, E) is a set E' of weighted edges such that the distance between any pair of nodes in G can be (1 + ε)-approximated by their d-hop distance (given by a path containing at most d edges) on G' = (V, E ∪ E'). Our algorithm can maintain an (n^{o(1)}, ε)-hop set of near-linear size in near-linear time under edge deletions. It is the first of its kind to the best of our knowledge. To maintain the distances on this hop set, we develop a monotone bounded-hop Even-Shiloach tree. It results from extending and combining the monotone Even-Shiloach tree of Henzinger, Krinninger, and Nanongkai (FOCS 2013) with the bounded-hop SSSP technique of Bernstein (STOC 2013).
These two new tools might be of independent interest.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {55th Annual Symposium on Foundations of Computer Science}, issn = {0272-5428}, location = {Philadelphia, PA, United States}, pages = {146--155}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Decremental single-source shortest paths on undirected graphs in near-linear total update time}}, doi = {10.1109/focs.2014.24}, year = {2014}, } @inproceedings{11870, abstract = {We consider dynamic algorithms for maintaining Single-Source Reachability (SSR) and approximate Single-Source Shortest Paths (SSSP) on n-node m-edge directed graphs under edge deletions (decremental algorithms). The previous fastest algorithm for SSR and SSSP goes back three decades to Even and Shiloach (JACM 1981); it has O(1) query time and O(mn) total update time (i.e., linear amortized update time if all edges are deleted). This algorithm serves as a building block for several other dynamic algorithms. The question whether its total update time can be improved is a major, long-standing open problem. In this paper, we answer this question affirmatively. We obtain a randomized algorithm which, in a simplified form, achieves an Õ(mn^{0.984}) expected total update time for SSR and (1 + ε)-approximate SSSP, where Õ(·) hides poly log n. We also extend our algorithm to achieve roughly the same running time for Strongly Connected Components (SCC), improving the algorithm of Roditty and Zwick (FOCS 2002), and an algorithm that improves the Õ(mn log W)-time algorithm of Bernstein (STOC 2013) for approximating SSSP on weighted directed graphs, where the edge weights are integers from 1 to W. All our algorithms have constant query time in the worst case.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {46th Annual ACM Symposium on Theory of Computing}, isbn = {978-145032710-7}, issn = {0737-8017}, location = {New York, NY, United States}, publisher = {Association for Computing Machinery}, title = {{Sublinear-time decremental algorithms for single-source reachability and shortest paths on directed graphs}}, doi = {10.1145/2591796.2591869}, year = {2014}, } @inproceedings{11876, abstract = {We study dynamic (1 + ε)-approximation algorithms for the single-source shortest paths problem in an unweighted undirected n-node m-edge graph under edge deletions. The fastest algorithm for this problem is an algorithm with O(n^{2+o(1)}) total update time and constant query time by Bernstein and Roditty (SODA 2011). In this paper, we improve the total update time to O(n^{1.8+o(1)} + m^{1+o(1)}) while keeping the query time constant. This running time is essentially tight when m = Ω(n^{1.8}) since we need Ω(m) time even in the static setting. For smaller values of m, the running time of our algorithm is subquadratic, and is the first that breaks through the quadratic time barrier. In obtaining this result, we develop a fast algorithm for what we call center cover data structure. We also make non-trivial extensions to our previous techniques called lazy-update and monotone Even-Shiloach trees (ICALP 2013 and FOCS 2013). As by-products of our new techniques, we obtain two new results for the decremental all-pairs shortest-paths problem. Our first result is the first approximation algorithm whose total update time is faster than Õ(mn) for all values of m.
Our second result is a new trade-off between the total update time and the additive approximation guarantee.}, author = {Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon}, booktitle = {25th Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {978-1-61197-338-9}, location = {Portland, OR, United States}, pages = {1053--1072}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A subquadratic-time algorithm for decremental single-source shortest paths}}, doi = {10.1137/1.9781611973402.79}, year = {2014}, } @inproceedings{11875, abstract = {We present the first deterministic data structures for maintaining approximate minimum vertex cover and maximum matching in a fully dynamic graph in time per update. In particular, for minimum vertex cover we provide deterministic data structures for maintaining a (2 + ε)-approximation in O(log n/ε^2) amortized time per update. For maximum matching, we show how to maintain a (3 + ε)-approximation in O(m^{1/3}/ε^2) amortized time per update, and a (4 + ε)-approximation in O(m^{1/3}/ε^2) worst-case time per update. Our data structure for fully dynamic minimum vertex cover is essentially near-optimal and settles an open problem by Onak and Rubinfeld [13].}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Italiano, Giuseppe F.}, booktitle = {26th Annual ACM-SIAM Symposium on Discrete Algorithms}, isbn = {978-1-61197-374-7}, location = {San Diego, CA, United States}, pages = {785--804}, publisher = {Society for Industrial and Applied Mathematics}, title = {{Deterministic fully dynamic data structures for vertex cover and matching}}, doi = {10.1137/1.9781611973730.54}, year = {2014}, } @article{119, abstract = {Observations of flowing granular matter have suggested that same-material tribocharging depends on particle size, typically rendering large grains positive and small ones negative. Models assuming the transfer of trapped electrons can account for this trend, but have not been validated. Tracking individual grains in an electric field, we show quantitatively that charge is transferred based on size between materially identical grains. However, the surface density of trapped electrons, measured independently by thermoluminescence techniques, is orders of magnitude too small to account for the scale of charge transferred. This reveals that trapped electrons are not a necessary ingredient for same-material tribocharging.}, author = {Waitukaitis, Scott R and Lee, Victor and Pierson, James and Forman, Steven and Jaeger, Heinrich}, journal = {Physical Review Letters}, number = {21}, publisher = {American Physical Society}, title = {{Size-dependent same-material tribocharging in insulating grains}}, doi = {10.1103/PhysRevLett.112.218001}, volume = {112}, year = {2014}, } @article{11968, abstract = {Membrane phospholipids typically contain fatty acids (FAs) of 16 and 18 carbon atoms. This particular chain length is evolutionarily highly conserved and presumably provides maximum stability and dynamic properties to biological membranes in response to nutritional or environmental cues. Here, we show that the relative proportion of C16 versus C18 FAs is regulated by the activity of acetyl-CoA carboxylase (Acc1), the first and rate-limiting enzyme of FA de novo synthesis. Acc1 activity is attenuated by AMPK/Snf1-dependent phosphorylation, which is required to maintain an appropriate acyl-chain length distribution.
Moreover, we find that the transcriptional repressor Opi1 preferentially binds to C16 over C18 phosphatidic acid (PA) species: thus, C16-chain containing PA sequesters Opi1 more effectively to the ER, enabling AMPK/Snf1 control of PA acyl-chain length to determine the degree of derepression of Opi1 target genes. These findings reveal an unexpected regulatory link between the major energy-sensing kinase, membrane lipid composition, and transcription.}, author = {Hofbauer, Harald F. and Schopf, Florian H. and Schleifer, Hannes and Knittelfelder, Oskar L. and Pieber, Bartholomäus and Rechberger, Gerald N. and Wolinski, Heimo and Gaspar, Maria L. and Kappe, C. Oliver and Stadlmann, Johannes and Mechtler, Karl and Zenz, Alexandra and Lohner, Karl and Tehlivets, Oksana and Henry, Susan A. and Kohlwein, Sepp D.}, issn = {1878-1551}, journal = {Developmental Cell}, number = {6}, pages = {P729--739}, publisher = {Elsevier}, title = {{Regulation of gene expression through a transcriptional repressor that senses acyl-chain length in membrane phospholipids}}, doi = {10.1016/j.devcel.2014.04.025}, volume = {29}, year = {2014}, } @article{11967, abstract = {An experimentally easy to perform method for the generation of alumina-supported Fe3O4 nanoparticles [(6±1) nm size, 0.67 wt %]and the use of this material in hydrazine-mediated heterogeneously catalyzed reductions of nitroarenes to anilines under batch and continuous-flow conditions is presented. The bench-stable, reusable nano-Fe3O4@Al2O3 catalyst can selectively reduce functionalized nitroarenes at 1 mol % catalyst loading by using a 20 mol % excess of hydrazine hydrate in an elevated temperature regime (150 °C, reaction time 2–6 min in batch). For continuous-flow processing, the catalyst material is packed into dedicated cartridges and used in a commercially available high-temperature/-pressure flow device. In continuous mode, reaction times can be reduced to less than 1 min at 150 °C (30 bar back pressure) in a highly intensified process. The nano-Fe3O4@Al2O3 catalyst demonstrated stable reduction of nitrobenzene (0.5 M in MeOH) for more than 10 h on stream at a productivity of 30 mmol h−1 (0.72 mol per day). Importantly, virtually no leaching of the catalytically active material could be observed by inductively coupled plasma MS monitoring.}, author = {Moghaddam, Mojtaba Mirhosseini and Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. Oliver}, issn = {1864-564X}, journal = {ChemSusChem}, number = {11}, pages = {3122--3131}, publisher = {Wiley}, title = {{Immobilized iron oxide nanoparticles as stable and reusable catalysts for hydrazine-mediated nitro reductions in continuous flow}}, doi = {10.1002/cssc.201402455}, volume = {7}, year = {2014}, } @article{11987, abstract = {A method for the direct lithiation of terminal alkynes and heterocycles with subsequent carboxylation in a continuous flow format was developed. This method provides carboxylic acids at ambient conditions within less than five seconds with only little excess of the organometallic base and CO2.}, author = {Pieber, Bartholomäus and Glasnov, Toma and Kappe, C. 
O.}, issn = {2046-2069}, journal = {RSC Advances}, number = {26}, publisher = {Royal Society of Chemistry}, title = {{Flash carboxylation: Fast lithiation–carboxylation sequence at room temperature in continuous flow}}, doi = {10.1039/c4ra01442a}, volume = {4}, year = {2014}, } @article{1309, abstract = {We show that weak solutions of the Derrida-Lebowitz-Speer-Spohn (DLSS) equation display infinite speed of support propagation. We apply our method to the case of the quantum drift-diffusion equation which augments the DLSS equation with a drift term and possibly a second-order diffusion term. The proof is accomplished using weighted entropy estimates, Hardy's inequality and a family of singular weight functions to derive a differential inequality; the differential inequality shows exponential growth of the weighted entropy, with the growth constant blowing up very fast as the singularity of the weight becomes sharper. To the best of our knowledge, this is the first example of a nonnegativity-preserving higher-order parabolic equation displaying infinite speed of support propagation.}, author = {Fischer, Julian}, journal = {Nonlinear Differential Equations and Applications}, number = {1}, pages = {27 -- 50}, publisher = {Birkhäuser}, title = {{Infinite speed of support propagation for the Derrida-Lebowitz-Speer-Spohn equation and quantum drift-diffusion models}}, doi = {10.1007/s00030-013-0235-0}, volume = {21}, year = {2014}, } @article{1312, abstract = {We derive upper bounds on the waiting time of solutions to the thin-film equation in the regime of weak slippage n ∈ [2, 32/11). In particular, we give sufficient conditions on the initial data for instantaneous forward motion of the free boundary. For n ∈ (2, 32/11), our estimates are sharp; for n = 2, they are sharp up to a logarithmic correction term. Note that the case n = 2 corresponds, with a grain of salt, to the assumption of the Navier slip condition at the fluid-solid interface. We also obtain results in the regime of strong slippage n ∈ (1,2); however, in this regime we expect them not to be optimal. Our method is based on weighted backward entropy estimates, Hardy's inequality and singular weight functions; we deduce a differential inequality which would enforce blowup of the weighted entropy if the contact line were to remain stationary for too long.}, author = {Fischer, Julian}, journal = {Archive for Rational Mechanics and Analysis}, number = {3}, pages = {771 -- 818}, publisher = {Springer}, title = {{Upper bounds on waiting times for the Thin-film equation: The case of weak slippage}}, doi = {10.1007/s00205-013-0690-0}, volume = {211}, year = {2014}, } @article{1375, abstract = {We consider directed graphs where each edge is labeled with an integer weight and study the fundamental algorithmic question of computing the value of a cycle with minimum mean weight. Our contributions are twofold: (1) First we show that the algorithmic question is reducible to the problem of a logarithmic number of min-plus matrix multiplications of n×n-matrices, where n is the number of vertices of the graph. (2) Second, when the weights are nonnegative, we present the first (1+ε)-approximation algorithm for the problem, and the running time of our algorithm is Õ(n^ω log^3(nW/ε)/ε), where O(n^ω) is the time required for the classic n×n-matrix multiplication and W is the maximum value of the weights.
With an additional O(log(nW/ε)) factor in space a cycle with approximately optimal weight can be computed within the same time bound.}, author = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Loitzenbauer, Veronika and Raskin, Michael}, journal = {Theoretical Computer Science}, number = {C}, pages = {104 -- 116}, publisher = {Elsevier}, title = {{Approximating the minimum cycle mean}}, doi = {10.1016/j.tcs.2014.06.031}, volume = {547}, year = {2014}, } @inproceedings{1392, abstract = {Fault-tolerant distributed algorithms play an important role in ensuring the reliability of many software applications. In this paper we consider distributed algorithms whose computations are organized in rounds. To verify the correctness of such algorithms, we reason about (i) properties (such as invariants) of the state, (ii) the transitions controlled by the algorithm, and (iii) the communication graph. We introduce a logic that addresses these points, and contains set comprehensions with cardinality constraints, function symbols to describe the local states of each process, and a limited form of quantifier alternation to express the verification conditions. We show its use in automating the verification of consensus algorithms. In particular, we give a semi-decision procedure for the unsatisfiability problem of the logic and identify a decidable fragment. We successfully applied our framework to verify the correctness of a variety of consensus algorithms tolerant to both benign faults (message loss, process crashes) and value faults (message corruption).}, author = {Dragoi, Cezara and Henzinger, Thomas A and Veith, Helmut and Widder, Josef and Zufferey, Damien}, location = {San Diego, USA}, pages = {161 -- 181}, publisher = {Springer}, title = {{A logic-based framework for verifying consensus algorithms}}, doi = {10.1007/978-3-642-54013-4_10}, volume = {8318}, year = {2014}, } @inproceedings{1393, abstract = {Probabilistic programs are usual functional or imperative programs with two added constructs: (1) the ability to draw values at random from distributions, and (2) the ability to condition values of variables in a program via observations. Models from diverse application areas such as computer vision, coding theory, cryptographic protocols, biology and reliability analysis can be written as probabilistic programs. Probabilistic inference is the problem of computing an explicit representation of the probability distribution implicitly specified by a probabilistic program. Depending on the application, the desired output from inference may vary-we may want to estimate the expected value of some function f with respect to the distribution, or the mode of the distribution, or simply a set of samples drawn from the distribution. In this paper, we describe connections this research area called \Probabilistic Programming" has with programming languages and software engineering, and this includes language design, and the static and dynamic analysis of programs. 
We survey the current state of the art and speculate on promising directions for future research.}, author = {Gordon, Andrew and Henzinger, Thomas A and Nori, Aditya and Rajamani, Sriram}, booktitle = {Proceedings of the Future of Software Engineering}, location = {Hyderabad, India}, pages = {167 -- 181}, publisher = {ACM}, title = {{Probabilistic programming}}, doi = {10.1145/2593882.2593900}, year = {2014}, } @phdthesis{1404, abstract = {The co-evolution of hosts and pathogens is characterized by continuous adaptations of both parties. Pathogens of social insects need to adapt towards disease defences at two levels: 1) individual immunity of each colony member consisting of behavioural defence strategies as well as humoral and cellular immune responses and 2) social immunity that is collectively performed by all group members comprising behavioural, physiological and organisational defence strategies. To disentangle the selection pressure on pathogens by the collective versus individual level of disease defence in social insects, we performed an evolution experiment using the Argentine ant, Linepithema humile, as a host and a mixture of the general insect pathogenic fungus Metarhizium spp. (6 strains) as a pathogen. We allowed pathogen evolution over 10 serial host passages to two different evolution host treatments: (1) only individual host immunity in a single host treatment, and (2) simultaneously acting individual and social immunity in a social host treatment, in which an exposed ant was accompanied by two untreated nestmates. Before starting the pathogen evolution experiment, the 6 Metarhizium spp. strains were characterised concerning conidiospore size, killing rates in singly and socially reared ants, their competitiveness under coinfecting conditions and their influence on ant behaviour. We analysed how the ancestral strain mixture changed in conidiospore size, killing rate and strain composition dependent on host treatment (single or social hosts) during 10 passages and found that killing rate and conidiospore size of the pathogen increased under both evolution regimes, but differently depending on host treatment. Testing the strain mixtures that evolved under either the single or social host treatment under both single and social rearing conditions in a full factorial design experiment revealed that the additional collective defences in insect societies add new selection pressure on their coevolving pathogens that compromises their ability to adapt to the host at the group level. To our knowledge, this is the first study directly measuring the influence of social immunity on pathogen evolution.}, author = {Stock, Miriam}, pages = {101}, publisher = {IST Austria}, title = {{Evolution of a fungal pathogen towards individual versus social immunity in ants}}, year = {2014}, } @inproceedings{1516, abstract = {We present a rigorous derivation of the BCS gap equation for superfluid fermionic gases with point interactions. Our starting point is the BCS energy functional, whose minimizer we investigate in the limit when the range of the interaction potential goes to zero. 
}, author = {Bräunlich, Gerhard and Hainzl, Christian and Seiringer, Robert}, booktitle = {Proceedings of the QMath12 Conference}, location = {Berlin, Germany}, pages = {127 -- 137}, publisher = {World Scientific Publishing}, title = {{On the BCS gap equation for superfluid fermionic gases}}, doi = {10.1142/9789814618144_0007}, year = {2014}, } @article{1629, abstract = {We propose a method for propagating edit operations in 2D vector graphics, based on geometric relationship functions. These functions quantify the geometric relationship of a point to a polygon, such as the distance to the boundary or the direction to the closest corner vertex. The level sets of the relationship functions describe points with the same relationship to a polygon. For a given query point, we first determine a set of relationships to local features, construct all level sets for these relationships, and accumulate them. The maxima of the resulting distribution are points with similar geometric relationships. We show extensions to handle mirror symmetries, and discuss the use of relationship functions as local coordinate systems. Our method can be applied, for example, to interactive floorplan editing, and it is especially useful for large layouts, where individual edits would be cumbersome. We demonstrate populating 2D layouts with tens to hundreds of objects by propagating relatively few edit operations.}, author = {Guerrero, Paul and Jeschke, Stefan and Wimmer, Michael and Wonka, Peter}, journal = {ACM Transactions on Graphics}, number = {2}, publisher = {ACM}, title = {{Edit propagation using geometric relationship functions}}, doi = {10.1145/2591010}, volume = {33}, year = {2014}, } @inproceedings{10793, abstract = {The Hanani–Tutte theorem is a classical result proved for the first time in the 1930s that characterizes planar graphs as graphs that admit a drawing in the plane in which every pair of edges not sharing a vertex cross an even number of times. We generalize this classical result to clustered graphs with two disjoint clusters, and show that a straightforward extension of our result to flat clustered graphs with three or more disjoint clusters is not possible. We also give a new and short proof for a related result by Di Battista and Frati based on the matroid intersection algorithm.}, author = {Fulek, Radoslav and Kynčl, Jan and Malinović, Igor and Pálvölgyi, Dömötör}, booktitle = {International Symposium on Graph Drawing}, issn = {0302-9743}, pages = {428--436}, publisher = {Springer Nature}, title = {{Clustered planarity testing revisited}}, doi = {10.1007/978-3-662-45803-7_36}, volume = {8871}, year = {2014}, } @inproceedings{1643, abstract = {We extend the notion of verifiable random functions (VRF) to constrained VRFs, which generalize the concept of constrained pseudorandom functions, put forward by Boneh and Waters (Asiacrypt’13), and independently by Kiayias et al. (CCS’13) and Boyle et al. (PKC’14), who call them delegatable PRFs and functional PRFs, respectively. In a standard VRF the secret key sk allows one to evaluate a pseudorandom function at any point of its domain; in addition, it enables computation of a non-interactive proof that the function value was computed correctly. In a constrained VRF from the key sk one can derive constrained keys skS for subsets S of the domain, which allow computation of function values and proofs only at points in S. 
After formally defining constrained VRFs, we derive instantiations from the multilinear-maps-based constrained PRFs by Boneh and Waters, yielding a VRF with constrained keys for any set that can be decided by a polynomial-size circuit. Our VRFs have the same function values as the Boneh-Waters PRFs and are proved secure under the same hardness assumption, showing that verifiability comes at no cost. Constrained (functional) VRFs were stated as an open problem by Boyle et al.}, author = {Fuchsbauer, Georg}, booktitle = {SCN 2014}, editor = {Abdalla, Michel and De Prisco, Roberto}, location = {Amalfi, Italy}, pages = {95 -- 114}, publisher = {Springer}, title = {{Constrained Verifiable Random Functions}}, doi = {10.1007/978-3-319-10879-7_7}, volume = {8642}, year = {2014}, } @inproceedings{1702, abstract = {In this paper we present INTERHORN, a solver for recursion-free Horn clauses. The main application domain of INTERHORN lies in solving interpolation problems arising in software verification. We show how a range of interpolation problems, including path, transition, nested, state/transition and well-founded interpolation, can be handled directly by INTERHORN. By detailing these interpolation problems and their Horn clause representations, we hope to encourage the emergence of a common back-end interpolation interface useful for diverse verification tools.}, author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey}, booktitle = {Electronic Proceedings in Theoretical Computer Science, EPTCS}, location = {Vienna, Austria}, pages = {31 -- 38}, publisher = {Open Publishing}, title = {{Generalised interpolation by solving recursion-free Horn clauses}}, doi = {10.4204/EPTCS.169.5}, volume = {169}, year = {2014}, } @inproceedings{1708, abstract = {It has been long argued that, because of inherent ambiguity and noise, the brain needs to represent uncertainty in the form of probability distributions. The neural encoding of such distributions remains, however, highly controversial. Here we present a novel circuit model for representing multidimensional real-valued distributions using a spike-based spatio-temporal code. Our model combines the computational advantages of the currently competing models for probabilistic codes and exhibits realistic neural responses along a variety of classic measures. Furthermore, the model highlights the challenges associated with interpreting neural activity in relation to behavioral uncertainty and points to alternative population-level approaches for the experimental validation of distributed representations.}, author = {Savin, Cristina and Denève, Sophie}, location = {Montreal, Canada}, number = {January}, pages = {2024 -- 2032}, publisher = {Neural Information Processing Systems}, title = {{Spatio-temporal representations of uncertainty in spiking neural networks}}, volume = {3}, year = {2014}, } @article{1761, abstract = {Metal silicides formed by means of thermal annealing processes are employed as contact materials in microelectronics. Control of the structure of silicide/silicon interfaces becomes a critical issue when the characteristic size of the device is reduced below a few tens of nanometers. Here, we report on silicide clustering occurring within the channel of PtSi/Si/PtSi Schottky-barrier transistors. This phenomenon is investigated through atomistic simulations and low-temperature resonant-tunneling spectroscopy. 
Our results provide evidence for the segregation of a PtSi cluster with a diameter of a few nanometers from the silicide contact. The cluster acts as a metallic quantum dot giving rise to distinct signatures of quantum transport through its discrete energy states.}, author = {Mongillo, Massimo and Spathis, Panayotis N and Katsaros, Georgios and De Franceschi, Silvano and Gentile, Pascal and Rurali, Riccardo and Cartoixà, Xavier}, journal = {Physical Review X}, number = {4}, publisher = {American Physical Society}, title = {{PtSi clustering in silicon probed by transport spectroscopy}}, doi = {10.1103/PhysRevX.3.041025}, volume = {3}, year = {2014}, } @article{1791, abstract = {Acute gene inactivation using short hairpin RNA (shRNA, knockdown) in developing brain is a powerful technique to study genetic function; however, discrepancies between knockdown and knockout murine phenotypes have left unanswered questions. For example, doublecortin (Dcx) knockdown but not knockout shows a neocortical neuronal migration phenotype. Here we report that in utero electroporation of shRNA, but not siRNA or miRNA, to Dcx demonstrates a migration phenotype in Dcx knockouts akin to the effect in wild-type mice, suggesting shRNA-mediated off-target toxicity. This effect was not limited to Dcx, as it was observed in Dclk1 knockouts, as well as with a fraction of scrambled shRNAs, suggesting a sequence-dependent but not sequence-specific effect. Profiling RNAs from electroporated cells showed a defect in endogenous let7 miRNA levels, and disruption of let7 or Dicer recapitulated the migration defect. The results suggest that shRNA-mediated knockdown can produce untoward migration effects by altering endogenous miRNA pathways.}, author = {Baek, SeungTae and Kerjan, Géraldine and Bielas, Stephanie L and Lee, Jieun and Fenstermaker, Ali G and Novarino, Gaia and Gleeson, Joseph G}, journal = {Neuron}, number = {6}, pages = {1255 -- 1262}, publisher = {Elsevier}, title = {{Off-target effect of doublecortin family shRNA on neuronal migration associated with endogenous microRNA dysregulation}}, doi = {10.1016/j.neuron.2014.04.036}, volume = {82}, year = {2014}, } @inbook{1806, abstract = {The generation of asymmetry, at both cellular and tissue level, is one of the most essential capabilities of all eukaryotic organisms. It mediates basically all multicellular development, ranging from embryogenesis and de novo organ formation to responses to various environmental stimuli. In plants, the awe-inspiring number of such processes is regulated by the phytohormone auxin and its directional, cell-to-cell transport. The mediators of this transport, PIN auxin transporters, are asymmetrically localized at the plasma membrane, and this polar localization determines the directionality of intercellular auxin flow. Thus, auxin transport contributes crucially to the generation of local auxin gradients or maxima, which instruct a given cell to change its developmental program. 
Here, we introduce and discuss the molecular components and cellular mechanisms regulating the generation and maintenance of cellular PIN polarity, as the general hallmarks of cell polarity in plants.}, author = {Baster, Pawel and Friml, Jiří}, booktitle = {Auxin and Its Role in Plant Development}, editor = {Zažímalová, Eva and Petrášek, Jan and Benková, Eva}, pages = {143 -- 170}, publisher = {Springer}, title = {{Auxin on the road navigated by cellular PIN polarity}}, doi = {10.1007/978-3-7091-1526-8_8}, year = {2014}, } @article{1816, abstract = {Watermarking techniques for vector graphics dislocate vertices in order to embed imperceptible, yet detectable, statistical features into the input data. The embedding process may result in a change of the topology of the input data, e.g., by introducing self-intersections, which is undesirable or even disastrous for many applications. In this paper we present a watermarking framework for two-dimensional vector graphics that employs conventional watermarking techniques but still provides the guarantee that the topology of the input data is preserved. The geometric part of this framework computes so-called maximum perturbation regions (MPR) of vertices. We propose two efficient algorithms to compute MPRs based on Voronoi diagrams and constrained triangulations. Furthermore, we present two algorithms to conditionally correct the watermarked data in order to increase the watermark embedding capacity and still guarantee topological correctness. While we focus on the watermarking of input formed by straight-line segments, one of our approaches can also be extended to circular arcs. We conclude the paper by demonstrating and analyzing the applicability of our framework in conjunction with two well-known watermarking techniques.}, author = {Huber, Stefan and Held, Martin and Meerwald, Peter and Kwitt, Roland}, journal = {International Journal of Computational Geometry and Applications}, number = {1}, pages = {61 -- 86}, publisher = {World Scientific Publishing}, title = {{Topology-preserving watermarking of vector graphics}}, doi = {10.1142/S0218195914500034}, volume = {24}, year = {2014}, } @article{1821, abstract = {We review recent progress towards a rigorous understanding of the Bogoliubov approximation for bosonic quantum many-body systems. We focus, in particular, on the excitation spectrum of a Bose gas in the mean-field (Hartree) limit. A list of open problems will be discussed at the end.}, author = {Seiringer, Robert}, journal = {Journal of Mathematical Physics}, number = {7}, publisher = {American Institute of Physics}, title = {{Bose gases, Bose-Einstein condensation, and the Bogoliubov approximation}}, doi = {10.1063/1.4881536}, volume = {55}, year = {2014}, } @article{1822, author = {Jakšić, Vojkan and Pillet, Claude and Seiringer, Robert}, journal = {Journal of Mathematical Physics}, number = {7}, publisher = {American Institute of Physics}, title = {{Introduction}}, doi = {10.1063/1.4884877}, volume = {55}, year = {2014}, } @inbook{1829, abstract = {Hitting and batting tasks, such as tennis forehands, ping-pong strokes, or baseball batting, depend on predictions where the ball can be intercepted and how it can properly be returned to the opponent. These predictions get more accurate over time, hence the behaviors need to be continuously modified. 
As a result, movement templates with a learned global shape need to be adapted during the execution so that the racket reaches a target position and velocity that will return the ball over to the other side of the net or court. It requires altering learned movements to hit a varying target with the necessary velocity at a specific instant in time. Such a task cannot be incorporated straightforwardly in most movement representations suitable for learning. For example, the standard formulation of the dynamical-system-based motor primitives (introduced by Ijspeert et al. (2002b)) does not satisfy this property despite their flexibility, which has allowed learning tasks ranging from locomotion to kendama. In order to fulfill this requirement, we reformulate the Ijspeert framework to incorporate the possibility of specifying a desired hitting point and a desired hitting velocity while maintaining all advantages of the original formulation. We show that the proposed movement template formulation works well in two scenarios, i.e., for hitting a ball on a string with a table tennis racket at a specified velocity and for returning balls launched by a ball gun successfully over the net using forehand movements.}, author = {Muelling, Katharina and Kroemer, Oliver and Lampert, Christoph and Schölkopf, Bernhard}, booktitle = {Learning Motor Skills}, editor = {Kober, Jens and Peters, Jan}, pages = {69 -- 82}, publisher = {Springer}, title = {{Movement templates for learning of hitting and batting}}, doi = {10.1007/978-3-319-03194-1_3}, volume = {97}, year = {2014}, } @article{1844, abstract = {Local protein interactions ("molecular context" effects) dictate amino acid replacements and can be described in terms of site-specific, energetic preferences for any different amino acid. It has been recently debated whether these preferences remain approximately constant during evolution or whether, due to coevolution of sites, they change strongly. Such research highlights an unresolved and fundamental issue with far-reaching implications for phylogenetic analysis and molecular evolution modeling. Here, we take advantage of the recent availability of phenotypically supported laboratory resurrections of Precambrian thioredoxins and β-lactamases to experimentally address the change of site-specific amino acid preferences over long geological timescales. Extensive mutational analyses support the notion that evolutionary adjustment to a new amino acid may occur, but to a large extent this is insufficient to erase the primitive preference for amino acid replacements. Generally, site-specific amino acid preferences appear to remain conserved throughout evolutionary history despite local sequence divergence. We show such preference conservation to be readily understandable in molecular terms and we provide crystallographic evidence for an intriguing structural-switch mechanism: Energetic preference for an ancestral amino acid in a modern protein can be linked to reorganization upon mutation to the ancestral local structure around the mutated site. 
Finally, we point out that site-specific preference conservation naturally leads to one plausible evolutionary explanation for the existence of intragenic global suppressor mutations.}, author = {Risso, Valeria and Manssour Triedo, Fadia and Delgado Delgado, Asuncion and Arco, Rocio and Barroso Deljesús, Alicia and Inglés Prieto, Álvaro and Godoy Ruiz, Raquel and Gavira, Josè and Gaucher, Eric and Ibarra Molero, Beatriz and Sánchez Ruiz, Jose}, journal = {Molecular Biology and Evolution}, number = {2}, pages = {440 -- 455}, publisher = {Oxford University Press}, title = {{Mutational studies on resurrected ancestral proteins reveal conservation of site-specific amino acid preferences throughout evolutionary history}}, doi = {10.1093/molbev/msu312}, volume = {32}, year = {2014}, } @article{1842, abstract = {We prove polynomial upper bounds of geometric Ramsey numbers of pathwidth-2 outerplanar triangulations in both convex and general cases. We also prove that the geometric Ramsey numbers of the ladder graph on 2n vertices are bounded by O(n^3) and O(n^10), in the convex and general case, respectively. We then apply similar methods to prove an (Formula presented.) upper bound on the Ramsey number of a path with n ordered vertices.}, author = {Cibulka, Josef and Gao, Pu and Krcál, Marek and Valla, Tomáš and Valtr, Pavel}, journal = {Discrete & Computational Geometry}, number = {1}, pages = {64 -- 79}, publisher = {Springer}, title = {{On the geometric Ramsey number of outerplanar graphs}}, doi = {10.1007/s00454-014-9646-x}, volume = {53}, year = {2014}, } @article{1854, abstract = {In this paper, we present a method for non-rigid, partial shape matching in vector graphics. Given a user-specified query region in a 2D shape, similar regions are found, even if they are non-linearly distorted. Furthermore, a non-linear mapping is established between the query regions and these matches, which allows the automatic transfer of editing operations such as texturing. This is achieved by a two-step approach. First, pointwise correspondences between the query region and the whole shape are established. The transformation parameters of these correspondences are registered in an appropriate transformation space. For transformations between similar regions, these parameters form surfaces in transformation space, which are extracted in the second step of our method. 
The extracted regions may be related to the query region by a non-rigid transform, enabling non-rigid shape matching.}, author = {Guerrero, Paul and Auzinger, Thomas and Wimmer, Michael and Jeschke, Stefan}, journal = {Computer Graphics Forum}, number = {1}, pages = {239 -- 252}, publisher = {Wiley}, title = {{Partial shape matching using transformation parameter similarity}}, doi = {10.1111/cgf.12509}, volume = {34}, year = {2014}, } @article{1852, abstract = {To control morphogenesis, molecular regulatory networks have to interfere with the mechanical properties of the individual cells of developing organs and tissues, but how this is achieved is not well known. We study this issue here in the shoot meristem of higher plants, a group of undifferentiated cells where complex changes in growth rates and directions lead to the continuous formation of new organs [1, 2]. Here, we show that the plant hormone auxin plays an important role in this process via a dual, local effect on the extracellular matrix, the cell wall, which determines cell shape. Our study reveals that auxin not only causes a limited reduction in wall stiffness but also directly interferes with wall anisotropy via the regulation of cortical microtubule dynamics. We further show that to induce growth isotropy and organ outgrowth, auxin somehow interferes with the cortical microtubule-ordering activity of a network of proteins, including AUXIN BINDING PROTEIN 1 and KATANIN 1. Numerical simulations further indicate that the induced isotropy is sufficient to amplify the effects of the relatively minor changes in wall stiffness to promote organogenesis and the establishment of new growth axes in a robust manner.}, author = {Sassi, Massimiliano and Ali, Olivier and Boudon, Frédéric and Cloarec, Gladys and Abad, Ursula and Cellier, Coralie and Chen, Xu and Gilles, Benjamin and Milani, Pascale and Friml, Jirí and Vernoux, Teva and Godin, Christophe and Hamant, Olivier and Traas, Jan}, journal = {Current Biology}, number = {19}, pages = {2335 -- 2342}, publisher = {Cell Press}, title = {{An auxin-mediated shift toward growth isotropy promotes organ formation at the shoot meristem in Arabidopsis}}, doi = {10.1016/j.cub.2014.08.036}, volume = {24}, year = {2014}, } @inproceedings{1853, abstract = {Wireless sensor networks (WSNs) composed of low-power, low-cost sensor nodes are expected to form the backbone of future intelligent networks for a broad range of civil, industrial and military applications. These sensor nodes are often deployed through random spreading, and function in dynamic environments. Many applications of WSNs such as pollution tracking, forest fire detection, and military surveillance require knowledge of the location of constituent nodes. But the use of technologies such as GPS on all nodes is prohibitive due to power and cost constraints. So, the sensor nodes need to autonomously determine their locations. Most localization techniques use anchor nodes with known locations to determine the position of remaining nodes. Localization techniques have two conflicting requirements. On one hand, an ideal localization technique should be computationally simple and on the other hand, it must be resistant to attacks that compromise anchor nodes. 
In this paper, we propose a computationally lightweight game-theoretic secure localization technique and demonstrate its effectiveness in comparison to existing techniques.}, author = {Jha, Susmit and Tripakis, Stavros and Seshia, Sanjit and Chatterjee, Krishnendu}, location = {Cambridge, USA}, pages = {85 -- 90}, publisher = {IEEE}, title = {{Game theoretic secure localization in wireless sensor networks}}, doi = {10.1109/IOT.2014.7030120}, year = {2014}, } @article{1862, abstract = {The prominent and evolutionarily ancient role of the plant hormone auxin is the regulation of cell expansion. Cell expansion requires ordered arrangement of the cytoskeleton, but molecular mechanisms underlying its regulation by signalling molecules, including auxin, are unknown. Here we show in the model plant Arabidopsis thaliana that in elongating cells exogenous application of auxin or redistribution of endogenous auxin induces very rapid microtubule re-orientation from transverse to longitudinal, coherent with the inhibition of cell expansion. This fast auxin effect requires auxin binding protein 1 (ABP1) and involves a contribution of downstream signalling components such as ROP6 GTPase, ROP-interactive protein RIC1 and the microtubule-severing protein katanin. These components are required for rapid auxin- and ABP1-mediated re-orientation of microtubules to regulate cell elongation in roots and dark-grown hypocotyls as well as asymmetric growth during gravitropic responses.}, author = {Chen, Xu and Grandont, Laurie and Li, Hongjiang and Hauschild, Robert and Paque, Sébastien and Abuzeineh, Anas and Rakusova, Hana and Benková, Eva and Perrot Rechenmann, Catherine and Friml, Jirí}, issn = {1476-4687}, journal = {Nature}, number = {729}, pages = {90 -- 93}, publisher = {Nature Publishing Group}, title = {{Inhibition of cell expansion by rapid ABP1-mediated auxin effect on microtubules}}, doi = {10.1038/nature13889}, volume = {516}, year = {2014}, } @inproceedings{1869, abstract = {Boolean controllers for systems with complex datapaths are often very difficult to implement correctly, in particular when concurrency is involved. Yet, in many instances it is easy to formally specify correctness. For example, the specification for the controller of a pipelined processor only has to state that the pipelined processor gives the same results as a non-pipelined reference design. This makes such controllers a good target for automated synthesis. However, an efficient abstraction for the complex datapath elements is needed, as a bit-precise description is often infeasible. We present Suraq, the first controller synthesis tool which uses uninterpreted functions for the abstraction. Quantified first-order formulas (with specific quantifier structure) serve as the specification language from which Suraq synthesizes Boolean controllers. Suraq transforms the specification into an unsatisfiable SMT formula, and uses Craig interpolation to compute its results. 
Using Suraq, we were able to synthesize a controller (consisting of two Boolean signals) for a five-stage pipelined DLX processor in roughly one hour and 15 minutes.}, author = {Hofferek, Georg and Gupta, Ashutosh}, booktitle = {HVC 2014}, editor = {Yahav, Eran}, location = {Haifa, Israel}, pages = {68 -- 74}, publisher = {Springer}, title = {{Suraq - a controller synthesis tool using uninterpreted functions}}, doi = {10.1007/978-3-319-13338-6_6}, volume = {8855}, year = {2014}, } @inproceedings{1872, abstract = {Extensionality axioms are common when reasoning about data collections, such as arrays and functions in program analysis, or sets in mathematics. An extensionality axiom asserts that two collections are equal if they consist of the same elements at the same indices. Using extensionality is often required to show that two collections are equal. A typical example is the set theory theorem (∀x)(∀y) x ∪ y = y ∪ x. Interestingly, while humans have no problem with proving such set identities using extensionality, they are very hard for superposition theorem provers because of the calculi they use. In this paper we show how the addition of a new inference rule, called extensionality resolution, allows first-order theorem provers to easily solve problems no modern first-order theorem prover can solve. We illustrate this by running the VAMPIRE theorem prover with extensionality resolution on a number of set theory and array problems. Extensionality resolution helps VAMPIRE to solve problems from the TPTP library of first-order problems that were never solved before by any prover.}, author = {Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard and Voronkov, Andrei}, booktitle = {ATVA 2014}, editor = {Cassez, Franck and Raskin, Jean-François}, location = {Sydney, Australia}, pages = {185 -- 200}, publisher = {Springer}, title = {{Extensional crisis and proving identity}}, doi = {10.1007/978-3-319-11936-6_14}, volume = {8837}, year = {2014}, } @inproceedings{1870, abstract = {We investigate the problem of checking if a finite-state transducer is robust to uncertainty in its input. Our notion of robustness is based on the analytic notion of Lipschitz continuity: a transducer is K-(Lipschitz) robust if the perturbation in its output is at most K times the perturbation in its input. We quantify input and output perturbation using similarity functions. We show that K-robustness is undecidable even for deterministic transducers. We identify a class of functional transducers, which admits a polynomial-time automata-theoretic decision procedure for K-robustness. This class includes Mealy machines and functional letter-to-letter transducers. We also study K-robustness of nondeterministic transducers. Since a nondeterministic transducer generates a set of output words for each input word, we quantify output perturbation using set-similarity functions. We show that K-robustness of nondeterministic transducers is undecidable, even for letter-to-letter transducers. 
We identify a class of set-similarity functions which admit decidable K-robustness of letter-to-letter transducers.}, author = {Henzinger, Thomas A and Otop, Jan and Samanta, Roopsha}, booktitle = {Leibniz International Proceedings in Informatics, LIPIcs}, location = {Delhi, India}, pages = {431 -- 443}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Lipschitz robustness of finite-state transducers}}, doi = {10.4230/LIPIcs.FSTTCS.2014.431}, volume = {29}, year = {2014}, } @inproceedings{1875, abstract = {We present a formal framework for repairing infinite-state, imperative, sequential programs, with (possibly recursive) procedures and multiple assertions; the framework can generate repaired programs by modifying the original erroneous program in multiple program locations, and can ensure the readability of the repaired program using user-defined expression templates; the framework also generates a set of inductive assertions that serve as a proof of correctness of the repaired program. As a step toward integrating programmer intent and intuition in automated program repair, we present a cost-aware formulation: given a cost function associated with permissible statement modifications, the goal is to ensure that the total program modification cost does not exceed a given repair budget. As part of our predicate-abstraction-based solution framework, we present a sound and complete algorithm for repair of Boolean programs. We have developed a prototype tool based on SMT solving and used it successfully to repair diverse errors in benchmark C programs.}, author = {Samanta, Roopsha and Olivo, Oswaldo and Allen, Emerson}, editor = {Müller-Olm, Markus and Seidl, Helmut}, location = {Munich, Germany}, pages = {268 -- 284}, publisher = {Springer}, title = {{Cost-aware automatic program repair}}, doi = {10.1007/978-3-319-10936-7_17}, volume = {8723}, year = {2014}, } @article{1876, abstract = {We study densities of functionals over uniformly bounded triangulations of a Delaunay set of vertices, and prove that the minimum is attained for the Delaunay triangulation if this is the case for finite sets.}, author = {Dolbilin, Nikolai and Edelsbrunner, Herbert and Glazyrin, Alexey and Musin, Oleg}, issn = {1609-3321}, journal = {Moscow Mathematical Journal}, number = {3}, pages = {491 -- 504}, publisher = {Independent University of Moscow}, title = {{Functionals on triangulations of Delaunay sets}}, doi = {10.17323/1609-4514-2014-14-3-491-504}, volume = {14}, year = {2014}, }