@article{5992,
abstract = {Lamellipodia are flat membrane protrusions formed during mesenchymal motion. Polymerization at the leading edge assembles the actin filament network and generates protrusion force. How this force is supported by the network and how the assembly rate is shared between protrusion and network retrograde flow determines the protrusion rate. We use mathematical modeling to understand experiments changing the F-actin density in lamellipodia of B16-F1 melanoma cells by modulation of Arp2/3 complex activity or knockout of the formins FMNL2 and FMNL3. Cells respond to a reduction of density with a decrease of protrusion velocity, an increase in the ratio of force to filament number, but constant network assembly rate. The relation between protrusion force and tension gradient in the F-actin network and the density dependency of friction, elasticity, and viscosity of the network explain the experimental observations. The formins act as filament nucleators and elongators with differential rates. Modulation of their activity suggests an effect on network assembly rate. Contrary to these expectations, the effect of changes in elongator composition is much weaker than the consequences of the density change. We conclude that the force acting on the leading edge membrane is the force required to drive F-actin network retrograde flow.},
author = {Dolati, Setareh and Kage, Frieda and Mueller, Jan and Müsken, Mathias and Kirchner, Marieluise and Dittmar, Gunnar and Sixt, Michael K and Rottner, Klemens and Falcke, Martin},
issn = {1059-1524},
journal = {Molecular Biology of the Cell},
number = {22},
pages = {2674--2686},
publisher = {American Society for Cell Biology},
title = {{On the relation between filament density, force generation, and protrusion rate in mesenchymal cell motility}},
doi = {10.1091/mbc.e18-02-0082},
volume = {29},
year = {2018},
}
@article{5993,
abstract = {In this article, we consider the termination problem of probabilistic programs with real-valued variables. The questions concerned are: qualitative ones that ask (i) whether the program terminates with probability 1 (almost-sure termination) and (ii) whether the expected termination time is finite (finite termination); and quantitative ones that ask (i) to approximate the expected termination time (expectation problem) and (ii) to compute a bound B such that the probability not to terminate after B steps decreases exponentially (concentration problem). To solve these questions, we utilize the notion of ranking supermartingales, which is a powerful approach for proving termination of probabilistic programs. In detail, we focus on algorithmic synthesis of linear ranking-supermartingales over affine probabilistic programs (Apps) with both angelic and demonic non-determinism. An important subclass of Apps is LRApp which is defined as the class of all Apps over which a linear ranking-supermartingale exists. Our main contributions are as follows. Firstly, we show that the membership problem of LRApp (i) can be decided in polynomial time for Apps with at most demonic non-determinism, and (ii) is NP-hard and in PSPACE for Apps with angelic non-determinism. Moreover, the NP-hardness result holds already for Apps without probability and demonic non-determinism. Secondly, we show that the concentration problem over LRApp can be solved in the same complexity as for the membership problem of LRApp. Finally, we show that the expectation problem over LRApp can be solved in 2EXPTIME and is PSPACE-hard even for Apps without probability and non-determinism (i.e., deterministic programs). Our experimental results demonstrate the effectiveness of our approach to answer the qualitative and quantitative questions over Apps with at most demonic non-determinism.},
author = {Chatterjee, Krishnendu and Fu, Hongfei and Novotný, Petr and Hasheminezhad, Rouzbeh},
issn = {0164-0925},
journal = {ACM Transactions on Programming Languages and Systems},
number = {2},
publisher = {Association for Computing Machinery (ACM)},
title = {{Algorithmic analysis of qualitative and quantitative termination problems for affine probabilistic programs}},
doi = {10.1145/3174800},
volume = {40},
year = {2018},
}
@article{5995,
abstract = {Motivation
Computational prediction of the effect of mutations on protein stability is used by researchers in many fields. The utility of the prediction methods is affected by their accuracy and bias. Bias, a systematic shift of the predicted change of stability, has been noted as an issue for several methods, but has not been investigated systematically. Presence of the bias may lead to misleading results especially when exploring the effects of combination of different mutations.
Results
Here we use a protocol to measure the bias as a function of the number of introduced mutations. It is based on a self-consistency test of the reciprocity of the effect of a mutation. An advantage of the used approach is that it relies solely on crystal structures without experimentally measured stability values. We applied the protocol to four popular algorithms predicting change of protein stability upon mutation, FoldX, Eris, Rosetta and I-Mutant, and found an inherent bias. For one program, FoldX, we manage to substantially reduce the bias using additional relaxation by Modeller. Authors using algorithms for predicting effects of mutations should be aware of the bias described here.},
author = {Usmanova, Dinara R and Bogatyreva, Natalya S and Ariño Bernad, Joan and Eremina, Aleksandra A and Gorshkova, Anastasiya A and Kanevskiy, German M and Lonishin, Lyubov R and Meister, Alexander V and Yakupova, Alisa G and Kondrashov, Fyodor and Ivankov, Dmitry},
issn = {1367-4803},
journal = {Bioinformatics},
number = {21},
pages = {3653--3658},
publisher = {Oxford University Press},
title = {{Self-consistency test reveals systematic bias in programs for prediction change of stability upon mutation}},
doi = {10.1093/bioinformatics/bty340},
volume = {34},
year = {2018},
}
@article{5996,
abstract = {In pipes, turbulence sets in despite the linear stability of the laminar Hagen–Poiseuille flow. The Reynolds number ($Re$) for which turbulence first appears in a given experiment – the ‘natural transition point’ – depends on imperfections of the set-up, or, more precisely, on the magnitude of finite amplitude perturbations. At onset, turbulence typically only occupies a certain fraction of the flow, and this fraction equally is found to differ from experiment to experiment. Despite these findings, Reynolds proposed that after sufficiently long times, flows may settle to steady conditions: below a critical velocity, flows should (regardless of initial conditions) always return to laminar, while above this velocity, eddying motion should persist. As will be shown, even in pipes several thousand diameters long, the spatio-temporal intermittent flow patterns observed at the end of the pipe strongly depend on the initial conditions, and there is no indication that different flow patterns would eventually settle to a (statistical) steady state. Exploiting the fact that turbulent puffs do not age (i.e. they are memoryless), we continuously recreate the puff sequence exiting the pipe at the pipe entrance, and in doing so introduce periodic boundary conditions for the puff pattern. This procedure allows us to study the evolution of the flow patterns for arbitrary long times, and we find that after times in excess of $10^{7}$ advective time units, indeed a statistical steady state is reached. Although the resulting flows remain spatio-temporally intermittent, puff splitting and decay rates eventually reach a balance, so that the turbulent fraction fluctuates around a well-defined level which only depends on $Re$. In accordance with Reynolds’ proposition, we find that at lower $Re$ (here $Re \leq 2020$), flows eventually always resume to laminar, while for higher $Re$ ($Re \geq 2060$), turbulence persists. The critical point for pipe flow hence falls in the interval of $2020 < Re_{c} < 2060$, which is in very good agreement with the recently proposed value of $Re_{c} = 2040$. The latter estimate was based on single-puff statistics and entirely neglected puff interactions. Unlike in typical contact processes where such interactions strongly affect the percolation threshold, in pipe flow, the critical point is only marginally influenced. Interactions, on the other hand, are responsible for the approach to the statistical steady state. As shown, they strongly affect the resulting flow patterns, where they cause ‘puff clustering’, and these regions of large puff densities are observed to travel across the puff pattern in a wave-like fashion.},
author = {Vasudevan, Mukund and Hof, Björn},
issn = {0022-1120},
journal = {Journal of Fluid Mechanics},
pages = {76--94},
publisher = {Cambridge University Press},
title = {{The critical point of the transition to turbulence in pipe flow}},
doi = {10.1017/jfm.2017.923},
volume = {839},
year = {2018},
}
@article{5998,
abstract = {Genome amplification and cellular senescence are commonly associated with pathological processes. While physiological roles for polyploidization and senescence have been described in mouse development, controversy exists over their significance in humans. Here, we describe tetraploidization and senescence as phenomena of normal human placenta development. During pregnancy, placental extravillous trophoblasts (EVTs) invade the pregnant endometrium, termed decidua, to establish an adapted microenvironment required for the developing embryo. This process is critically dependent on continuous cell proliferation and differentiation, which is thought to follow the classical model of cell cycle arrest prior to terminal differentiation. Strikingly, flow cytometry and DNAseq revealed that EVT formation is accompanied with a genome-wide polyploidization, independent of mitotic cycles. DNA replication in these cells was analysed by a fluorescent cell-cycle indicator reporter system, cell cycle marker expression and EdU incorporation. Upon invasion into the decidua, EVTs widely lose their replicative potential and enter a senescent state characterized by high senescence-associated (SA) β-galactosidase activity, induction of a SA secretory phenotype as well as typical metabolic alterations. Furthermore, we show that the shift from endocycle-dependent genome amplification to growth arrest is disturbed in androgenic complete hydatidiform moles (CHM), a hyperplastic pregnancy disorder associated with increased risk of developing choriocarcinoma. Senescence is decreased in CHM-EVTs, accompanied by exacerbated endoreduplication and hyperploidy. We propose induction of cellular senescence as a ploidy-limiting mechanism during normal human placentation and unravel a link between excessive polyploidization and reduced senescence in CHM.},
author = {Velicky, Philipp and Meinhardt, Gudrun and Plessl, Kerstin and Vondra, Sigrid and Weiss, Tamara and Haslinger, Peter and Lendl, Thomas and Aumayr, Karin and Mairhofer, Mario and Zhu, Xiaowei and Schütz, Birgit and Hannibal, Roberta L. and Lindau, Robert and Weil, Beatrix and Ernerudh, Jan and Neesen, Jürgen and Egger, Gerda and Mikula, Mario and Röhrl, Clemens and Urban, Alexander E. and Baker, Julie and Knöfler, Martin and Pollheimer, Jürgen},
issn = {1553-7404},
journal = {PLOS Genetics},
number = {10},
publisher = {Public Library of Science},
title = {{Genome amplification and cellular senescence are hallmarks of human placenta development}},
doi = {10.1371/journal.pgen.1007698},
volume = {14},
year = {2018},
}
@article{5999,
abstract = {We introduce for each quiver Q and each algebraic oriented cohomology theory A, the cohomological Hall algebra (CoHA) of Q, as the A-homology of the moduli of representations of the preprojective algebra of Q. This generalizes the K-theoretic Hall algebra of commuting varieties defined by Schiffmann-Vasserot. When A is the Morava K-theory, we show evidence that this algebra is a candidate for Lusztig's reformulated conjecture on modular representations of algebraic groups.
We construct an action of the preprojective CoHA on the A-homology of Nakajima quiver varieties. We compare this with the action of the Borel subalgebra of Yangian when A is the intersection theory. We also give a shuffle algebra description of this CoHA in terms of the underlying formal group law of A. As applications, we obtain a shuffle description of the Yangian. },
author = {Yang, Yaping and Zhao, Gufang},
issn = {0024-6115},
journal = {Proceedings of the London Mathematical Society},
number = {5},
pages = {1029--1074},
publisher = {Oxford University Press},
title = {{The cohomological Hall algebra of a preprojective algebra}},
doi = {10.1112/plms.12111},
volume = {116},
year = {2018},
}
@article{6,
abstract = {Lesion and electrode location verification are traditionally done via histological examination of stained brain slices, a time-consuming procedure that requires manual estimation. Here, we describe a simple, straightforward method for quantifying lesions and locating electrodes in the brain that is less laborious and yields more detailed results. Whole brains are stained with osmium tetroxide, embedded in resin, and imaged with a micro-CT scanner. The scans result in 3D digital volumes of the brains with resolutions and virtual section thicknesses dependent on the sample size (12-15 and 5-6 µm per voxel for rat and zebra finch brains, respectively). Surface and deep lesions can be characterized, and single tetrodes, tetrode arrays, electrolytic lesions, and silicon probes can also be localized. Free and proprietary software allows experimenters to examine the sample volume from any plane and segment the volume manually or automatically. Because this method generates whole brain volume, lesions and electrodes can be quantified to a much higher degree than in current methods, which will help standardize comparisons within and across studies.},
author = {Masís, Javier and Mankus, David and Wolff, Steffen and Guitchounts, Grigori and Jösch, Maximilian A and Cox, David},
journal = {Journal of Visualized Experiments (JoVE)},
publisher = {NLM},
title = {{A micro-CT-based method for characterising lesions and locating electrodes in small animal brains}},
doi = {10.3791/58585},
volume = {141},
year = {2018},
}
@inbook{60,
abstract = {Model checking is a computer-assisted method for the analysis of dynamical systems that can be modeled by state-transition systems. Drawing from research traditions in mathematical logic, programming languages, hardware design, and theoretical computer science, model checking is now widely used for the verification of hardware and software in industry. This chapter is an introduction and short survey of model checking. The chapter aims to motivate and link the individual chapters of the handbook, and to provide context for readers who are not familiar with model checking.},
author = {Clarke, Edmund and Henzinger, Thomas A and Veith, Helmut},
booktitle = {Handbook of Model Checking},
editor = {Clarke, Edmund and Henzinger, Thomas A and Veith, Helmut and Bloem, Roderick},
pages = {1--26},
publisher = {Springer},
title = {{Introduction to model checking}},
doi = {10.1007/978-3-319-10575-8_1},
year = {2018},
}
@article{6001,
abstract = {The concurrent memory reclamation problem is that of devising a way for a deallocating thread to verify that no other concurrent threads hold references to a memory block being deallocated. To date, in the absence of automatic garbage collection, there is no satisfactory solution to this problem; existing tracking methods like hazard pointers, reference counters, or epoch-based techniques like RCU are either prohibitively expensive or require significant programming expertise to the extent that implementing them efficiently can be worthy of a publication. None of the existing techniques are automatic or even semi-automated.
In this article, we take a new approach to concurrent memory reclamation. Instead of manually tracking access to memory locations as done in techniques like hazard pointers, or restricting shared accesses to specific epoch boundaries as in RCU, our algorithm, called ThreadScan, leverages operating system signaling to automatically detect which memory locations are being accessed by concurrent threads.
Initial empirical evidence shows that ThreadScan scales surprisingly well and requires negligible programming effort beyond the standard use of Malloc and Free.},
author = {Alistarh, Dan-Adrian and Leiserson, William and Matveev, Alexander and Shavit, Nir},
issn = {2329-4949},
journal = {ACM Transactions on Parallel Computing},
number = {4},
publisher = {Association for Computing Machinery},
title = {{ThreadScan: Automatic and scalable memory reclamation}},
doi = {10.1145/3201897},
volume = {4},
year = {2018},
}
@article{6002,
abstract = {The Bogoliubov free energy functional is analysed. The functional serves as a model of a translation-invariant Bose gas at positive temperature. We prove the existence of minimizers in the case of repulsive interactions given by a sufficiently regular two-body potential. Furthermore, we prove the existence of a phase transition in this model and provide its phase diagram.},
author = {Napiórkowski, Marcin M and Reuvers, Robin and Solovej, Jan Philip},
issn = {0003-9527},
journal = {Archive for Rational Mechanics and Analysis},
number = {3},
pages = {1037--1090},
publisher = {Springer Nature},
title = {{The Bogoliubov free energy functional I: Existence of minimizers and phase diagram}},
doi = {10.1007/s00205-018-1232-6},
volume = {229},
year = {2018},
}
@article{6003,
abstract = {Digital fabrication devices are powerful tools for creating tangible reproductions of 3D digital models. Most available printing technologies aim at producing an accurate copy of a tridimensional shape. However, fabrication technologies can also be used to create a stylistic representation of a digital shape. We refer to this class of methods as ‘stylized fabrication methods’. These methods abstract geometric and physical features of a given shape to create an unconventional representation, to produce an optical illusion or to devise a particular interaction with the fabricated model. In this state‐of‐the‐art report, we classify and overview this broad and emerging class of approaches and also propose possible directions for future research.},
author = {Bickel, Bernd and Cignoni, Paolo and Malomo, Luigi and Pietroni, Nico},
issn = {0167-7055},
journal = {Computer Graphics Forum},
number = {6},
pages = {325--342},
publisher = {Wiley},
title = {{State of the art on stylized fabrication}},
doi = {10.1111/cgf.13327},
volume = {37},
year = {2018},
}
@inproceedings{6005,
abstract = {Network games are widely used as a model for selfish resource-allocation problems. In the classical model, each player selects a path connecting her source and target vertices. The cost of traversing an edge depends on the load; namely, the number of players that traverse it. Thus, it abstracts the fact that different users may use a resource at different times and for different durations, which plays an important role in determining the costs of the users in reality. For example, when transmitting packets in a communication network, routing traffic in a road network, or processing a task in a production system, actual sharing and congestion of resources crucially depends on time. In [13], we introduced timed network games, which add a time component to network games. Each vertex v in the network is associated with a cost function, mapping the load on v to the price that a player pays for staying in v for one time unit with this load. Each edge in the network is guarded by the time intervals in which it can be traversed, which forces the players to spend time in the vertices. In this work we significantly extend the way time can be referred to in timed network games. In the model we study, the network is equipped with clocks, and, as in timed automata, edges are guarded by constraints on the values of the clocks, and their traversal may involve a reset of some clocks. We argue that the stronger model captures many realistic networks. The addition of clocks breaks the techniques we developed in [13] and we develop new techniques in order to show that positive results on classic network games carry over to the stronger timed setting.},
author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna},
issn = {1868-8969},
booktitle = {43rd International Symposium on Mathematical Foundations of Computer Science (MFCS 2018)},
location = {Liverpool, United Kingdom},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Timed network games with clocks}},
doi = {10.4230/LIPICS.MFCS.2018.23},
volume = {117},
year = {2018},
}
@article{6006,
abstract = {Network games (NGs) are played on directed graphs and are extensively used in network design and analysis. Search problems for NGs include finding special strategy profiles such as a Nash equilibrium and a globally-optimal solution. The networks modeled by NGs may be huge. In formal verification, abstraction has proven to be an extremely effective technique for reasoning about systems with big and even infinite state spaces. We describe an abstraction-refinement methodology for reasoning about NGs. Our methodology is based on an abstraction function that maps the state space of an NG to a much smaller state space. We search for a global optimum and a Nash equilibrium by reasoning on an under- and an over-approximation defined on top of this smaller state space. When the approximations are too coarse to find such profiles, we refine the abstraction function. We extend the abstraction-refinement methodology to labeled networks, where the objectives of the players are regular languages. Our experimental results demonstrate the effectiveness of the methodology. },
author = {Avni, Guy and Guha, Shibashis and Kupferman, Orna},
issn = {2073-4336},
journal = {Games},
number = {3},
publisher = {MDPI AG},
title = {{An abstraction-refinement methodology for reasoning about network games}},
doi = {10.3390/g9030039},
volume = {9},
year = {2018},
}
@article{6010,
abstract = {The optic tectum (TeO), or superior colliculus, is a multisensory midbrain center that organizes spatially orienting responses to relevant stimuli. To define the stimulus with the highest priority at each moment, a network of reciprocal connections between the TeO and the isthmi promotes competition between concurrent tectal inputs. In the avian midbrain, the neurons mediating enhancement and suppression of tectal inputs are located in separate isthmic nuclei, facilitating the analysis of the neural processes that mediate competition. A specific subset of radial neurons in the intermediate tectal layers relay retinal inputs to the isthmi, but at present it is unclear whether separate neurons innervate individual nuclei or a single neural type sends a common input to several of them. In this study, we used in vitro neural tracing and cell-filling experiments in chickens to show that single neurons innervate, via axon collaterals, the three nuclei that comprise the isthmotectal network. This demonstrates that the input signals representing the strength of the incoming stimuli are simultaneously relayed to the mechanisms promoting both enhancement and suppression of the input signals. By performing in vivo recordings in anesthetized chicks, we also show that this common input generates synchrony between both antagonistic mechanisms, demonstrating that activity enhancement and suppression are closely coordinated. From a computational point of view, these results suggest that these tectal neurons constitute integrative nodes that combine inputs from different sources to drive in parallel several concurrent neural processes, each performing complementary functions within the network through different firing patterns and connectivity.},
author = {Garrido-Charad, Florencia and Vega Zuniga, Tomas A and Gutiérrez-Ibáñez, Cristián and Fernandez, Pedro and López-Jury, Luciana and González-Cabrera, Cristian and Karten, Harvey J. and Luksch, Harald and Marín, Gonzalo J.},
issn = {0027-8424},
journal = {Proceedings of the National Academy of Sciences},
number = {32},
pages = {E7615--E7623},
publisher = {National Academy of Sciences},
title = {{“Shepherd’s crook” neurons drive and synchronize the enhancing and suppressive mechanisms of the midbrain stimulus selection network}},
doi = {10.1073/pnas.1804517115},
volume = {115},
year = {2018},
}
@inproceedings{6011,
abstract = {We establish a data-dependent notion of algorithmic stability for Stochastic Gradient Descent (SGD), and employ it to develop novel generalization bounds. This is in contrast to previous distribution-free algorithmic stability results for SGD which depend on the worst-case constants. By virtue of the data-dependent argument, our bounds provide new insights into learning with SGD on convex and non-convex problems. In the convex case, we show that the bound on the generalization error depends on the risk at the initialization point. In the non-convex case, we prove that the expected curvature of the objective function around the initialization point has crucial influence on the generalization error. In both cases, our results suggest a simple data-driven strategy to stabilize SGD by pre-screening its initialization. As a corollary, our results allow us to show optimistic generalization bounds that exhibit fast convergence rates for SGD subject to a vanishing empirical risk and low noise of stochastic gradient. },
author = {Kuzborskij, Ilja and Lampert, Christoph},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
location = {Stockholm, Sweden},
pages = {2815--2824},
publisher = {International Machine Learning Society},
title = {{Data-dependent stability of stochastic gradient descent}},
volume = {80},
year = {2018},
}
@inproceedings{6012,
abstract = {We present an approach to identify concise equations from data using a shallow neural network approach. In contrast to ordinary black-box regression, this approach allows understanding functional relations and generalizing them from observed data to unseen parts of the parameter space. We show how to extend the class of learnable equations for a recently proposed equation learning network to include divisions, and we improve the learning and model selection strategy to be useful for challenging real-world data. For systems governed by analytical expressions, our method can in many cases identify the true underlying equation and extrapolate to unseen domains. We demonstrate its effectiveness by experiments on a cart-pendulum system, where only 2 random rollouts are required to learn the forward dynamics and successfully achieve the swing-up task.},
author = {Sahoo, Subham and Lampert, Christoph and Martius, Georg S},
booktitle = {Proceedings of the 35th International Conference on Machine Learning},
location = {Stockholm, Sweden},
pages = {4442--4450},
publisher = {International Machine Learning Society},
title = {{Learning equations for extrapolation and control}},
volume = {80},
year = {2018},
}
@inproceedings{6031,
abstract = {We introduce Clover, a new library for efficient computation using low-precision data, providing mathematical routines required by fundamental methods in optimization and sparse recovery. Our library faithfully implements variants of stochastic quantization that guarantee convergence at low precision, and supports data formats from 4-bit quantized to 32-bit IEEE-754 on current Intel processors. In particular, we show that 4-bit can be implemented efficiently using Intel AVX despite the lack of native support for this data format. Experimental results with dot product, matrix-vector multiplication (MVM), gradient descent (GD), and iterative hard thresholding (IHT) demonstrate that the attainable speedups are in many cases close to linear with respect to the reduction of precision due to reduced data movement. Finally, for GD and IHT, we show examples of absolute speedup achieved by 4-bit versus 32-bit, by iterating until a given target error is achieved.},
author = {Stojanov, Alen and Smith, Tyler Michael and Alistarh, Dan-Adrian and Püschel, Markus},
booktitle = {2018 IEEE International Workshop on Signal Processing Systems},
location = {Cape Town, South Africa},
publisher = {IEEE},
title = {{Fast quantized arithmetic on x86: Trading compute for data movement}},
doi = {10.1109/SiPS.2018.8598402},
volume = {2018-October},
year = {2018},
}
@article{6032,
abstract = {The main result of this article is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Using a reduction to even Δ-matroids, we then extend the tractability result to larger classes of Δ-matroids that we call efficiently coverable. It properly includes classes that were known to be tractable before, namely, co-independent, compact, local, linear, and binary, with the following caveat: We represent Δ-matroids by lists of tuples, while the last two use a representation by matrices. Since an n × n matrix can represent exponentially many tuples, our tractability result is not strictly stronger than the known algorithm for linear and binary Δ-matroids.},
author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
journal = {ACM Transactions on Algorithms},
number = {2},
publisher = {ACM},
title = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
doi = {10.1145/3230649},
volume = {15},
year = {2018},
}
@article{606,
abstract = {We establish the existence of a global solution for a new family of fluid-like equations, which are obtained in certain regimes as the mean-field evolution of the supercurrent density in a (2D section of a) type-II superconductor with pinning and with imposed electric current. We also consider general vortex-sheet initial data, and investigate the uniqueness and regularity properties of the solution. For some choice of parameters, the equation under investigation coincides with the so-called lake equation from 2D shallow water fluid dynamics, and our analysis then leads to a new existence result for rough initial data.},
author = {Duerinckx, Mitia and Fischer, Julian L},
journal = {Annales de l'Institut Henri Poincare (C) Non Linear Analysis},
number = {5},
pages = {1267--1319},
publisher = {Elsevier},
title = {{Well-posedness for mean-field evolutions arising in superconductivity}},
doi = {10.1016/j.anihpc.2017.11.004},
volume = {35},
year = {2018},
}
@article{7,
abstract = {Animal social networks are shaped by multiple selection pressures, including the need to ensure efficient communication and functioning while simultaneously limiting disease transmission. Social animals could potentially further reduce epidemic risk by altering their social networks in the presence of pathogens, yet there is currently no evidence for such pathogen-triggered responses. We tested this hypothesis experimentally in the ant Lasius niger using a combination of automated tracking, controlled pathogen exposure, transmission quantification, and temporally explicit simulations. Pathogen exposure induced behavioral changes in both exposed ants and their nestmates, which helped contain the disease by reinforcing key transmission-inhibitory properties of the colony's contact network. This suggests that social network plasticity in response to pathogens is an effective strategy for mitigating the effects of disease in social groups.},
author = {Stroeymeyt, Nathalie and Grasse, Anna V and Crespi, Alessandro and Mersch, Danielle and Cremer, Sylvia and Keller, Laurent},
issn = {1095-9203},
journal = {Science},
number = {6417},
pages = {941--945},
publisher = {NLM},
title = {{Social network plasticity decreases disease transmission in a eusocial insect}},
doi = {10.1126/science.aat4793},
volume = {362},
year = {2018},
}
@article{70,
abstract = {We consider the totally asymmetric simple exclusion process in a critical scaling parametrized by $a \geq 0$, which creates a shock in the particle density of order $aT^{-1/3}$, $T$ the observation time. When starting from step initial data, we provide bounds on the limiting law which in particular imply that in the double limit $\lim_{a \to \infty} \lim_{T \to \infty}$ one recovers the product limit law and the degeneration of the correlation length observed at shocks of order 1. This result is shown to apply to a general last-passage percolation model. We also obtain bounds on the two-point functions of several Airy processes.},
author = {Nejjar, Peter},
issn = {1980-0436},
journal = {Latin American Journal of Probability and Mathematical Statistics},
number = {2},
pages = {1311--1334},
publisher = {ALEA},
title = {{Transition to shocks in TASEP and decoupling of last passage times}},
doi = {10.30757/ALEA.v15-49},
volume = {15},
year = {2018},
}
@article{703,
abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; (ii) non-optimal if they provably do not belong to any solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method is well-scalable and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.},
author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan},
issn = {0162-8828},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
number = {7},
pages = {1668--1682},
publisher = {IEEE},
title = {{Maximum persistency via iterative relaxed inference with graphical models}},
doi = {10.1109/TPAMI.2017.2730884},
volume = {40},
year = {2018},
}
@article{705,
abstract = {Although dopamine receptors D1 and D2 play key roles in hippocampal function, their synaptic localization within the hippocampus has not been fully elucidated. In order to understand precise functions of pre- or postsynaptic dopamine receptors (DRs), the development of protocols to differentiate pre- and postsynaptic DRs is essential. So far, most studies on determination and quantification of DRs did not discriminate between subsynaptic localization. Therefore, the aim of the study was to generate a robust workflow for the localization of DRs. This work provides the basis for future work on hippocampal DRs, in light that DRs may have different functions at pre- or postsynaptic sites. Synaptosomes from rat hippocampi isolated by a sucrose gradient protocol were prepared for super-resolution direct stochastic optical reconstruction microscopy (dSTORM) using Bassoon as a presynaptic zone and Homer1 as postsynaptic density marker. Direct labeling of primary validated antibodies against dopamine receptors D1 (D1R) and D2 (D2R) with Alexa Fluor 594 enabled unequivocal assignment of D1R and D2R to both, pre- and postsynaptic sites. D1R immunoreactivity clusters were observed within the presynaptic active zone as well as at perisynaptic sites at the edge of the presynaptic active zone. The results may be useful for the interpretation of previous studies and the design of future work on DRs in the hippocampus. Moreover, the reduction of the complexity of brain tissue by the use of synaptosomal preparations and dSTORM technology may represent a useful tool for synaptic localization of brain proteins.},
author = {Miklosi, Andras and Del Favero, Giorgia and Bulat, Tanja and Höger, Harald and Shigemoto, Ryuichi and Marko, Doris and Lubec, Gert},
journal = {Molecular Neurobiology},
number = {6},
pages = {4857--4869},
publisher = {Springer},
title = {{Super resolution microscopical localization of dopamine receptors 1 and 2 in rat hippocampal synaptosomes}},
doi = {10.1007/s12035-017-0688-y},
volume = {55},
year = {2018},
}
@inproceedings{7116,
abstract = {Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs—lowering communication precision could decrease communication overheads and improve scalability; but, on the other hand, it can also reduce the accuracy of training. In this paper, we study this trade-off space, and ask: Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss? From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet), number of GPUs (e.g., 2 vs. 8 GPUs), machine configurations (e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures (e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speed up provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights.},
author = {Grubic, Demjan and Tam, Leo and Alistarh, Dan-Adrian and Zhang, Ce},
booktitle = {Proceedings of the 21st International Conference on Extending Database Technology},
isbn = {9783893180783},
issn = {2367-2005},
location = {Vienna, Austria},
pages = {145--156},
publisher = {OpenProceedings},
title = {{Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study}},
doi = {10.5441/002/edbt.2018.14},
year = {2018},
}
@inproceedings{7123,
abstract = {Population protocols are a popular model of distributed computing, in which n agents with limited local state interact randomly, and cooperate to collectively compute global predicates. Inspired by recent developments in DNA programming, an extensive series of papers, across different communities, has examined the computability and complexity characteristics of this model. Majority, or consensus, is a central task in this model, in which agents need to collectively reach a decision as to which one of two states A or B had a higher initial count. Two metrics are important: the time that a protocol requires to stabilize to an output decision, and the state space size that each agent requires to do so. It is known that majority requires Ω(log log n) states per agent to allow for fast (poly-logarithmic time) stabilization, and that $O(\log^2 n)$ states are sufficient. Thus, there is an exponential gap between the space upper and lower bounds for this problem. This paper addresses this question.
On the negative side, we provide a new lower bound of Ω(log n) states for any protocol which stabilizes in $O(n^{1-c})$ expected time, for any constant c > 0. This result is conditional on monotonicity and output assumptions, satisfied by all known protocols. Technically, it represents a departure from previous lower bounds, in that it does not rely on the existence of dense configurations. Instead, we introduce a new generalized surgery technique to prove the existence of incorrect executions for any algorithm which would contradict the lower bound. Subsequently, our lower bound also applies to general initial configurations, including ones with a leader. On the positive side, we give a new algorithm for majority which uses O(log n) states, and stabilizes in $O(\log^2 n)$ expected time. Central to the algorithm is a new leaderless phase clock technique, which allows agents to synchronize in phases of Θ(n log n) consecutive interactions using O(log n) states per agent, exploiting a new connection between population protocols and power-of-two-choices load balancing mechanisms. We also employ our phase clock to build a leader election algorithm with a state space of size O(log n), which stabilizes in $O(\log^2 n)$ expected time.},
author = {Alistarh, Dan-Adrian and Aspnes, James and Gelashvili, Rati},
booktitle = {Proceedings of the 29th Annual ACM-SIAM Symposium on Discrete Algorithms},
isbn = {9781611975031},
location = {New Orleans, LA, United States},
pages = {2221--2239},
publisher = {SIAM},
title = {{Space-optimal majority in population protocols}},
doi = {10.1137/1.9781611975031.144},
year = {2018},
}
@article{723,
abstract = {Escaping local optima is one of the major obstacles to function optimisation. Using the metaphor of a fitness landscape, local optima correspond to hills separated by fitness valleys that have to be overcome. We define a class of fitness valleys of tunable difficulty by considering their length, representing the Hamming path between the two optima and their depth, the drop in fitness. For this function class we present a runtime comparison between stochastic search algorithms using different search strategies. The (1+1) EA is a simple and well-studied evolutionary algorithm that has to jump across the valley to a point of higher fitness because it does not accept worsening moves (elitism). In contrast, the Metropolis algorithm and the Strong Selection Weak Mutation (SSWM) algorithm, a famous process in population genetics, are both able to cross the fitness valley by accepting worsening moves. We show that the runtime of the (1+1) EA depends critically on the length of the valley while the runtimes of the non-elitist algorithms depend crucially on the depth of the valley. Moreover, we show that both SSWM and Metropolis can also efficiently optimise a rugged function consisting of consecutive valleys.},
author = {Oliveto, Pietro and Paixao, Tiago and Pérez Heredia, Jorge and Sudholt, Dirk and Trubenova, Barbora},
journal = {Algorithmica},
number = {5},
pages = {1604--1633},
publisher = {Springer},
title = {{How to escape local optima in black box optimisation when non elitism outperforms elitism}},
doi = {10.1007/s00453-017-0369-2},
volume = {80},
year = {2018},
}
@article{738,
abstract = {This paper is devoted to automatic competitive analysis of real-time scheduling algorithms for firm-deadline tasksets, where only completed tasks contribute some utility to the system. Given such a taskset T, the competitive ratio of an on-line scheduling algorithm A for T is the worst-case utility ratio of A over the utility achieved by a clairvoyant algorithm. We leverage the theory of quantitative graph games to address the competitive analysis and competitive synthesis problems. For the competitive analysis case, given any taskset T and any finite-memory on-line scheduling algorithm A, we show that the competitive ratio of A in T can be computed in polynomial time in the size of the state space of A. Our approach is flexible as it also provides ways to model meaningful constraints on the released task sequences that determine the competitive ratio. We provide an experimental study of many well-known on-line scheduling algorithms, which demonstrates the feasibility of our competitive analysis approach that effectively replaces human ingenuity (required for finding worst-case scenarios) by computing power. For the competitive synthesis case, we are just given a taskset T, and the goal is to automatically synthesize an optimal on-line scheduling algorithm A, i.e., one that guarantees the largest competitive ratio possible for T. We show how the competitive synthesis problem can be reduced to a two-player graph game with partial information, and establish that the computational complexity of solving this game is NP-complete. The competitive synthesis problem is hence in NP in the size of the state space of the non-deterministic labeled transition system encoding the taskset. Overall, the proposed framework assists in the selection of suitable scheduling algorithms for a given taskset, which is in fact the most common situation in real-time systems design.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich},
journal = {Real-Time Systems},
number = {1},
pages = {166--207},
publisher = {Springer},
title = {{Automated competitive analysis of real time scheduling with graph games}},
doi = {10.1007/s11241-017-9293-4},
volume = {54},
year = {2018},
}
@inproceedings{7407,
abstract = {Proofs of space (PoS) [Dziembowski et al., CRYPTO'15] are proof systems where a prover can convince a verifier that he "wastes" disk space. PoS were introduced as a more ecological and economical replacement for proofs of work which are currently used to secure blockchains like Bitcoin. In this work we investigate extensions of PoS which allow the prover to embed useful data into the dedicated space, which later can be recovered. Our first contribution is a security proof for the original PoS from CRYPTO'15 in the random oracle model (the original proof only applied to a restricted class of adversaries which can store a subset of the data an honest prover would store). When this PoS is instantiated with recent constructions of maximally depth robust graphs, our proof implies basically optimal security. As a second contribution we show three different extensions of this PoS where useful data can be embedded into the space required by the prover. Our security proof for the PoS extends (non-trivially) to these constructions. We discuss how some of these variants can be used as proofs of catalytic space (PoCS), a notion we put forward in this work, and which basically is a PoS where most of the space required by the prover can be used to backup useful data. Finally we discuss how one of the extensions is a candidate construction for a proof of replication (PoR), a proof system recently suggested in the Filecoin whitepaper.},
author = {Pietrzak, Krzysztof Z},
booktitle = {10th Innovations in Theoretical Computer Science Conference (ITCS 2019)},
isbn = {978-3-95977-095-8},
issn = {1868-8969},
location = {San Diego, CA, United States},
pages = {59:1--59:25},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Proofs of catalytic space}},
doi = {10.4230/LIPIcs.ITCS.2019.59},
volume = {124},
year = {2018},
}
@article{742,
abstract = {We give a detailed and easily accessible proof of Gromov’s Topological Overlap Theorem. Let X be a finite simplicial complex or, more generally, a finite polyhedral cell complex of dimension d. Informally, the theorem states that if X has sufficiently strong higher-dimensional expansion properties (which generalize edge expansion of graphs and are defined in terms of cellular cochains of X) then X has the following topological overlap property: for every continuous map $X \to \mathbb{R}^d$ there exists a point $p \in \mathbb{R}^d$ that is contained in the images of a positive fraction $\mu > 0$ of the d-cells of X. More generally, the conclusion holds if $\mathbb{R}^d$ is replaced by any d-dimensional piecewise-linear manifold M, with a constant $\mu > 0$ that depends only on d and on the expansion properties of X, but not on M.},
author = {Dotterrer, Dominic and Kaufman, Tali and Wagner, Uli},
journal = {Geometriae Dedicata},
number = {1},
pages = {307--317},
publisher = {Springer},
title = {{On expansion and topological overlap}},
doi = {10.1007/s10711-017-0291-4},
volume = {195},
year = {2018},
}
@unpublished{75,
abstract = {We prove that any convex body in the plane can be partitioned into m convex parts of equal areas and perimeters for any integer $m \geq 2$; this result was previously known for prime powers $m = p^k$. We also give a higher-dimensional generalization.},
author = {Akopyan, Arseniy and Avvakumov, Sergey and Karasev, Roman},
note = {arXiv preprint},
pages = {11},
publisher = {arXiv},
title = {{Convex fair partitions into arbitrary number of pieces}},
year = {2018},
}
@article{76,
abstract = {Consider a fully-connected synchronous distributed system consisting of n nodes, where up to f nodes may be faulty and every node starts in an arbitrary initial state. In the synchronous C-counting problem, all nodes need to eventually agree on a counter that is increased by one modulo C in each round for given C>1. In the self-stabilising firing squad problem, the task is to eventually guarantee that all non-faulty nodes have simultaneous responses to external inputs: if a subset of the correct nodes receive an external “go” signal as input, then all correct nodes should agree on a round (in the not-too-distant future) in which to jointly output a “fire” signal. Moreover, no node should generate a “fire” signal without some correct node having previously received a “go” signal as input. We present a framework reducing both tasks to binary consensus at very small cost. For example, we obtain a deterministic algorithm for self-stabilising Byzantine firing squads with optimal resilience f<n/3, asymptotically optimal stabilisation and response time O(f), and message size O(log f). As our framework does not restrict the type of consensus routines used, we also obtain efficient randomised solutions.},
author = {Lenzen, Christoph and Rybicki, Joel},
issn = {0178-2770},
journal = {Distributed Computing},
publisher = {Springer},
title = {{Near-optimal self-stabilising counting and firing squads}},
doi = {10.1007/s00446-018-0342-6},
year = {2018},
}
@article{77,
abstract = {Holes confined in quantum dots have gained considerable interest in the past few years due to their potential as spin qubits. Here we demonstrate two-axis control of a spin 3/2 qubit in natural Ge. The qubit is formed in a hut wire double quantum dot device. The Pauli spin blockade principle allowed us to demonstrate electric dipole spin resonance by applying a radio frequency electric field to one of the electrodes defining the double quantum dot. Coherent hole spin oscillations with Rabi frequencies reaching 140 MHz are demonstrated and dephasing times of 130 ns are measured. The reported results emphasize the potential of Ge as a platform for fast and electrically tunable hole spin qubit devices.},
author = {Watzinger, Hannes and Kukucka, Josip and Vukusic, Lada and Gao, Fei and Wang, Ting and Schäffler, Friedrich and Zhang, Jian and Katsaros, Georgios},
issn = {2041-1723},
journal = {Nature Communications},
number = {3902},
publisher = {Nature Publishing Group},
title = {{A germanium hole spin qubit}},
doi = {10.1038/s41467-018-06418-4},
volume = {9},
year = {2018},
}
@article{106,
abstract = {The goal of this article is to introduce the reader to the theory of intrinsic geometry of convex surfaces. We illustrate the power of the tools by proving a theorem on convex surfaces containing an arbitrarily long closed simple geodesic. Let us remind ourselves that a curve in a surface is called geodesic if every sufficiently short arc of the curve is length minimizing; if, in addition, it has no self-intersections, we call it simple geodesic. A tetrahedron with equal opposite edges is called isosceles. The axiomatic method of Alexandrov geometry allows us to work with the metrics of convex surfaces directly, without approximating it first by a smooth or polyhedral metric. Such approximations destroy the closed geodesics on the surface; therefore it is difficult (if at all possible) to apply approximations in the proof of our theorem. On the other hand, a proof in the smooth or polyhedral case usually admits a translation into Alexandrov’s language; such translation makes the result more general. In fact, our proof resembles a translation of the proof given by Protasov. Note that the main theorem implies in particular that a smooth convex surface does not have arbitrarily long simple closed geodesics. However we do not know a proof of this corollary that is essentially simpler than the one presented below.},
author = {Akopyan, Arseniy and Petrunin, Anton},
journal = {Mathematical Intelligencer},
number = {3},
pages = {26--31},
publisher = {Springer},
title = {{Long geodesics on convex surfaces}},
doi = {10.1007/s00283-018-9795-5},
volume = {40},
year = {2018},
}
@article{1064,
abstract = {In 1945, A.W. Goodman and R.E. Goodman proved the following conjecture by P. Erdős: Given a family of (round) disks of radii $r_1, \dots, r_n$ in the plane, it is always possible to cover them by a disk of radius $R = \sum r_i$, provided they cannot be separated into two subfamilies by a straight line disjoint from the disks. In this note we show that essentially the same idea may work for different analogues and generalizations of their result. In particular, we prove the following: Given a family of positive homothetic copies of a fixed convex body $K \subset \mathbb{R}^d$ with homothety coefficients $\tau_1, \dots, \tau_n > 0$, it is always possible to cover them by a translate of $\frac{d+1}{2}\left(\sum \tau_i\right) K$, provided they cannot be separated into two subfamilies by a hyperplane disjoint from the homothets.},
author = {Akopyan, Arseniy and Balitskiy, Alexey and Grigorev, Mikhail},
issn = {1432-0444},
journal = {Discrete \& Computational Geometry},
number = {4},
pages = {1001--1009},
publisher = {Springer},
title = {{On the circle covering theorem by A.W. Goodman and R.E. Goodman}},
doi = {10.1007/s00454-017-9883-x},
volume = {59},
year = {2018},
}
@article{107,
abstract = {We introduce the notion of “non-malleable codes” which relaxes the notion of error correction and error detection. Informally, a code is non-malleable if the message contained in a modified codeword is either the original message, or a completely unrelated value. In contrast to error correction and error detection, non-malleability can be achieved for very rich classes of modifications. We construct an efficient code that is non-malleable with respect to modifications that affect each bit of the codeword arbitrarily (i.e., leave it untouched, flip it, or set it to either 0 or 1), but independently of the value of the other bits of the codeword. Using the probabilistic method, we also show a very strong and general statement: there exists a non-malleable code for every “small enough” family F of functions via which codewords can be modified. Although this probabilistic method argument does not directly yield efficient constructions, it gives us efficient non-malleable codes in the random-oracle model for very general classes of tampering functions—e.g., functions where every bit in the tampered codeword can depend arbitrarily on any 99% of the bits in the original codeword. As an application of non-malleable codes, we show that they provide an elegant algorithmic solution to the task of protecting functionalities implemented in hardware (e.g., signature cards) against “tampering attacks.” In such attacks, the secret state of a physical system is tampered, in the hopes that future interaction with the modified system will reveal some secret information. This problem was previously studied in the work of Gennaro et al. in 2004 under the name “algorithmic tamper proof security” (ATP). We show that non-malleable codes can be used to achieve important improvements over the prior work. In particular, we show that any functionality can be made secure against a large class of tampering attacks, simply by encoding the secret state with a non-malleable code while it is stored in memory.},
author = {Dziembowski, Stefan and Pietrzak, Krzysztof Z and Wichs, Daniel},
issn = {0004-5411},
journal = {Journal of the ACM},
number = {4},
publisher = {ACM},
title = {{Non-malleable codes}},
doi = {10.1145/3178432},
volume = {65},
year = {2018},
}
@inproceedings{108,
abstract = {Universal hashing found a lot of applications in computer science. In cryptography the most important fact about universal families is the so called Leftover Hash Lemma, proved by Impagliazzo, Levin and Luby. In the language of modern cryptography it states that almost universal families are good extractors. In this work we provide a somewhat surprising characterization in the opposite direction. Namely, every extractor with sufficiently good parameters yields a universal family on a noticeable fraction of its inputs. Our proof technique is based on tools from extremal graph theory applied to the 'collision graph' induced by the extractor, and may be of independent interest. We discuss possible applications to the theory of randomness extractors and non-malleable codes.},
author = {Obremski, Maciej and Skorski, Maciej},
booktitle = {2018 IEEE International Symposium on Information Theory (ISIT)},
location = {Vail, CO, United States},
publisher = {IEEE},
title = {{Inverted leftover hash lemma}},
doi = {10.1109/ISIT.2018.8437654},
volume = {2018},
year = {2018},
}
@inbook{10864,
abstract = {We prove that every congruence distributive variety has directed Jónsson terms, and every congruence modular variety has directed Gumm terms. The directed terms we construct witness every case of absorption witnessed by the original Jónsson or Gumm terms. This result is equivalent to a pair of claims about absorption for admissible preorders in congruence distributive and congruence modular varieties, respectively. For finite algebras, these absorption theorems have already seen significant applications, but until now, it was not clear if the theorems hold for general algebras as well. Our method also yields a novel proof of a result by P. Lipparini about the existence of a chain of terms (which we call Pixley terms) in varieties that are at the same time congruence distributive and k-permutable for some k.},
author = {Kazda, Alexandr and Kozik, Marcin and McKenzie, Ralph and Moore, Matthew},
booktitle = {Don Pigozzi on Abstract Algebraic Logic, Universal Algebra, and Computer Science},
editor = {Czelakowski, Janusz},
isbn = {9783319747712},
issn = {2211-2758},
pages = {203--220},
publisher = {Springer Nature},
series = {Outstanding Contributions to Logic},
title = {{Absorption and directed Jónsson terms}},
doi = {10.1007/978-3-319-74772-9_7},
volume = {16},
year = {2018},
}
@article{10880,
abstract = {Acquisition of evolutionary novelties is a fundamental process for adapting to the external environment and invading new niches and results in the diversification of life, which we can see in the world today. How such novel phenotypic traits are acquired in the course of evolution and are built up in developing embryos has been a central question in biology. Whole-genome duplication (WGD) is a process of genome doubling that supplies raw genetic materials and increases genome complexity. Recently, it has been gradually revealed that WGD and subsequent fate changes of duplicated genes can facilitate phenotypic evolution. Here, we review the current understanding of the relationship between WGD and the acquisition of evolutionary novelties. We show some examples of this link and discuss how WGD and subsequent duplicated genes can facilitate phenotypic evolution as well as when such genomic doubling can be advantageous for adaptation.},
author = {Moriyama, Yuuta and Koshiba-Takeuchi, Kazuko},
issn = {2041-2649},
journal = {Briefings in Functional Genomics},
keywords = {Genetics, Molecular Biology, Biochemistry, General Medicine},
number = {5},
pages = {329--338},
publisher = {Oxford University Press},
title = {{Significance of whole-genome duplications on the emergence of evolutionary novelties}},
doi = {10.1093/bfgp/ely007},
volume = {17},
year = {2018},
}
% NOTE(review): entry 10881 checked against file conventions — author names in
% "Last, First" form, hyphenated ISSN, double-hyphen page range, bare DOI; no
% changes needed. (Text outside @entry{...} braces is ignored by BibTeX.)
@article{10881,
abstract = {Strigolactones (SLs) are a relatively recent addition to the list of plant hormones that control different aspects of plant development. SL signalling is perceived by an α/β hydrolase, DWARF 14 (D14). A close homolog of D14, KARRIKIN INSENSTIVE2 (KAI2), is involved in perception of an uncharacterized molecule called karrikin (KAR). Recent studies in Arabidopsis identified the SUPPRESSOR OF MAX2 1 (SMAX1) and SMAX1-LIKE 7 (SMXL7) to be potential SCF–MAX2 complex-mediated proteasome targets of KAI2 and D14, respectively. Genetic studies on SMXL7 and SMAX1 demonstrated distinct developmental roles for each, but very little is known about these repressors in terms of their sequence features. In this study, we performed an extensive comparative analysis of SMXLs and determined their phylogenetic and evolutionary history in the plant lineage. Our results show that SMXL family members can be sub-divided into four distinct phylogenetic clades/classes, with an ancient SMAX1. Further, we identified the clade-specific motifs that have evolved and that might act as determinants of SL-KAR signalling specificity. These specificities resulted from functional diversities among the clades. Our results suggest that a gradual co-evolution of SMXL members with their upstream receptors D14/KAI2 provided an increased specificity to both the SL perception and response in land plants.},
author = {Moturu, Taraka Ramji and Thula, Sravankumar and Singh, Ravi Kumar and Nodzyński, Tomasz and Vařeková, Radka Svobodová and Friml, Jiří and Simon, Sibu},
issn = {0022-0957},
journal = {Journal of Experimental Botany},
keywords = {Plant Science, Physiology},
number = {9},
pages = {2367--2378},
publisher = {Oxford University Press},
title = {{Molecular evolution and diversification of the SMXL gene family}},
doi = {10.1093/jxb/ery097},
volume = {69},
year = {2018},
}
@inproceedings{11,
abstract = {We report on a novel strategy to derive mean-field limits of quantum mechanical systems in which a large number of particles weakly couple to a second-quantized radiation field. The technique combines the method of counting and the coherent state approach to study the growth of the correlations among the particles and in the radiation field. As an instructional example, we derive the Schrödinger–Klein–Gordon system of equations from the Nelson model with ultraviolet cutoff and possibly massless scalar field. In particular, we prove the convergence of the reduced density matrices (of the nonrelativistic particles and the field bosons) associated with the exact time evolution to the projectors onto the solutions of the Schrödinger–Klein–Gordon equations in trace norm. Furthermore, we derive explicit bounds on the rate of convergence of the one-particle reduced density matrix of the nonrelativistic particles in Sobolev norm.},
author = {Leopold, Nikolai K and Pickl, Peter},
booktitle = {Macroscopic Limits of Quantum Systems},
location = {Munich, Germany},
pages = {185--214},
publisher = {Springer},
series = {Springer Proceedings in Mathematics \& Statistics},
title = {{Mean-field limits of particles in interaction with quantised radiation fields}},
doi = {10.1007/978-3-030-01602-9_9},
volume = {270},
year = {2018},
}
@article{154,
abstract = {We give a lower bound on the ground state energy of a system of two fermions of one species interacting with two fermions of another species via point interactions. We show that there is a critical mass ratio m2 ≈ 0.58 such that the system is stable, i.e., the energy is bounded from below, for m∈[m2,m2−1]. So far it was not known whether this 2 + 2 system exhibits a stable region at all or whether the formation of four-body bound states causes an unbounded spectrum for all mass ratios, similar to the Thomas effect. Our result gives further evidence for the stability of the more general N + M system.},
author = {Moser, Thomas and Seiringer, Robert},
issn = {1572-9656},
journal = {Mathematical Physics, Analysis and Geometry},
number = {3},
publisher = {Springer},
title = {{Stability of the 2+2 fermionic system with point interactions}},
doi = {10.1007/s11040-018-9275-3},
volume = {21},
year = {2018},
}
@inproceedings{155,
abstract = {There is currently significant interest in operating devices in the quantum regime, where their behaviour cannot be explained through classical mechanics. Quantum states, including entangled states, are fragile and easily disturbed by excessive thermal noise. Here we address the question of whether it is possible to create non-reciprocal devices that encourage the flow of thermal noise towards or away from a particular quantum device in a network. Our work makes use of the cascaded systems formalism to answer this question in the affirmative, showing how a three-port device can be used as an effective thermal transistor, and illustrates how this formalism maps onto an experimentally-realisable optomechanical system. Our results pave the way to more resilient quantum devices and to the use of thermal noise as a resource.},
author = {Xuereb, André and Aquilina, Matteo and Barzanjeh, Shabir},
editor = {Andrews, D L and Ostendorf, A and Bain, A J and Nunzi, J M},
location = {Strasbourg, France},
publisher = {SPIE},
title = {{Routing thermal noise through quantum networks}},
doi = {10.1117/12.2309928},
volume = {10672},
year = {2018},
}
@inproceedings{156,
abstract = {Imprecision in timing can sometimes be beneficial: Metric interval temporal logic (MITL), disabling the expression of punctuality constraints, was shown to translate to timed automata, yielding an elementary decision procedure. We show how this principle extends to other forms of dense-time specification using regular expressions. By providing a clean, automaton-based formal framework for non-punctual languages, we are able to recover and extend several results in timed systems. Metric interval regular expressions (MIRE) are introduced, providing regular expressions with non-singular duration constraints. We obtain that MIRE are expressively complete relative to a class of one-clock timed automata, which can be determinized using additional clocks. Metric interval dynamic logic (MIDL) is then defined using MIRE as temporal modalities. We show that MIDL generalizes known extensions of MITL, while translating to timed automata at comparable cost.},
author = {Ferrere, Thomas},
location = {Oxford, UK},
pages = {147 -- 164},
publisher = {Springer},
title = {{The compound interest in relaxing punctuality}},
doi = {10.1007/978-3-319-95582-7_9},
volume = {10951},
year = {2018},
}
@article{157,
abstract = {Social dilemmas occur when incentives for individuals are misaligned with group interests 1-7 . According to the 'tragedy of the commons', these misalignments can lead to overexploitation and collapse of public resources. The resulting behaviours can be analysed with the tools of game theory 8 . The theory of direct reciprocity 9-15 suggests that repeated interactions can alleviate such dilemmas, but previous work has assumed that the public resource remains constant over time. Here we introduce the idea that the public resource is instead changeable and depends on the strategic choices of individuals. An intuitive scenario is that cooperation increases the public resource, whereas defection decreases it. Thus, cooperation allows the possibility of playing a more valuable game with higher payoffs, whereas defection leads to a less valuable game. We analyse this idea using the theory of stochastic games 16-19 and evolutionary game theory. We find that the dependence of the public resource on previous interactions can greatly enhance the propensity for cooperation. For these results, the interaction between reciprocity and payoff feedback is crucial: neither repeated interactions in a constant environment nor single interactions in a changing environment yield similar cooperation rates. Our framework shows which feedbacks between exploitation and environment - either naturally occurring or designed - help to overcome social dilemmas.},
author = {Hilbe, Christian and Šimsa, Štěpán and Chatterjee, Krishnendu and Nowak, Martin},
journal = {Nature},
number = {7713},
pages = {246 -- 249},
publisher = {Nature Publishing Group},
title = {{Evolution of cooperation in stochastic games}},
doi = {10.1038/s41586-018-0277-x},
volume = {559},
year = {2018},
}
@article{158,
abstract = {The angiosperm seed is composed of three genetically distinct tissues: the diploid embryo that originates from the fertilized egg cell, the triploid endosperm that is produced from the fertilized central cell, and the maternal sporophytic integuments that develop into the seed coat1. At the onset of embryo development in Arabidopsis thaliana, the zygote divides asymmetrically, producing a small apical embryonic cell and a larger basal cell that connects the embryo to the maternal tissue2. The coordinated and synchronous development of the embryo and the surrounding integuments, and the alignment of their growth axes, suggest communication between maternal tissues and the embryo. In contrast to animals, however, where a network of maternal factors that direct embryo patterning have been identified3,4, only a few maternal mutations have been described to affect embryo development in plants5–7. Early embryo patterning in Arabidopsis requires accumulation of the phytohormone auxin in the apical cell by directed transport from the suspensor8–10. However, the origin of this auxin has remained obscure. Here we investigate the source of auxin for early embryogenesis and provide evidence that the mother plant coordinates seed development by supplying auxin to the early embryo from the integuments of the ovule. We show that auxin response increases in ovules after fertilization, due to upregulated auxin biosynthesis in the integuments, and this maternally produced auxin is required for correct embryo development.},
author = {Robert, Hélène and Park, Chulmin and Gutièrrez, Carla and Wójcikowska, Barbara and Pěnčík, Aleš and Novák, Ondřej and Chen, Junyi and Grunewald, Wim and Dresselhaus, Thomas and Friml, Jirí and Laux, Thomas},
journal = {Nature Plants},
number = {8},
pages = {548 -- 553},
publisher = {Nature Publishing Group},
title = {{Maternal auxin supply contributes to early embryo patterning in Arabidopsis}},
doi = {10.1038/s41477-018-0204-z},
volume = {4},
year = {2018},
}
@article{159,
abstract = {L-type Ca2+ channels (LTCCs) play a crucial role in excitation-contraction coupling and release of hormones from secretory cells. They are targets of antihypertensive and antiarrhythmic drugs such as diltiazem. Here, we present a photoswitchable diltiazem, FHU-779, which can be used to reversibly block endogenous LTCCs by light. FHU-779 is as potent as diltiazem and can be used to place pancreatic β-cell function and cardiac activity under optical control.},
author = {Fehrentz, Timm and Huber, Florian and Hartrampf, Nina and Bruegmann, Tobias and Frank, James and Fine, Nicholas and Malan, Daniela and Danzl, Johann G and Tikhonov, Denis and Sumser, Martin and Sasse, Philipp and Hodson, David and Zhorov, Boris and Klocker, Nikolaj and Trauner, Dirk},
journal = {Nature Chemical Biology},
number = {8},
pages = {764 -- 767},
publisher = {Nature Publishing Group},
title = {{Optical control of L-type Ca2+ channels using a diltiazem photoswitch}},
doi = {10.1038/s41589-018-0090-8},
volume = {14},
year = {2018},
}
@article{16,
abstract = {We report quantitative evidence of mixing-layer elastic instability in a viscoelastic fluid flow between two widely spaced obstacles hindering a channel flow at Re ≪ 1 and Wi ≫ 1. Two mixing layers with nonuniform shear velocity profiles are formed in the region between the obstacles. The mixing-layer instability arises in the vicinity of an inflection point on the shear velocity profile with a steep variation in the elastic stress. The instability results in an intermittent appearance of small vortices in the mixing layers and an amplification of spatiotemporal averaged vorticity in the elastic turbulence regime. The latter is characterized through scaling of friction factor with Wi and both pressure and velocity spectra. Furthermore, the observations reported provide improved understanding of the stability of the mixing layer in a viscoelastic fluid at large elasticity, i.e., Wi ≫ 1 and Re ≪ 1 and oppose the current view of suppression of vorticity solely by polymer additives.},
author = {Varshney, Atul and Steinberg, Victor},
journal = {Physical Review Fluids},
number = {10},
publisher = {American Physical Society},
title = {{Mixing layer instability and vorticity amplification in a creeping viscoelastic flow}},
doi = {10.1103/PhysRevFluids.3.103303},
volume = {3},
year = {2018},
}
@inproceedings{160,
abstract = {We present layered concurrent programs, a compact and expressive notation for specifying refinement proofs of concurrent programs. A layered concurrent program specifies a sequence of connected concurrent programs, from most concrete to most abstract, such that common parts of different programs are written exactly once. These programs are expressed in the ordinary syntax of imperative concurrent programs using gated atomic actions, sequencing, choice, and (recursive) procedure calls. Each concurrent program is automatically extracted from the layered program. We reduce refinement to the safety of a sequence of concurrent checker programs, one each to justify the connection between every two consecutive concurrent programs. These checker programs are also automatically extracted from the layered program. Layered concurrent programs have been implemented in the CIVL verifier which has been successfully used for the verification of several complex concurrent programs.},
author = {Kragl, Bernhard and Qadeer, Shaz},
location = {Oxford, UK},
pages = {79 -- 102},
publisher = {Springer},
title = {{Layered Concurrent Programs}},
doi = {10.1007/978-3-319-96145-3_5},
volume = {10981},
year = {2018},
}
@article{161,
abstract = {Which properties of metabolic networks can be derived solely from stoichiometry? Predictive results have been obtained by flux balance analysis (FBA), by postulating that cells set metabolic fluxes to maximize growth rate. Here we consider a generalization of FBA to single-cell level using maximum entropy modeling, which we extend and test experimentally. Specifically, we define for Escherichia coli metabolism a flux distribution that yields the experimental growth rate: the model, containing FBA as a limit, provides a better match to measured fluxes and it makes a wide range of predictions: on flux variability, regulation, and correlations; on the relative importance of stoichiometry vs. optimization; on scaling relations for growth rate distributions. We validate the latter here with single-cell data at different sub-inhibitory antibiotic concentrations. The model quantifies growth optimization as emerging from the interplay of competitive dynamics in the population and regulation of metabolism at the level of single cells.},
author = {De Martino, Daniele and Andersson, Anna MC and Bergmiller, Tobias and Guet, Calin C and Tkacik, Gasper},
journal = {Nature Communications},
number = {1},
publisher = {Springer Nature},
title = {{Statistical mechanics for metabolic networks during steady state growth}},
doi = {10.1038/s41467-018-05417-9},
volume = {9},
year = {2018},
}
@article{162,
abstract = {Facial shape is the basis for facial recognition and categorization. Facial features reflect the underlying geometry of the skeletal structures. Here, we reveal that cartilaginous nasal capsule (corresponding to upper jaw and face) is shaped by signals generated by neural structures: brain and olfactory epithelium. Brain-derived Sonic Hedgehog (SHH) enables the induction of nasal septum and posterior nasal capsule, whereas the formation of a capsule roof is controlled by signals from the olfactory epithelium. Unexpectedly, the cartilage of the nasal capsule turned out to be important for shaping membranous facial bones during development. This suggests that conserved neurosensory structures could benefit from protection and have evolved signals inducing cranial cartilages encasing them. Experiments with mutant mice revealed that the genomic regulatory regions controlling production of SHH in the nervous system contribute to facial cartilage morphogenesis, which might be a mechanism responsible for the adaptive evolution of animal faces and snouts.},
author = {Kaucka, Marketa and Petersen, Julian and Tesarova, Marketa and Szarowska, Bara and Kastriti, Maria and Xie, Meng and Kicheva, Anna and Annusver, Karl and Kasper, Maria and Symmons, Orsolya and Pan, Leslie and Spitz, Francois and Kaiser, Jozef and Hovorakova, Maria and Zikmund, Tomas and Sunadome, Kazunori and Matise, Michael P and Wang, Hui and Marklund, Ulrika and Abdo, Hind and Ernfors, Patrik and Maire, Pascal and Wurmser, Maud and Chagin, Andrei S and Fried, Kaj and Adameyko, Igor},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Signals from the brain and olfactory epithelium control shaping of the mammalian nasal capsule cartilage}},
doi = {10.7554/eLife.34465},
volume = {7},
year = {2018},
}
@article{163,
abstract = {For ultrafast fixation of biological samples to avoid artifacts, high-pressure freezing (HPF) followed by freeze substitution (FS) is preferred over chemical fixation at room temperature. After HPF, samples are maintained at low temperature during dehydration and fixation, while avoiding damaging recrystallization. This is a notoriously slow process. McDonald and Webb demonstrated, in 2011, that sample agitation during FS dramatically reduces the necessary time. Then, in 2015, we (H.G. and S.R.) introduced an agitation module into the cryochamber of an automated FS unit and demonstrated that the preparation of algae could be shortened from days to a couple of hours. We argued that variability in the processing, reproducibility, and safety issues are better addressed using automated FS units. For dissemination, we started low-cost manufacturing of agitation modules for two of the most widely used FS units, the Automatic Freeze Substitution Systems, AFS(1) and AFS2, from Leica Microsystems, using three dimensional (3D)-printing of the major components. To test them, several labs independently used the modules on a wide variety of specimens that had previously been processed by manual agitation, or without agitation. We demonstrate that automated processing with sample agitation saves time, increases flexibility with respect to sample requirements and protocols, and produces data of at least as good quality as other approaches.},
author = {Reipert, Siegfried and Goldammer, Helmuth and Richardson, Christine and Goldberg, Martin and Hawkins, Timothy and Hollergschwandtner, Elena and Kaufmann, Walter and Antreich, Sebastian and Stierhof, York},
journal = {Journal of Histochemistry and Cytochemistry},
number = {12},
pages = {903--921},
publisher = {Histochemical Society},
title = {{Agitation modules: Flexible means to accelerate automated freeze substitution}},
doi = {10.1369/0022155418786698},
volume = {66},
year = {2018},
}
@article{17,
abstract = {Creeping flow of polymeric fluid without inertia exhibits elastic instabilities and elastic turbulence accompanied by drag enhancement due to elastic stress produced by flow-stretched polymers. However, in inertia-dominated flow at high Re and low fluid elasticity El, a reduction in turbulent frictional drag is caused by an intricate competition between inertial and elastic stresses. Here we explore the effect of inertia on the stability of viscoelastic flow in a broad range of control parameters El and (Re,Wi). We present the stability diagram of observed flow regimes in Wi-Re coordinates and find that the instabilities' onsets show an unexpectedly nonmonotonic dependence on El. Further, three distinct regions in the diagram are identified based on El. Strikingly, for high-elasticity fluids we discover a complete relaminarization of flow at Reynolds number in the range of 1 to 10, different from a well-known turbulent drag reduction. These counterintuitive effects may be explained by a finite polymer extensibility and a suppression of vorticity at high Wi. Our results call for further theoretical and numerical development to uncover the role of inertial effect on elastic turbulence in a viscoelastic flow.},
author = {Varshney, Atul and Steinberg, Victor},
journal = {Physical Review Fluids},
number = {10},
publisher = {American Physical Society},
title = {{Drag enhancement and drag reduction in viscoelastic flow}},
doi = {10.1103/PhysRevFluids.3.103302},
volume = {3},
year = {2018},
}
@article{178,
abstract = {We give an upper bound for the number of rational points of height at most B, lying on a surface defined by a quadratic form Q. The bound shows an explicit dependence on Q. It is optimal with respect to B, and is also optimal for typical forms Q.},
author = {Browning, Timothy D and Heath-Brown, Roger},
issn = {2397-3129},
journal = {Discrete Analysis},
pages = {1 -- 29},
publisher = {Alliance of Diamond Open Access Journals},
title = {{Counting rational points on quadric surfaces}},
doi = {10.19086/da.4375},
volume = {15},
year = {2018},
}
@article{18,
abstract = {An N-superconcentrator is a directed, acyclic graph with N input nodes and N output nodes such that every subset of the inputs and every subset of the outputs of same cardinality can be connected by node-disjoint paths. It is known that linear-size and bounded-degree superconcentrators exist. We prove the existence of such superconcentrators with asymptotic density 25.3 (where the density is the number of edges divided by N). The previously best known densities were 28 [12] and 27.4136 [17].},
author = {Kolmogorov, Vladimir and Rolinek, Michal},
issn = {0381-7032},
journal = {Ars Combinatoria},
number = {10},
pages = {269 -- 304},
publisher = {Charles Babbage Research Centre},
title = {{Superconcentrators of density 25.3}},
volume = {141},
year = {2018},
}
@article{180,
abstract = {In this paper we define and study the classical Uniform Electron Gas (UEG), a system of infinitely many electrons whose density is constant everywhere in space. The UEG is defined differently from Jellium, which has a positive constant background but no constraint on the density. We prove that the UEG arises in Density Functional Theory in the limit of a slowly varying density, minimizing the indirect Coulomb energy. We also construct the quantum UEG and compare it to the classical UEG at low density.},
author = {Lewin, Mathieu and Lieb, Elliott and Seiringer, Robert},
journal = {Journal de l'École Polytechnique - Mathématiques},
pages = {79 -- 116},
publisher = {Ecole Polytechnique},
title = {{Statistical mechanics of the uniform electron gas}},
doi = {10.5802/jep.64},
volume = {5},
year = {2018},
}
@article{181,
abstract = {We consider large random matrices X with centered, independent entries but possibly different variances. We compute the normalized trace of f(X)g(X∗) for f, g functions analytic on the spectrum of X. We use these results to compute the long time asymptotics for systems of coupled differential equations with random coefficients. We show that when the coupling is critical, the norm squared of the solution decays like t−1/2.},
author = {Erdös, László and Krüger, Torben H and Renfrew, David T},
journal = {SIAM Journal on Mathematical Analysis},
number = {3},
pages = {3271 -- 3290},
publisher = {Society for Industrial and Applied Mathematics },
title = {{Power law decay for systems of randomly coupled differential equations}},
doi = {10.1137/17M1143125},
volume = {50},
year = {2018},
}
@inproceedings{182,
abstract = {We describe a new algorithm for the parametric identification problem for signal temporal logic (STL), stated as follows. Given a densetime real-valued signal w and a parameterized temporal logic formula φ, compute the subset of the parameter space that renders the formula satisfied by the signal. Unlike previous solutions, which were based on search in the parameter space or quantifier elimination, our procedure works recursively on φ and computes the evolution over time of the set of valid parameter assignments. This procedure is similar to that of monitoring or computing the robustness of φ relative to w. Our implementation and experiments demonstrate that this approach can work well in practice.},
author = {Bakhirkin, Alexey and Ferrere, Thomas and Maler, Oded},
booktitle = {Proceedings of the 21st International Conference on Hybrid Systems},
isbn = {978-1-4503-5642-8 },
location = {Porto, Portugal},
pages = {177 -- 186},
publisher = {ACM},
title = {{Efficient parametric identification for STL}},
doi = {10.1145/3178126.3178132},
year = {2018},
}
@inproceedings{183,
abstract = {Fault-localization is considered to be a very tedious and time-consuming activity in the design of complex Cyber-Physical Systems (CPS). This laborious task essentially requires expert knowledge of the system in order to discover the cause of the fault. In this context, we propose a new procedure that aids designers in debugging Simulink/Stateflow hybrid system models, guided by Signal Temporal Logic (STL) specifications. The proposed method relies on three main ingredients: (1) a monitoring and a trace diagnostics procedure that checks whether a tested behavior satisfies or violates an STL specification, localizes time segments and interface variables contributing to the property violations; (2) a slicing procedure that maps these observable behavior segments to the internal states and transitions of the Simulink model; and (3) a spectrum-based fault-localization method that combines the previous analysis from multiple tests to identify the internal states and/or transitions that are the most likely to explain the fault. We demonstrate the applicability of our approach on two Simulink models from the automotive and the avionics domain.},
author = {Bartocci, Ezio and Ferrere, Thomas and Manjunath, Niveditha and Nickovic, Dejan},
location = {Porto, Portugal},
pages = {197 -- 206},
publisher = {Association for Computing Machinery, Inc},
title = {{Localizing faults in simulink/stateflow models with STL}},
doi = {10.1145/3178126.3178131},
year = {2018},
}
@inproceedings{184,
abstract = {We prove that for every d ≥ 2, deciding if a pure, d-dimensional, simplicial complex is shellable is NP-hard, hence NP-complete. This resolves a question raised, e.g., by Danaraj and Klee in 1978. Our reduction also yields that for every d ≥ 2 and k ≥ 0, deciding if a pure, d-dimensional, simplicial complex is k-decomposable is NP-hard. For d ≥ 3, both problems remain NP-hard when restricted to contractible pure d-dimensional complexes.},
author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli},
location = {Budapest, Hungary},
pages = {41:1 -- 41:16},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Shellability is NP-complete}},
doi = {10.4230/LIPIcs.SoCG.2018.41},
volume = {99},
year = {2018},
}
@inproceedings{185,
abstract = {We resolve in the affirmative conjectures of A. Skopenkov and Repovš (1998), and M. Skopenkov (2003) generalizing the classical Hanani-Tutte theorem to the setting of approximating maps of graphs on 2-dimensional surfaces by embeddings. Our proof of this result is constructive and almost immediately implies an efficient algorithm for testing whether a given piecewise linear map of a graph in a surface is approximable by an embedding. More precisely, an instance of this problem consists of (i) a graph G whose vertices are partitioned into clusters and whose inter-cluster edges are partitioned into bundles, and (ii) a region R of a 2-dimensional compact surface M given as the union of a set of pairwise disjoint discs corresponding to the clusters and a set of pairwise disjoint "pipes" corresponding to the bundles, connecting certain pairs of these discs. We are to decide whether G can be embedded inside M so that the vertices in every cluster are drawn in the corresponding disc, the edges in every bundle pass only through its corresponding pipe, and every edge crosses the boundary of each disc at most once.},
author = {Fulek, Radoslav and Kynčl, Jan},
isbn = {978-3-95977-066-8},
location = {Budapest, Hungary},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Hanani-Tutte for approximating maps of graphs}},
doi = {10.4230/LIPIcs.SoCG.2018.39},
volume = {99},
year = {2018},
}
@inproceedings{187,
abstract = {Given a locally finite X ⊆ ℝd and a radius r ≥ 0, the k-fold cover of X and r consists of all points in ℝd that have k or more points of X within distance r. We consider two filtrations - one in scale obtained by fixing k and increasing r, and the other in depth obtained by fixing r and decreasing k - and we compute the persistence diagrams of both. While standard methods suffice for the filtration in scale, we need novel geometric and topological concepts for the filtration in depth. In particular, we introduce a rhomboid tiling in ℝd+1 whose horizontal integer slices are the order-k Delaunay mosaics of X, and construct a zigzag module from Delaunay mosaics that is isomorphic to the persistence module of the multi-covers. },
author = {Edelsbrunner, Herbert and Osang, Georg F},
location = {Budapest, Hungary},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{The multi-cover persistence of Euclidean balls}},
doi = {10.4230/LIPIcs.SoCG.2018.34},
volume = {99},
year = {2018},
}
@inproceedings{188,
abstract = {Smallest enclosing spheres of finite point sets are central to methods in topological data analysis. Focusing on Bregman divergences to measure dissimilarity, we prove bounds on the location of the center of a smallest enclosing sphere. These bounds depend on the range of radii for which Bregman balls are convex.},
author = {Edelsbrunner, Herbert and Virk, Ziga and Wagner, Hubert},
location = {Budapest, Hungary},
pages = {35:1 -- 35:13},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Smallest enclosing spheres and Chernoff points in Bregman geometry}},
doi = {10.4230/LIPIcs.SoCG.2018.35},
volume = {99},
year = {2018},
}
@article{19,
abstract = {Bacteria regulate genes to survive antibiotic stress, but regulation can be far from perfect. When regulation is not optimal, mutations that change gene expression can contribute to antibiotic resistance. It is not systematically understood to what extent natural gene regulation is or is not optimal for distinct antibiotics, and how changes in expression of specific genes quantitatively affect antibiotic resistance. Here we discover a simple quantitative relation between fitness, gene expression, and antibiotic potency, which rationalizes our observation that a multitude of genes and even innate antibiotic defense mechanisms have expression that is critically nonoptimal under antibiotic treatment. First, we developed a pooled-strain drug-diffusion assay and screened Escherichia coli overexpression and knockout libraries, finding that resistance to a range of 31 antibiotics could result from changing expression of a large and functionally diverse set of genes, in a primarily but not exclusively drug-specific manner. Second, by synthetically controlling the expression of single-drug and multidrug resistance genes, we observed that their fitness-expression functions changed dramatically under antibiotic treatment in accordance with a log-sensitivity relation. Thus, because many genes are nonoptimally expressed under antibiotic treatment, many regulatory mutations can contribute to resistance by altering expression and by activating latent defenses.},
author = {Palmer, Adam and Chait, Remy P and Kishony, Roy},
journal = {Molecular Biology and Evolution},
number = {11},
pages = {2669 -- 2684},
publisher = {Oxford University Press},
title = {{Nonoptimal gene expression creates latent potential for antibiotic resistance}},
doi = {10.1093/molbev/msy163},
volume = {35},
year = {2018},
}
@article{190,
abstract = {The German cockroach, Blattella germanica, is a worldwide pest that infests buildings, including homes, restaurants, and hospitals, often living in unsanitary conditions. As a disease vector and producer of allergens, this species has major health and economic impacts on humans. Factors contributing to the success of the German cockroach include its resistance to a broad range of insecticides, immunity to many pathogens, and its ability, as an extreme generalist omnivore, to survive on most food sources. The recently published genome shows that B. germanica has an exceptionally high number of protein coding genes. In this study, we investigate the functions of the 93 significantly expanded gene families with the aim to better understand the success of B. germanica as a major pest despite such inhospitable conditions. We find major expansions in gene families with functions related to the detoxification of insecticides and allelochemicals, defense against pathogens, digestion, sensory perception, and gene regulation. These expansions might have allowed B. germanica to develop multiple resistance mechanisms to insecticides and pathogens, and enabled a broad, flexible diet, thus explaining its success in unsanitary conditions and under recurrent chemical control. The findings and resources presented here provide insights for better understanding molecular mechanisms that will facilitate more effective cockroach control.},
author = {Harrison, Mark and Arning, Nicolas and Kremer, Lucas and Ylla, Guillem and Belles, Xavier and Bornberg-Bauer, Erich and Huylmans, Ann K and Jongepier, Evelien and Piulachs, Maria and Richards, Stephen and Schal, Coby},
journal = {Journal of Experimental Zoology Part B: Molecular and Developmental Evolution},
pages = {254--264},
publisher = {Wiley},
title = {{Expansions of key protein families in the German cockroach highlight the molecular basis of its remarkable success as a global indoor pest}},
doi = {10.1002/jez.b.22824},
volume = {330},
year = {2018},
}
@article{192,
abstract = {The phytohormone auxin is the information carrier in a plethora of developmental and physiological processes in plants(1). It has been firmly established that canonical, nuclear auxin signalling acts through regulation of gene transcription(2). Here, we combined microfluidics, live imaging, genetic engineering and computational modelling to reanalyse the classical case of root growth inhibition(3) by auxin. We show that Arabidopsis roots react to addition and removal of auxin by extremely rapid adaptation of growth rate. This process requires intracellular auxin perception but not transcriptional reprogramming. The formation of the canonical TIR1/AFB-Aux/IAA co-receptor complex is required for the growth regulation, hinting to a novel, non-transcriptional branch of this signalling pathway. Our results challenge the current understanding of root growth regulation by auxin and suggest another, presumably non-transcriptional, signalling output of the canonical auxin pathway.},
author = {Fendrych, Matyas and Akhmanova, Maria and Merrin, Jack and Glanc, Matous and Hagihara, Shinya and Takahashi, Koji and Uchida, Naoyuki and Torii, Keiko U and Friml, Jirí},
journal = {Nature Plants},
number = {7},
pages = {453--459},
publisher = {Springer Nature},
title = {{Rapid and reversible root growth inhibition by TIR1 auxin signalling}},
doi = {10.1038/s41477-018-0190-1},
volume = {4},
year = {2018},
}
@inproceedings{193,
abstract = {We show attacks on five data-independent memory-hard functions (iMHF) that were submitted to the password hashing competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure for the hardware cost of evaluating the iMHF on an ASIC. Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats and Gambit each having an exponent no more than 1.75. Moreover, we show that the complexity of the iMHF modes of the PHC finalists Pomelo and Lyra2 have exponents at most 1.83 and 1.67 respectively. To show this we investigate a combinatorial property of each underlying DAG (called its depth-robustness). By establishing upper bounds on this property we are then able to apply the general technique of [Alwen-Blocki'16] for analyzing the hardware costs of an iMHF.},
author = {Alwen, Joel F and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F and Pietrzak, Krzysztof Z and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal},
booktitle = {Proceedings of the 2018 on Asia Conference on Computer and Communications Security},
location = {Incheon, Republic of Korea},
pages = {51--65},
publisher = {ACM},
title = {{On the memory hardness of data independent password hashing functions}},
doi = {10.1145/3196494.3196534},
year = {2018},
}
@article{194,
abstract = {Ants are emerging model systems to study cellular signaling because distinct castes possess different physiologic phenotypes within the same colony. Here we studied the functionality of inotocin signaling, an insect ortholog of mammalian oxytocin (OT), which was recently discovered in ants. In Lasius ants, we determined that specialization within the colony, seasonal factors, and physiologic conditions down-regulated the expression of the OT-like signaling system. Given this natural variation, we interrogated its function using RNAi knockdowns. Next-generation RNA sequencing of OT-like precursor knock-down ants highlighted its role in the regulation of genes involved in metabolism. Knock-down ants exhibited higher walking activity and increased self-grooming in the brood chamber. We propose that OT-like signaling in ants is important for regulating metabolic processes and locomotion.},
author = {Liutkeviciute, Zita and Gil Mansilla, Esther and Eder, Thomas and Casillas Perez, Barbara E and Di Giglio, Maria Giulia and Muratspahić, Edin and Grebien, Florian and Rattei, Thomas and Muttenthaler, Markus and Cremer, Sylvia and Gruber, Christian},
issn = {08926638},
journal = {The FASEB Journal},
number = {12},
pages = {6808--6821},
publisher = {FASEB},
title = {{Oxytocin-like signaling in ants influences metabolic gene expression and locomotor activity}},
doi = {10.1096/fj.201800443},
volume = {32},
year = {2018},
}
@article{195,
abstract = {We demonstrate that identical impurities immersed in a two-dimensional many-particle bath can be viewed as flux-tube-charged-particle composites described by fractional statistics. In particular, we find that the bath manifests itself as an external magnetic flux tube with respect to the impurities, and hence the time-reversal symmetry is broken for the effective Hamiltonian describing the impurities. The emerging flux tube acts as a statistical gauge field after a certain critical coupling. This critical coupling corresponds to the intersection point between the quasiparticle state and the phonon wing, where the angular momentum is transferred from the impurity to the bath. This amounts to a novel configuration with emerging anyons. The proposed setup paves the way to realizing anyons using electrons interacting with superfluid helium or lattice phonons, as well as using atomic impurities in ultracold gases.},
author = {Yakaboylu, Enderalp and Lemeshko, Mikhail},
journal = {Physical Review B - Condensed Matter and Materials Physics},
number = {4},
publisher = {American Physical Society},
title = {{Anyonic statistics of quantum impurities in two dimensions}},
doi = {10.1103/PhysRevB.98.045402},
volume = {98},
year = {2018},
}
@phdthesis{197,
abstract = {Modern computer vision systems heavily rely on statistical machine learning models, which typically require large amounts of labeled data to be learned reliably. Moreover, very recently computer vision research widely adopted techniques for representation learning, which further increase the demand for labeled data. However, for many important practical problems there is relatively small amount of labeled data available, so it is problematic to leverage full potential of the representation learning methods. One way to overcome this obstacle is to invest substantial resources into producing large labelled datasets. Unfortunately, this can be prohibitively expensive in practice. In this thesis we focus on the alternative way of tackling the aforementioned issue. We concentrate on methods, which make use of weakly-labeled or even unlabeled data. Specifically, the first half of the thesis is dedicated to the semantic image segmentation task. We develop a technique, which achieves competitive segmentation performance and only requires annotations in a form of global image-level labels instead of dense segmentation masks. Subsequently, we present a new methodology, which further improves segmentation performance by leveraging tiny additional feedback from a human annotator. By using our methods practitioners can greatly reduce the amount of data annotation effort, which is required to learn modern image segmentation models. In the second half of the thesis we focus on methods for learning from unlabeled visual data. We study a family of autoregressive models for modeling structure of natural images and discuss potential applications of these models. Moreover, we conduct in-depth study of one of these applications, where we develop the state-of-the-art model for the probabilistic image colorization task.},
author = {Kolesnikov, Alexander},
pages = {113},
publisher = {IST Austria},
title = {{Weakly-Supervised Segmentation and Unsupervised Modeling of Natural Images}},
doi = {10.15479/AT:ISTA:th_1021},
year = {2018},
}
@article{198,
abstract = {We consider a class of students learning a language from a teacher. The situation can be interpreted as a group of child learners receiving input from the linguistic environment. The teacher provides sample sentences. The students try to learn the grammar from the teacher. In addition to just listening to the teacher, the students can also communicate with each other. The students hold hypotheses about the grammar and change them if they receive counter evidence. The process stops when all students have converged to the correct grammar. We study how the time to convergence depends on the structure of the classroom by introducing and evaluating various complexity measures. We find that structured communication between students, although potentially introducing confusion, can greatly reduce some of the complexity measures. Our theory can also be interpreted as applying to the scientific process, where nature is the teacher and the scientists are the students.},
author = {Ibsen-Jensen, Rasmus and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin},
journal = {Journal of the Royal Society Interface},
number = {140},
publisher = {Royal Society},
title = {{Language acquisition with communication between learners}},
doi = {10.1098/rsif.2018.0073},
volume = {15},
year = {2018},
}
@article{608,
abstract = {Synthesis is the automated construction of a system from its specification. In real life, hardware and software systems are rarely constructed from scratch. Rather, a system is typically constructed from a library of components. Lustig and Vardi formalized this intuition and studied LTL synthesis from component libraries. In real life, designers seek optimal systems. In this paper we add optimality considerations to the setting. We distinguish between quality considerations (for example, size - the smaller a system is, the better it is), and pricing (for example, the payment to the company who manufactured the component). We study the problem of designing systems with minimal quality-cost and price. A key point is that while the quality cost is individual - the choices of a designer are independent of choices made by other designers that use the same library, pricing gives rise to a resource-allocation game - designers that use the same component share its price, with the share being proportional to the number of uses (a component can be used several times in a design). We study both closed and open settings, and in both we solve the problem of finding an optimal design. In a setting with multiple designers, we also study the game-theoretic problems of the induced resource-allocation game.},
author = {Avni, Guy and Kupferman, Orna},
journal = {Theoretical Computer Science},
pages = {50--72},
publisher = {Elsevier},
title = {{Synthesis from component libraries with costs}},
doi = {10.1016/j.tcs.2017.11.001},
volume = {712},
year = {2018},
}
@article{616,
abstract = {Social insects protect their colonies from infectious disease through collective defences that result in social immunity. In ants, workers first try to prevent infection of colony members. Here, we show that if this fails and a pathogen establishes an infection, ants employ an efficient multicomponent behaviour − "destructive disinfection" − to prevent further spread of disease through the colony. Ants specifically target infected pupae during the pathogen's non-contagious incubation period, relying on chemical 'sickness cues' emitted by pupae. They then remove the pupal cocoon, perforate its cuticle and administer antimicrobial poison, which enters the body and prevents pathogen replication from the inside out. Like the immune system of a body that specifically targets and eliminates infected cells, this social immunity measure sacrifices infected brood to stop the pathogen completing its lifecycle, thus protecting the rest of the colony. Hence, the same principles of disease defence apply at different levels of biological organisation.},
author = {Pull, Christopher and Ugelvig, Line V and Wiesenhofer, Florian and Grasse, Anna V and Tragust, Simon and Schmitt, Thomas and Brown, Mark and Cremer, Sylvia},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Destructive disinfection of infected brood prevents systemic disease spread in ant colonies}},
doi = {10.7554/eLife.32073},
volume = {7},
year = {2018},
}
@unpublished{6183,
abstract = {We study the unique solution $m$ of the Dyson equation \[ -m(z)^{-1} = z - a
+ S[m(z)] \] on a von Neumann algebra $\mathcal{A}$ with the constraint
$\mathrm{Im}\,m\geq 0$. Here, $z$ lies in the complex upper half-plane, $a$ is
a self-adjoint element of $\mathcal{A}$ and $S$ is a positivity-preserving
linear operator on $\mathcal{A}$. We show that $m$ is the Stieltjes transform
of a compactly supported $\mathcal{A}$-valued measure on $\mathbb{R}$. Under
suitable assumptions, we establish that this measure has a uniformly
$1/3$-H\"{o}lder continuous density with respect to the Lebesgue measure, which
is supported on finitely many intervals, called bands. In fact, the density is
analytic inside the bands with a square-root growth at the edges and internal
cubic root cusps whenever the gap between two bands vanishes. The shape of
these singularities is universal and no other singularity may occur. We give a
precise asymptotic description of $m$ near the singular points. These
asymptotics generalize the analysis at the regular edges given in the companion
paper on the Tracy-Widom universality for the edge eigenvalue statistics for
correlated random matrices [arXiv:1804.07744] and they play a key role in the
proof of the Pearcey universality at the cusp for Wigner-type matrices
[arXiv:1809.03971,arXiv:1811.04055]. We also extend the finite dimensional band
mass formula from [arXiv:1804.07744] to the von Neumann algebra setting by
showing that the spectral mass of the bands is topologically rigid under
deformations and we conclude that these masses are quantized in some important
cases.},
author = {Alt, Johannes and Erdös, László and Krüger, Torben H},
booktitle = {arXiv},
title = {{The Dyson equation with linear self-energy: Spectral bands, edges and cusps}},
year = {2018},
}
@inproceedings{6195,
abstract = {In the context of robotic manipulation and grasping, the shift from a view that is static (force closure of a single posture) and contact-deprived (only contact for force closure is allowed, everything else is obstacle) towards a view that is dynamic and contact-rich (soft manipulation) has led to an increased interest in soft hands. These hands can easily exploit environmental constraints and object surfaces without risk, and safely interact with humans, but present also some challenges. Designing them is difficult, as well as predicting, modelling, and “programming” their interactions with the objects and the environment. This paper tackles the problem of simulating them in a fast and effective way, leveraging on novel and existing simulation technologies. We present a triple-layered simulation framework where dynamic properties such as stiffness are determined from slow but accurate FEM simulation data once, and then condensed into a lumped parameter model that can be used to fast simulate soft fingers and soft hands. We apply our approach to the simulation of soft pneumatic fingers.},
author = {Pozzi, Maria and Miguel Villalba, Eder and Deimel, Raphael and Malvezzi, Monica and Bickel, Bernd and Brock, Oliver and Prattichizzo, Domenico},
isbn = {9781538630815},
location = {Brisbane, Australia},
publisher = {IEEE},
title = {{Efficient FEM-based simulation of soft robots modeled as kinematic chains}},
doi = {10.1109/icra.2018.8461106},
year = {2018},
}
@article{62,
abstract = {Imaging is a dominant strategy for data collection in neuroscience, yielding stacks of images that often scale to gigabytes of data for a single experiment. Machine learning algorithms from computer vision can serve as a pair of virtual eyes that tirelessly processes these images, automatically detecting and identifying microstructures. Unlike learning methods, our Flexible Learning-free Reconstruction of Imaged Neural volumes (FLoRIN) pipeline exploits structure-specific contextual clues and requires no training. This approach generalizes across different modalities, including serially-sectioned scanning electron microscopy (sSEM) of genetically labeled and contrast enhanced processes, spectral confocal reflectance (SCoRe) microscopy, and high-energy synchrotron X-ray microtomography (μCT) of large tissue volumes. We deploy the FLoRIN pipeline on newly published and novel mouse datasets, demonstrating the high biological fidelity of the pipeline’s reconstructions. FLoRIN reconstructions are of sufficient quality for preliminary biological study, for example examining the distribution and morphology of cells or extracting single axons from functional data. Compared to existing supervised learning methods, FLoRIN is one to two orders of magnitude faster and produces high-quality reconstructions that are tolerant to noise and artifacts, as is shown qualitatively and quantitatively.},
author = {Shahbazi, Ali and Kinnison, Jeffery and Vescovi, Rafael and Du, Ming and Hill, Robert and Jösch, Maximilian A and Takeno, Marc and Zeng, Hongkui and Da Costa, Nuno and Grutzendler, Jaime and Kasthuri, Narayanan and Scheirer, Walter},
journal = {Scientific Reports},
number = {1},
publisher = {Nature Publishing Group},
title = {{Flexible learning-free segmentation and reconstruction of neural volumes}},
doi = {10.1038/s41598-018-32628-3},
volume = {8},
year = {2018},
}
@article{620,
abstract = {Clathrin-mediated endocytosis requires the coordinated assembly of various endocytic proteins and lipids at the plasma membrane. Accumulating evidence demonstrates a crucial role for phosphatidylinositol-4,5-bisphosphate (PtdIns(4,5)P2) in endocytosis, but specific roles for PtdIns(4)P other than as the biosynthetic precursor of PtdIns(4,5)P2 have not been clarified. In this study we investigated the role of PtdIns(4)P or PtdIns(4,5)P2 in receptor-mediated endocytosis through the construction of temperature-sensitive (ts) mutants for the PI 4-kinases Stt4p and Pik1p and the PtdIns(4) 5-kinase Mss4p. Quantitative analyses of endocytosis revealed that both the stt4(ts)pik1(ts) and mss4(ts) mutants have a severe defect in endocytic internalization. Live-cell imaging of endocytic protein dynamics in stt4(ts)pik1(ts) and mss4(ts) mutants revealed that PtdIns(4)P is required for the recruitment of the alpha-factor receptor Ste2p to clathrin-coated pits whereas PtdIns(4,5)P2 is required for membrane internalization. We also found that the localization to endocytic sites of the ENTH/ANTH domain-bearing clathrin adaptors, Ent1p/Ent2p and Yap1801p/Yap1802p, is significantly impaired in the stt4(ts)pik1(ts) mutant, but not in the mss4(ts) mutant. These results suggest distinct roles in successive steps for PtdIns(4)P and PtdIns(4,5)P2 during receptor-mediated endocytosis.},
author = {Yamamoto, Wataru and Wada, Suguru and Nagano, Makoto and Aoshima, Kaito and Siekhaus, Daria E and Toshima, Junko and Toshima, Jiro},
journal = {Journal of Cell Science},
number = {1},
publisher = {Company of Biologists},
title = {{Distinct roles for plasma membrane PtdIns(4)P and PtdIns(4,5)P2 during yeast receptor-mediated endocytosis}},
doi = {10.1242/jcs.207696},
volume = {131},
year = {2018},
}
@article{63,
abstract = {African cichlids display a remarkable assortment of jaw morphologies, pigmentation patterns, and mating behaviors. In addition to this previously documented diversity, recent studies have documented a rich diversity of sex chromosomes within these fishes. Here we review the known sex-determination network within vertebrates, and the extraordinary number of sex chromosomes systems segregating in African cichlids. We also propose a model for understanding the unusual number of sex chromosome systems within this clade.},
author = {Gammerdinger, William J and Kocher, Thomas},
journal = {Genes},
number = {10},
publisher = {MDPI AG},
title = {{Unusual diversity of sex chromosomes in African cichlid fishes}},
doi = {10.3390/genes9100480},
volume = {9},
year = {2018},
}
@article{6339,
abstract = {We introduce a diagrammatic Monte Carlo approach to angular momentum properties of quantum many-particle systems possessing a macroscopic number of degrees of freedom. The treatment is based on a diagrammatic expansion that merges the usual Feynman diagrams with the angular momentum diagrams known from atomic and nuclear structure theory, thereby incorporating the non-Abelian algebra inherent to quantum rotations. Our approach is applicable at arbitrary coupling, is free of systematic errors and of finite-size effects, and naturally provides access to the impurity Green function. We exemplify the technique by obtaining an all-coupling solution of the angulon model; however, the method is quite general and can be applied to a broad variety of systems in which particles exchange quantum angular momentum with their many-body environment.},
author = {Bighin, Giacomo and Tscherbul, Timur and Lemeshko, Mikhail},
journal = {Physical Review Letters},
number = {16},
publisher = {APS},
title = {{Diagrammatic Monte Carlo approach to angular momentum in quantum many-particle systems}},
doi = {10.1103/physrevlett.121.165301},
volume = {121},
year = {2018},
}
@article{6354,
abstract = {Blood platelets are critical for hemostasis and thrombosis, but also play diverse roles during immune responses. We have recently reported that platelets migrate at sites of infection in vitro and in vivo. Importantly, platelets use their ability to migrate to collect and bundle fibrin (ogen)-bound bacteria accomplishing efficient intravascular bacterial trapping. Here, we describe a method that allows analyzing platelet migration in vitro, focusing on their ability to collect bacteria and trap bacteria under flow.},
author = {Fan, Shuxia and Lorenz, Michael and Massberg, Steffen and Gärtner, Florian R},
issn = {2331-8325},
journal = {Bio-Protocol},
keywords = {Platelets, Cell migration, Bacteria, Shear flow, Fibrinogen, E. coli},
number = {18},
publisher = {Bio-Protocol},
title = {{Platelet migration and bacterial trapping assay under flow}},
doi = {10.21769/bioprotoc.3018},
volume = {8},
year = {2018},
}
@article{6355,
abstract = {We prove that any cyclic quadrilateral can be inscribed in any closed convex C1-curve. The smoothness condition is not required if the quadrilateral is a rectangle.},
author = {Akopyan, Arseniy and Avvakumov, Sergey},
issn = {2050-5094},
journal = {Forum of Mathematics, Sigma},
publisher = {Cambridge University Press},
title = {{Any cyclic quadrilateral can be inscribed in any closed convex smooth curve}},
doi = {10.1017/fms.2018.7},
volume = {6},
year = {2018},
}
@article{64,
abstract = {Tropical geometry, an established field in pure mathematics, is a place where string theory, mirror symmetry, computational algebra, auction theory, and so forth meet and influence one another. In this paper, we report on our discovery of a tropical model with self-organized criticality (SOC) behavior. Our model is continuous, in contrast to all known models of SOC, and is a certain scaling limit of the sandpile model, the first and archetypical model of SOC. We describe how our model is related to pattern formation and proportional growth phenomena and discuss the dichotomy between continuous and discrete models in several contexts. Our aim in this context is to present an idealized tropical toy model (cf. Turing reaction-diffusion model), requiring further investigation.},
author = {Kalinin, Nikita and Guzmán Sáenz, Aldo and Prieto, Y and Shkolnikov, Mikhail and Kalinina, V and Lupercio, Ernesto},
issn = {00278424},
journal = {PNAS: Proceedings of the National Academy of Sciences of the United States of America},
number = {35},
pages = {E8135--E8142},
publisher = {National Academy of Sciences},
title = {{Self-organized criticality and pattern emergence through the lens of tropical geometry}},
doi = {10.1073/pnas.1805847115},
volume = {115},
year = {2018},
}
@misc{6459,
author = {Petritsch, Barbara},
keywords = {Open Access, Publication Analysis},
location = {Graz, Austria},
publisher = {IST Austria},
title = {{Open Access at IST Austria 2009-2017}},
doi = {10.5281/zenodo.1410279},
year = {2018},
}
@article{6497,
abstract = {T cells are actively scanning pMHC-presenting cells in lymphoid organs and nonlymphoid tissues (NLTs) with divergent topologies and confinement. How the T cell actomyosin cytoskeleton facilitates this task in distinct environments is incompletely understood. Here, we show that lack of Myosin IXb (Myo9b), a negative regulator of the small GTPase Rho, led to increased Rho-GTP levels and cell surface stiffness in primary T cells. Nonetheless, intravital imaging revealed robust motility of Myo9b−/− CD8+ T cells in lymphoid tissue and similar expansion and differentiation during immune responses. In contrast, accumulation of Myo9b−/− CD8+ T cells in NLTs was strongly impaired. Specifically, Myo9b was required for T cell crossing of basement membranes, such as those which are present between dermis and epidermis. As consequence, Myo9b−/− CD8+ T cells showed impaired control of skin infections. In sum, we show that Myo9b is critical for the CD8+ T cell adaptation from lymphoid to NLT surveillance and the establishment of protective tissue–resident T cell populations.},
author = {Moalli, Federica and Ficht, Xenia and Germann, Philipp and Vladymyrov, Mykhailo and Stolp, Bettina and de Vries, Ingrid and Lyck, Ruth and Balmer, Jasmin and Fiocchi, Amleto and Kreutzfeldt, Mario and Merkler, Doron and Iannacone, Matteo and Ariga, Akitaka and Stoffel, Michael H. and Sharpe, James and Bähler, Martin and Sixt, Michael K and Diz-Muñoz, Alba and Stein, Jens V.},
issn = {0022-1007},
journal = {The Journal of Experimental Medicine},
number = {7},
pages = {1869--1890},
publisher = {Rockefeller University Press},
title = {{The Rho regulator Myosin IXb enables nonlymphoid tissue seeding of protective CD8+T cells}},
doi = {10.1084/jem.20170896},
volume = {215},
year = {2018},
}
@article{6499,
abstract = {Expansion microscopy is a recently introduced imaging technique that achieves super‐resolution through physically expanding the specimen by ~4×, after embedding into a swellable gel. The resolution attained is, correspondingly, approximately fourfold better than the diffraction limit, or ~70 nm. This is a major improvement over conventional microscopy, but still lags behind modern STED or STORM setups, whose resolution can reach 20–30 nm. We addressed this issue here by introducing an improved gel recipe that enables an expansion factor of ~10× in each dimension, which corresponds to an expansion of the sample volume by more than 1,000‐fold. Our protocol, which we termed X10 microscopy, achieves a resolution of 25–30 nm on conventional epifluorescence microscopes. X10 provides multi‐color images similar or even superior to those produced with more challenging methods, such as STED, STORM, and iterative expansion microscopy (iExM). X10 is therefore the cheapest and easiest option for high‐quality super‐resolution imaging currently available. X10 should be usable in any laboratory, irrespective of the machinery owned or of the technical knowledge.},
author = {Truckenbrodt, Sven M and Maidorn, Manuel and Crzan, Dagmar and Wildhagen, Hanna and Kabatas, Selda and Rizzoli, Silvio O},
issn = {1469-221X},
journal = {EMBO reports},
number = {9},
publisher = {EMBO},
title = {{X10 expansion microscopy enables 25‐nm resolution on conventional microscopes}},
doi = {10.15252/embr.201845836},
volume = {19},
year = {2018},
}
@inbook{6525,
abstract = {This chapter finds an agreement of equivariant indices of semi-classical homomorphisms between pairwise mirror branes in the GL2 Higgs moduli space on a Riemann surface. On one side of the agreement, components of the Lagrangian brane of U(1,1) Higgs bundles, whose mirror was proposed by Hitchin to be certain even exterior powers of the hyperholomorphic Dirac bundle on the SL2 Higgs moduli space, are present. The agreement arises from a mysterious functional equation. This gives strong computational evidence for Hitchin’s proposal.},
author = {Hausel, Tamás and Mellit, Anton and Pei, Du},
booktitle = {Geometry and Physics: Volume I},
isbn = {9780198802013},
pages = {189--218},
publisher = {Oxford University Press},
title = {{Mirror symmetry with branes by equivariant verlinde formulas}},
doi = {10.1093/oso/9780198802013.003.0009},
year = {2018},
}
@inproceedings{6558,
abstract = {This paper studies the problem of distributed stochastic optimization in an adversarial setting where, out of m machines which allegedly compute stochastic gradients every iteration, an α-fraction are Byzantine, and may behave adversarially. Our main result is a variant of stochastic gradient descent (SGD) which finds ε-approximate minimizers of convex functions in T=O~(1/ε²m+α²/ε²) iterations. In contrast, traditional mini-batch SGD needs T=O(1/ε²m) iterations, but cannot tolerate Byzantine failures. Further, we provide a lower bound showing that, up to logarithmic factors, our algorithm is information-theoretically optimal both in terms of sample complexity and time complexity.},
author = {Alistarh, Dan-Adrian and Allen-Zhu, Zeyuan and Li, Jerry},
booktitle = {Advances in Neural Information Processing Systems},
editor = {Bengio, S. and Wallach, H. and Larochelle, H. and Grauman, K. and Cesa-Bianchi, N. and Garnett, R.},
location = {Montreal, Canada},
pages = {4613--4623},
publisher = {Neural Information Processing Systems Foundation},
title = {{Byzantine Stochastic Gradient Descent}},
volume = {Volume 2018},
year = {2018},
}
@inproceedings{6589,
abstract = {Distributed training of massive machine learning models, in particular deep neural networks, via Stochastic Gradient Descent (SGD) is becoming commonplace. Several families of communication-reduction methods, such as quantization, large-batch methods, and gradient sparsification, have been proposed. To date, gradient sparsification methods--where each node sorts gradients by magnitude, and only communicates a subset of the components, accumulating the rest locally--are known to yield some of the largest practical gains. Such methods can reduce the amount of communication per step by up to \emph{three orders of magnitude}, while preserving model accuracy. Yet, this family of methods currently has no theoretical justification. This is the question we address in this paper. We prove that, under analytic assumptions, sparsifying gradients by magnitude with local error correction provides convergence guarantees, for both convex and non-convex smooth objectives, for data-parallel SGD. The main insight is that sparsification methods implicitly maintain bounds on the maximum impact of stale updates, thanks to selection by magnitude. Our analysis and empirical validation also reveal that these methods do require analytical conditions to converge well, justifying existing heuristics.},
author = {Alistarh, Dan-Adrian and Hoefler, Torsten and Johansson, Mikael and Konstantinov, Nikola H and Khirirat, Sarit and Renggli, Cedric},
booktitle = {Advances in Neural Information Processing Systems 31},
location = {Montreal, Canada},
pages = {5973--5983},
publisher = {Neural information processing systems},
title = {{The convergence of sparsified gradient methods}},
volume = {Volume 2018},
year = {2018},
}
@article{6774,
abstract = {A central problem of algebraic topology is to understand the homotopy groups 𝜋𝑑(𝑋) of a topological space X. For the computational version of the problem, it is well known that there is no algorithm to decide whether the fundamental group 𝜋1(𝑋) of a given finite simplicial complex X is trivial. On the other hand, there are several algorithms that, given a finite simplicial complex X that is simply connected (i.e., with 𝜋1(𝑋) trivial), compute the higher homotopy group 𝜋𝑑(𝑋) for any given 𝑑≥2 . However, these algorithms come with a caveat: They compute the isomorphism type of 𝜋𝑑(𝑋) , 𝑑≥2 as an abstract finitely generated abelian group given by generators and relations, but they work with very implicit representations of the elements of 𝜋𝑑(𝑋) . Converting elements of this abstract group into explicit geometric maps from the d-dimensional sphere 𝑆𝑑 to X has been one of the main unsolved problems in the emerging field of computational homotopy theory. Here we present an algorithm that, given a simply connected space X, computes 𝜋𝑑(𝑋) and represents its elements as simplicial maps from a suitable triangulation of the d-sphere 𝑆𝑑 to X. For fixed d, the algorithm runs in time exponential in size(𝑋) , the number of simplices of X. Moreover, we prove that this is optimal: For every fixed 𝑑≥2 , we construct a family of simply connected spaces X such that for any simplicial map representing a generator of 𝜋𝑑(𝑋) , the size of the triangulation of 𝑆𝑑 on which the map is defined, is exponential in size(𝑋) .},
author = {Filakovský, Marek and Franek, Peter and Wagner, Uli and Zhechev, Stephan Y},
issn = {2367-1734},
journal = {Journal of Applied and Computational Topology},
number = {3-4},
pages = {177--231},
publisher = {Springer},
title = {{Computing simplicial representatives of homotopy group elements}},
doi = {10.1007/s41468-018-0021-5},
volume = {2},
year = {2018},
}
@phdthesis{68,
abstract = {The most common assumption made in statistical learning theory is the assumption of the independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between the machine learning theory and applications underlies a growing demand in the development of algorithms that learn from dependent data and theory that can provide generalization guarantees similar to the independent situations. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence on the level of samples in a single learning task. Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove the theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we complement the theoretical study by the empirical one, where we evaluate some of the algorithms on two real world datasets to highlight their practical applicability.},
author = {Zimin, Alexander},
pages = {92},
publisher = {IST Austria},
school = {IST Austria},
title = {{Learning from dependent data}},
doi = {10.15479/AT:ISTA:TH1048},
year = {2018},
}
@article{690,
abstract = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than N−2/3 with a deterministic shift of order (Np)−1.},
author = {Lee, Ji Oon and Schnelli, Kevin},
journal = {Probability Theory and Related Fields},
number = {1-2},
pages = {543--616},
publisher = {Springer},
title = {{Local law and Tracy–Widom limit for sparse random matrices}},
doi = {10.1007/s00440-017-0787-8},
volume = {171},
year = {2018},
}
@article{691,
abstract = {Background: Transport protein particle (TRAPP) is a multisubunit complex that regulates membrane trafficking through the Golgi apparatus. The clinical phenotype associated with mutations in various TRAPP subunits has allowed elucidation of their functions in specific tissues. The role of some subunits in human disease, however, has not been fully established, and their functions remain uncertain.
Objective: We aimed to expand the range of neurodevelopmental disorders associated with mutations in TRAPP subunits by exome sequencing of consanguineous families.
Methods: Linkage and homozygosity mapping and candidate gene analysis were used to identify homozygous mutations in families. Patient fibroblasts were used to study splicing defect and zebrafish to model the disease.
Results: We identified six individuals from three unrelated families with a founder homozygous splice mutation in TRAPPC6B, encoding a core subunit of the complex TRAPP I. Patients manifested a neurodevelopmental disorder characterised by microcephaly, epilepsy and autistic features, and showed splicing defect. Zebrafish trappc6b morphants replicated the human phenotype, displaying decreased head size and neuronal hyperexcitability, leading to a lower seizure threshold.
Conclusion: This study provides clinical and functional evidence of the role of TRAPPC6B in brain development and function.},
author = {Marin Valencia, Isaac and Novarino, Gaia and Johansen, Anide and Rosti, Başak and Issa, Mahmoud and Musaev, Damir and Bhat, Gifty and Scott, Eric and Silhavy, Jennifer and Stanley, Valentina and Rosti, Rasim and Gleeson, Jeremy and Imam, Farhad and Zaki, Maha and Gleeson, Joseph},
journal = {Journal of Medical Genetics},
number = {1},
pages = {48--54},
publisher = {BMJ Publishing Group},
title = {{A homozygous founder mutation in TRAPPC6B associates with a neurodevelopmental disorder characterised by microcephaly epilepsy and autistic features}},
doi = {10.1136/jmedgenet-2017-104627},
volume = {55},
year = {2018},
}
@article{692,
abstract = {We consider families of confocal conics and two pencils of Apollonian circles having the same foci. We will show that these families of curves generate trivial 3-webs and find the exact formulas describing them.},
author = {Akopyan, Arseniy},
journal = {Geometriae Dedicata},
number = {1},
pages = {55--64},
publisher = {Springer},
title = {{3-Webs generated by confocal conics and circles}},
doi = {10.1007/s10711-017-0265-6},
volume = {194},
year = {2018},
}
@inproceedings{6941,
abstract = {Bitcoin has become the most successful cryptocurrency ever deployed, and its most distinctive feature is that it is decentralized. Its underlying protocol (Nakamoto consensus) achieves this by using proof of work, which has the drawback that it causes the consumption of vast amounts of energy to maintain the ledger. Moreover, Bitcoin mining dynamics have become less distributed over time.
Towards addressing these issues, we propose SpaceMint, a cryptocurrency based on proofs of space instead of proofs of work. Miners in SpaceMint dedicate disk space rather than computation. We argue that SpaceMint’s design solves or alleviates several of Bitcoin’s issues: most notably, its large energy consumption. SpaceMint also rewards smaller miners fairly according to their contribution to the network, thus incentivizing more distributed participation.
This paper adapts proof of space to enable its use in cryptocurrency, studies the attacks that can arise against a Bitcoin-like blockchain that uses proof of space, and proposes a new blockchain format and transaction types to address these attacks. Our prototype shows that initializing 1 TB for mining takes about a day (a one-off setup cost), and miners spend on average just a fraction of a second per block mined. Finally, we provide a game-theoretic analysis modeling SpaceMint as an extensive game (the canonical game-theoretic notion for games that take place over time) and show that this stylized game satisfies a strong equilibrium notion, thereby arguing for SpaceMint ’s stability and consensus.},
author = {Park, Sunoo and Kwon, Albert and Fuchsbauer, Georg and Gazi, Peter and Alwen, Joel F and Pietrzak, Krzysztof Z},
booktitle = {22nd International Conference on Financial Cryptography and Data Security},
isbn = {9783662583869},
issn = {0302-9743},
location = {Nieuwpoort, Curacao},
pages = {480--499},
publisher = {Springer Nature},
title = {{SpaceMint: A cryptocurrency based on proofs of space}},
doi = {10.1007/978-3-662-58387-6_26},
volume = {10957},
year = {2018},
}
@article{87,
abstract = {Using the geodesic distance on the n-dimensional sphere, we study the expected radius function of the Delaunay mosaic of a random set of points. Specifically, we consider the partition of the mosaic into intervals of the radius function and determine the expected number of intervals whose radii are less than or equal to a given threshold. We find that the expectations are essentially the same as for the Poisson–Delaunay mosaic in n-dimensional Euclidean space. Assuming the points are not contained in a hemisphere, the Delaunay mosaic is isomorphic to the boundary complex of the convex hull in Rn+1, so we also get the expected number of faces of a random inscribed polytope. As proved in Antonelli et al. [Adv. in Appl. Probab. 9–12 (1977–1980)], an orthant section of the n-sphere is isometric to the standard n-simplex equipped with the Fisher information metric. It follows that the latter space has similar stochastic properties as the n-dimensional Euclidean space. Our results are therefore relevant in information geometry and in population genetics.},
author = {Edelsbrunner, Herbert and Nikitenko, Anton},
journal = {Annals of Applied Probability},
number = {5},
pages = {3215--3238},
publisher = {Institute of Mathematical Statistics},
title = {{Random inscribed polytopes have similar radius functions as Poisson-Delaunay mosaics}},
doi = {10.1214/18-AAP1389},
volume = {28},
year = {2018},
}
@article{913,
abstract = {Coordinated cell polarization in developing tissues is a recurrent theme in multicellular organisms. In plants, a directional distribution of the plant hormone auxin is at the core of many developmental programs. A feedback regulation of auxin on the polarized localization of PIN auxin transporters in individual cells has been proposed as a self-organizing mechanism for coordinated tissue polarization, but the molecular mechanisms linking auxin signalling to PIN-dependent auxin transport remain unknown. We performed a microarray-based approach to find regulators of the auxin-induced PIN relocation in the Arabidopsis thaliana root. We identified a subset of a family of phosphatidylinositol transfer proteins (PITP), the PATELLINs (PATL). Here, we show that PATLs are expressed in partially overlapping cells types in different tissues going through mitosis or initiating differentiation programs. PATLs are plasma membrane-associated proteins accumulated in Arabidopsis embryos, primary roots, lateral root primordia, and developing stomata. Higher order patl mutants display reduced PIN1 repolarization in response to auxin, shorter root apical meristem, and drastic defects in embryo and seedling development. This suggests PATLs redundantly play a crucial role in polarity and patterning in Arabidopsis.},
author = {Tejos, Ricardo and Rodríguez Furlán, Cecilia and Adamowski, Maciek and Sauer, Michael and Norambuena, Lorena and Friml, Jiří},
issn = {00219533},
journal = {Journal of Cell Science},
number = {2},
publisher = {Company of Biologists},
title = {{PATELLINS are regulators of auxin mediated PIN1 relocation and plant development in Arabidopsis thaliana}},
doi = {10.1242/jcs.204198},
volume = {131},
year = {2018},
}
@article{9229,
author = {Danzl, Johann G},
issn = {2500-2295},
journal = {Opera Medica et Physiologica},
number = {S1},
pages = {11},
publisher = {Lobachevsky State University of Nizhny Novgorod},
title = {{Diffraction-unlimited optical imaging for synaptic physiology}},
doi = {10.20388/omp2018.00s1.001},
volume = {4},
year = {2018},
}
@article{12,
abstract = {Molding is a popular mass production method, in which the initial expenses for the mold are offset by the low per-unit production cost. However, the physical fabrication constraints of the molding technique commonly restrict the shape of moldable objects. For a complex shape, a decomposition of the object into moldable parts is a common strategy to address these constraints, with plastic model kits being a popular and illustrative example. However, conducting such a decomposition requires considerable expertise, and it depends on the technical aspects of the fabrication technique, as well as aesthetic considerations. We present an interactive technique to create such decompositions for two-piece molding, in which each part of the object is cast between two rigid mold pieces. Given the surface description of an object, we decompose its thin-shell equivalent into moldable parts by first performing a coarse decomposition and then utilizing an active contour model for the boundaries between individual parts. Formulated as an optimization problem, the movement of the contours is guided by an energy reflecting fabrication constraints to ensure the moldability of each part. Simultaneously, the user is provided with editing capabilities to enforce aesthetic guidelines. Our interactive interface provides control of the contour positions by allowing, for example, the alignment of part boundaries with object features. Our technique enables a novel workflow, as it empowers novice users to explore the design space, and it generates fabrication-ready two-piece molds that can be used either for casting or industrial injection molding of free-form objects.},
author = {Nakashima, Kazutaka and Auzinger, Thomas and Iarussi, Emmanuel and Zhang, Ran and Igarashi, Takeo and Bickel, Bernd},
journal = {ACM Transactions on Graphics},
number = {4},
publisher = {ACM},
title = {{CoreCavity: Interactive shell decomposition for fabrication with two-piece rigid molds}},
doi = {10.1145/3197517.3201341},
volume = {37},
year = {2018},
}
@article{1215,
abstract = {Two generalizations of Itô formula to infinite-dimensional spaces are given.
The first one, in Hilbert spaces, extends the classical one by taking advantage of
cancellations when they occur in examples and it is applied to the case of a group
generator. The second one, based on the previous one and a limit procedure, is an Itô
formula in a special class of Banach spaces having a product structure with the noise
in a Hilbert component; again the key point is the extension due to a cancellation. This
extension to Banach spaces and in particular the specific cancellation are motivated
by path-dependent Itô calculus.},
author = {Flandoli, Franco and Russo, Francesco and Zanco, Giovanni A},
journal = {Journal of Theoretical Probability},
number = {2},
pages = {789--826},
publisher = {Springer},
title = {{Infinite-dimensional calculus under weak spatial regularity of the processes}},
doi = {10.1007/s10959-016-0724-2},
volume = {31},
year = {2018},
}
@article{13,
abstract = {We propose a new method for fabricating digital objects through reusable silicone molds. Molds are generated by casting liquid silicone into custom 3D printed containers called metamolds. Metamolds automatically define the cuts that are needed to extract the cast object from the silicone mold. The shape of metamolds is designed through a novel segmentation technique, which takes into account both geometric and topological constraints involved in the process of mold casting. Our technique is simple, does not require changing the shape or topology of the input objects, and only requires off-the- shelf materials and technologies. We successfully tested our method on a set of challenging examples with complex shapes and rich geometric detail. © 2018 Association for Computing Machinery.},
author = {Alderighi, Thomas and Malomo, Luigi and Giorgi, Daniela and Pietroni, Nico and Bickel, Bernd and Cignoni, Paolo},
journal = {ACM Transactions on Graphics},
number = {4},
publisher = {ACM},
title = {{Metamolds: Computational design of silicone molds}},
doi = {10.1145/3197517.3201381},
volume = {37},
year = {2018},
}
@article{131,
abstract = {XY systems usually show chromosome-wide compensation of X-linked genes, while in many ZW systems, compensation is restricted to a minority of dosage-sensitive genes. Why such differences arose is still unclear. Here, we combine comparative genomics, transcriptomics and proteomics to obtain a complete overview of the evolution of gene dosage on the Z-chromosome of Schistosoma parasites. We compare the Z-chromosome gene content of African (Schistosoma mansoni and S. haematobium) and Asian (S. japonicum) schistosomes and describe lineage-specific evolutionary strata. We use these to assess gene expression evolution following sex-linkage. The resulting patterns suggest a reduction in expression of Z-linked genes in females, combined with upregulation of the Z in both sexes, in line with the first step of Ohno’s classic model of dosage compensation evolution. Quantitative proteomics suggest that post-transcriptional mechanisms do not play a major role in balancing the expression of Z-linked genes. },
author = {Picard, Marion A and Cosseau, Celine and Ferré, Sabrina and Quack, Thomas and Grevelding, Christoph and Couté, Yohann and Vicoso, Beatriz},
journal = {eLife},
publisher = {eLife Sciences Publications},
title = {{Evolution of gene dosage on the Z-chromosome of schistosome parasites}},
doi = {10.7554/eLife.35684},
volume = {7},
year = {2018},
}