@article{929,
abstract = {An essential question of morphogenesis is how patterns arise without preexisting positional information, as inspired by Turing. In the past few years, cytoskeletal flows in the cell cortex have been identified as a key mechanism of molecular patterning at the subcellular level. Theoretical and in vitro studies have suggested that biological polymers such as actomyosin gels have the property to self-organize, but the applicability of this concept in an in vivo setting remains unclear. Here, we report that the regular spacing pattern of supracellular actin rings in the Drosophila tracheal tubule is governed by a self-organizing principle. We propose a simple biophysical model where pattern formation arises from the interplay of myosin contractility and actin turnover. We validate the hypotheses of the model using photobleaching experiments and report that the formation of actin rings is contractility dependent. Moreover, genetic and pharmacological perturbations of the physical properties of the actomyosin gel modify the spacing of the pattern, as the model predicted. In addition, our model posited a role of cortical friction in stabilizing the spacing pattern of actin rings. Consistently, genetic depletion of apical extracellular matrix caused strikingly dynamic movements of actin rings, mirroring our model prediction of a transition from steady to chaotic actin patterns at low cortical friction. Our results therefore demonstrate quantitatively that a hydrodynamical instability of the actin cortex can trigger regular pattern formation and drive morphogenesis in an in vivo setting. },
author = {Hannezo, Edouard and Dong, Bo and Recho, Pierre and Joanny, Jean-François and Hayashi, Shigeo},
journal = {Proceedings of the National Academy of Sciences},
number = {28},
pages = {8620--8625},
publisher = {National Academy of Sciences},
title = {{Cortical instability drives periodic supracellular actin pattern formation in epithelial tubes}},
doi = {10.1073/pnas.1504762112},
volume = {112},
year = {2015},
}
@article{981,
abstract = {The tunability of topological surface states and controllable opening of the Dirac gap are of fundamental and practical interest in the field of topological materials. In the newly discovered topological crystalline insulators (TCIs), theory predicts that the Dirac node is protected by a crystalline symmetry and that the surface state electrons can acquire a mass if this symmetry is broken. Recent studies have detected signatures of a spontaneously generated Dirac gap in TCIs; however, the mechanism of mass formation remains elusive. In this work, we present scanning tunnelling microscopy (STM) measurements of the TCI Pb$_{1-x}$Sn$_x$Se for a wide range of alloy compositions spanning the topological and non-topological regimes. The STM topographies reveal a symmetry-breaking distortion on the surface, which imparts mass to the otherwise massless Dirac electrons—a mechanism analogous to the long sought-after Higgs mechanism in particle physics. Interestingly, the measured Dirac gap decreases on approaching the trivial phase, whereas the magnitude of the distortion remains nearly constant. Our data and calculations reveal that the penetration depth of Dirac surface states controls the magnitude of the Dirac mass. At the limit of the critical composition, the penetration depth is predicted to go to infinity, resulting in zero mass, consistent with our measurements. Finally, we discover the existence of surface states in the non-topological regime, which have the characteristics of gapped, double-branched Dirac fermions and could be exploited in realizing superconductivity in these materials.},
author = {Zeljkovic, Ilija and Okada, Yoshinori and Serbyn, Maksym and Sankar, Raman and Walkup, Daniel and Zhou, Wenwen and Liu, Junwei and Chang, Guoqing and Wang, Yungjui and Hasan, Md Z and Chou, Fangcheng and Lin, Hsin and Bansil, Arun and Fu, Liang and Madhavan, Vidya},
journal = {Nature Materials},
number = {3},
pages = {318--324},
publisher = {Nature Publishing Group},
title = {{Dirac mass generation from crystal symmetry breaking on the surfaces of topological crystalline insulators}},
doi = {10.1038/nmat4215},
volume = {14},
year = {2015},
}
@article{99,
abstract = {Quasiparticle excitations can compromise the performance of superconducting devices, causing high-frequency dissipation, decoherence in Josephson qubits, and braiding errors in proposed Majorana-based topological quantum computers. Quasiparticle dynamics have been studied in detail in metallic superconductors but remain relatively unexplored in semiconductor-superconductor structures, which are now being intensely pursued in the context of topological superconductivity. To this end, we use a system comprising a gate-confined semiconductor nanowire with an epitaxially grown superconductor layer, yielding an isolated, proximitized nanowire segment. We identify bound states in the semiconductor by means of bias spectroscopy, determine the characteristic temperatures and magnetic fields for quasiparticle excitations, and extract a parity lifetime (poisoning time) of the bound state in the semiconductor exceeding 10 ms.},
author = {Higginbotham, Andrew P and Albrecht, S M and Kiršanskas, Gediminas and Chang, W and Kuemmeth, Ferdinand and Krogstrup, Peter and Jespersen, Thomas and Nygård, Jesper and Flensberg, Karsten and Marcus, Charles},
journal = {Nature Physics},
number = {12},
pages = {1017--1021},
publisher = {Nature Publishing Group},
title = {{Parity lifetime of bound states in a proximitized semiconductor nanowire}},
doi = {10.1038/nphys3461},
volume = {11},
year = {2015},
}
@inproceedings{1601,
abstract = {We propose a flexible exchange format for ω-automata, as typically used in formal verification, and implement support for it in a range of established tools. Our aim is to simplify the interaction of tools, helping the research community to build upon other people’s work. A key feature of the format is the use of very generic acceptance conditions, specified by Boolean combinations of acceptance primitives, rather than being limited to common cases such as Büchi, Streett, or Rabin. Such flexibility in the choice of acceptance conditions can be exploited in applications, for example in probabilistic model checking, and furthermore encourages the development of acceptance-agnostic tools for automata manipulations. The format allows acceptance conditions that are either state-based or transition-based, and also supports alternating automata.},
author = {Babiak, Tomáš and Blahoudek, František and Duret-Lutz, Alexandre and Klein, Joachim and Kretinsky, Jan and Mueller, Daniel and Parker, David and Strejček, Jan},
location = {San Francisco, CA, United States},
pages = {479--486},
publisher = {Springer},
title = {{The Hanoi omega-automata format}},
doi = {10.1007/978-3-319-21690-4_31},
volume = {9206},
year = {2015},
}
@misc{5437,
abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff property, the ratio property, and the minimum initial credit for energy property.
The algorithmic problem given a graph and a quantitative property asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with constant treewidth, and it is well-known that the control-flow graphs of most programs have constant treewidth. Let $n$ denote the number of nodes of a graph, $m$ the number of edges (for constant treewidth graphs $m=O(n)$) and $W$ the largest absolute value of the weights.
Our main theoretical results are as follows.
First, for constant treewidth graphs we present an algorithm that approximates the mean-payoff value within a multiplicative factor of $\epsilon$ in time $O(n \cdot \log (n/\epsilon))$ and linear space, as compared to the classical algorithms that require quadratic time. Second, for the ratio property we present an algorithm that for constant treewidth graphs works in time $O(n \cdot \log (|a\cdot b|))=O(n\cdot\log (n\cdot W))$, when the output is $\frac{a}{b}$, as compared to the previously best known algorithm with running time $O(n^2 \cdot \log (n\cdot W))$. Third, for the minimum initial credit problem we show that (i)~for general graphs the problem can be solved in $O(n^2\cdot m)$ time and the associated decision problem can be solved in $O(n\cdot m)$ time, improving the previous known $O(n^3\cdot m\cdot \log (n\cdot W))$ and $O(n^2 \cdot m)$ bounds, respectively; and (ii)~for constant treewidth graphs we present an algorithm that requires $O(n\cdot \log n)$ time, improving the previous known $O(n^4 \cdot \log (n \cdot W))$ bound.
We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks. },
author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
issn = {2664-1690},
pages = {27},
publisher = {IST Austria},
title = {{Faster algorithms for quantitative verification in constant treewidth graphs}},
doi = {10.15479/AT:IST-2015-330-v2-1},
year = {2015},
}
@inproceedings{1606,
abstract = {In this paper, we present the first steps toward a runtime verification framework for monitoring hybrid and cyber-physical systems (CPS) development tools based on randomized differential testing. The development tools include hybrid systems reachability analysis tools, model-based development environments like Simulink/Stateflow (SLSF), etc. First, hybrid automaton models are randomly generated. Next, these hybrid automaton models are translated to a number of different tools (currently, SpaceEx, dReach, Flow*, HyCreate, and the MathWorks’ Simulink/Stateflow) using the HyST source transformation and translation tool. Then, the hybrid automaton models are executed in the different tools and their outputs are parsed. The final step is the differential comparison: the outputs of the different tools are compared. If the results do not agree (in the sense that an analysis or verification result from one tool does not match that of another tool, ignoring timeouts, etc.), a candidate bug is flagged and the model is saved for future analysis by the user. The process then repeats and the monitoring continues until the user terminates the process. We present preliminary results that have been useful in identifying a few bugs in the analysis methods of different development tools, and in an earlier version of HyST.},
author = {Nguyen, Luan and Schilling, Christian and Bogomolov, Sergiy and Johnson, Taylor},
location = {Vienna, Austria},
pages = {281--286},
publisher = {Springer},
title = {{Runtime verification for hybrid analysis tools}},
doi = {10.1007/978-3-319-23820-3_19},
volume = {9333},
year = {2015},
}
@inproceedings{1670,
abstract = {Planning in hybrid domains poses a special challenge due to the involved mixed discrete-continuous dynamics. A recent solving approach for such domains is based on applying model checking techniques on a translation of PDDL+ planning problems to hybrid automata. However, the proposed translation is limited because must behavior is only overapproximated, and hence, processes and events are not reflected exactly. In this paper, we present the theoretical foundation of an exact PDDL+ translation. We propose a schema to convert a hybrid automaton with must transitions into an equivalent hybrid automaton featuring only may transitions.},
author = {Bogomolov, Sergiy and Magazzeni, Daniele and Minopoli, Stefano and Wehrle, Martin},
location = {Jerusalem, Israel},
pages = {42--46},
publisher = {AAAI Press},
title = {{PDDL+ planning with hybrid automata: Foundations of translating must behavior}},
year = {2015},
}
@article{1810,
abstract = {Combining antibiotics is a promising strategy for increasing treatment efficacy and for controlling resistance evolution. When drugs are combined, their effects on cells may be amplified or weakened, that is the drugs may show synergistic or antagonistic interactions. Recent work revealed the underlying mechanisms of such drug interactions by elucidating the drugs'; joint effects on cell physiology. Moreover, new treatment strategies that use drug combinations to exploit evolutionary tradeoffs were shown to affect the rate of resistance evolution in predictable ways. High throughput studies have further identified drug candidates based on their interactions with established antibiotics and general principles that enable the prediction of drug interactions were suggested. Overall, the conceptual and technical foundation for the rational design of potent drug combinations is rapidly developing.},
author = {Bollenbach, Mark Tobias},
journal = {Current Opinion in Microbiology},
pages = {1--9},
publisher = {Elsevier},
title = {{Antimicrobial interactions: Mechanisms and implications for drug discovery and resistance evolution}},
doi = {10.1016/j.mib.2015.05.008},
volume = {27},
year = {2015},
}
@inproceedings{1839,
abstract = {We present MultiGain, a tool to synthesize strategies for Markov decision processes (MDPs) with multiple mean-payoff objectives. Our models are described in PRISM, and our tool uses the existing interface and simulator of PRISM. Our tool extends PRISM by adding novel algorithms for multiple mean-payoff objectives, and also provides features such as (i) generating strategies and exploring them for simulation, and checking them with respect to other properties; and (ii) generating an approximate Pareto curve for two mean-payoff objectives. In addition, we present a new practical algorithm for the analysis of MDPs with multiple mean-payoff objectives under memoryless strategies.},
author = {Brázdil, Tomáš and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
location = {London, United Kingdom},
pages = {181--187},
publisher = {Springer},
title = {{Multigain: A controller synthesis tool for MDPs with multiple mean-payoff objectives}},
doi = {10.1007/978-3-662-46681-0_12},
volume = {9035},
year = {2015},
}
@article{1846,
abstract = {Modal transition systems (MTS) is a well-studied specification formalism of reactive systems supporting a step-wise refinement methodology. Despite its many advantages, the formalism as well as its currently known extensions are incapable of expressing some practically needed aspects in the refinement process like exclusive, conditional and persistent choices. We introduce a new model called parametric modal transition systems (PMTS) together with a general modal refinement notion that overcomes many of the limitations. We investigate the computational complexity of modal and thorough refinement checking on PMTS and its subclasses and provide a direct encoding of the modal refinement problem into quantified Boolean formulae, allowing us to employ state-of-the-art QBF solvers for modal refinement checking. The experiments we report on show that the feasibility of refinement checking is more influenced by the degree of nondeterminism rather than by the syntactic restrictions on the types of formulae allowed in the description of the PMTS.},
author = {Beneš, Nikola and Kretinsky, Jan and Larsen, Kim and Möller, Mikael and Sickert, Salomon and Srba, Jiří},
journal = {Acta Informatica},
number = {2-3},
pages = {269--297},
publisher = {Springer},
title = {{Refinement checking on parametric modal transition systems}},
doi = {10.1007/s00236-015-0215-4},
volume = {52},
year = {2015},
}
@article{2034,
abstract = {Opacity is a generic security property, that has been defined on (non-probabilistic) transition systems and later on Markov chains with labels. For a secret predicate, given as a subset of runs, and a function describing the view of an external observer, the value of interest for opacity is a measure of the set of runs disclosing the secret. We extend this definition to the richer framework of Markov decision processes, where non-deterministicchoice is combined with probabilistic transitions, and we study related decidability problems with partial or complete observation hypotheses for the schedulers. We prove that all questions are decidable with complete observation and ω-regular secrets. With partial observation, we prove that all quantitative questions are undecidable but the question whether a system is almost surely non-opaquebecomes decidable for a restricted class of ω-regular secrets, as well as for all ω-regular secrets under finite-memory schedulers.},
author = {Bérard, Béatrice and Chatterjee, Krishnendu and Sznajder, Nathalie},
journal = {Information Processing Letters},
number = {1},
pages = {52--59},
publisher = {Elsevier},
title = {{Probabilistic opacity for Markov decision processes}},
doi = {10.1016/j.ipl.2014.09.001},
volume = {115},
year = {2015},
}
@article{1694,
abstract = {
We introduce quantitative timed refinement and timed simulation (directed) metrics, incorporating zenoness checks, for timed systems. These metrics assign positive real numbers which quantify the timing mismatches between two timed systems, amongst non-zeno runs. We quantify timing mismatches in three ways: (1) the maximal timing mismatch that can arise, (2) the “steady-state” maximal timing mismatches, where initial transient timing mismatches are ignored; and (3) the (long-run) average timing mismatches amongst two systems. These three kinds of mismatches constitute three important types of timing differences. Our event times are the global times, measured from the start of the system execution, not just the time durations of individual steps. We present algorithms over timed automata for computing the three quantitative simulation distances to within any desired degree of accuracy. In order to compute the values of the quantitative simulation distances, we use a game theoretic formulation. We introduce two new kinds of objectives for two player games on finite-state game graphs: (1) eventual debit-sum level objectives, and (2) average debit-sum level objectives. We present algorithms for computing the optimal values for these objectives in graph games, and then use these algorithms to compute the values of the timed simulation distances over timed automata.
},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
journal = {IEEE Transactions on Automatic Control},
number = {9},
pages = {2291--2306},
publisher = {IEEE},
title = {{Quantitative temporal simulation and refinement distances for timed systems}},
doi = {10.1109/TAC.2015.2404612},
volume = {60},
year = {2015},
}
@inproceedings{1656,
abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other know decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
booktitle = {Proceedings - Symposium on Logic in Computer Science},
location = {Kyoto, Japan},
publisher = {IEEE},
title = {{Nested weighted automata}},
doi = {10.1109/LICS.2015.72},
volume = {2015-July},
year = {2015},
}
@inproceedings{1714,
abstract = {We present a flexible framework for the automated competitive analysis of on-line scheduling algorithms for firm-deadline real-time tasks based on multi-objective graphs: Given a task set and an on-line scheduling algorithm specified as a labeled transition system, along with some optional safety, liveness, and/or limit-average constraints for the adversary, we automatically compute the competitive ratio of the algorithm w.r.t. A clairvoyant scheduler. We demonstrate the flexibility and power of our approach by comparing the competitive ratio of several on-line algorithms, including Dover, that have been proposed in the past, for various task sets. Our experimental results reveal that none of these algorithms is universally optimal, in the sense that there are task sets where other schedulers provide better performance. Our framework is hence a very useful design tool for selecting optimal algorithms for a given application.},
author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Kößler, Alexander and Schmid, Ulrich},
booktitle = {Real-Time Systems Symposium},
location = {Rome, Italy},
number = {January},
pages = {118--127},
publisher = {IEEE},
title = {{A framework for automated competitive analysis of on-line scheduling of firm-deadline tasks}},
doi = {10.1109/RTSS.2014.9},
volume = {2015},
year = {2015},
}
@article{7457,
abstract = {A new organic–inorganic ferroelectric hybrid capacitor designed by uniformly incorporating surface modified monodisperse 15 nm ferroelectric BaTiO3 nanocubes into non-polar polymer blends of poly(methyl methacrylate) (PMMA) polymer and acrylonitrile-butadiene-styrene (ABS) terpolymer is described. The investigation of spatial distribution of nanofillers via a non-distractive thermal pulse method illustrates that the surface functionalization of nanocubes plays a key role in the uniform distribution of charge polarization within the polymer matrix. The discharged energy density of the nanocomposite with 30 vol% BaTiO3 nanocubes is ∼44 × 10−3 J cm−3, which is almost six times higher than that of the neat polymer. The facile processing, along with the superior mechanical and electrical properties of the BaTiO3/PMMA–ABS nanocomposites make them suitable for implementation into capacitive electrical energy storage devices.},
author = {Parizi, Saman Salemizadeh and Conley, Gavin and Costanzo, Tommaso and Howell, Bob and Mellinger, Axel and Caruntu, Gabriel},
issn = {2046-2069},
journal = {RSC Advances},
number = {93},
pages = {76356--76362},
publisher = {RSC},
title = {{Fabrication of barium titanate/acrylonitrile-butadiene styrene/poly(methyl methacrylate) nanocomposite films for hybrid ferroelectric capacitors}},
doi = {10.1039/c5ra11347d},
volume = {5},
year = {2015},
}
@inproceedings{1512,
abstract = {We show that very weak topological assumptions are enough to ensure the existence of a Helly-type theorem. More precisely, we show that for any non-negative integers b and d there exists an integer h(b,d) such that the following holds. If F is a finite family of subsets of R^d such that the ith reduced Betti number (with Z_2 coefficients in singular homology) of the intersection of any proper subfamily G of F is at most b for every non-negative integer i less or equal to (d-1)/2, then F has Helly number at most h(b,d). These topological conditions are sharp: not controlling any of these first Betti numbers allow for families with unbounded Helly number. Our proofs combine homological non-embeddability results with a Ramsey-based approach to build, given an arbitrary simplicial complex K, some well-behaved chain map from C_*(K) to C_*(R^d). Both techniques are of independent interest.},
author = {Goaoc, Xavier and Paták, Pavel and Patakova, Zuzana and Tancer, Martin and Wagner, Uli},
location = {Eindhoven, Netherlands},
pages = {507--521},
publisher = {ACM},
title = {{Bounding Helly numbers via Betti numbers}},
doi = {10.4230/LIPIcs.SOCG.2015.507},
volume = {34},
year = {2015},
}
@article{1598,
abstract = {We consider Markov decision processes (MDPs) with specifications given as Büchi (liveness) objectives, and examine the problem of computing the set of almost-sure winning vertices such that the objective can be ensured with probability 1 from these vertices. We study for the first time the average-case complexity of the classical algorithm for computing the set of almost-sure winning vertices for MDPs with Büchi objectives. Our contributions are as follows: First, we show that for MDPs with constant out-degree the expected number of iterations is at most logarithmic and the average-case running time is linear (as compared to the worst-case linear number of iterations and quadratic time complexity). Second, for the average-case analysis over all MDPs we show that the expected number of iterations is constant and the average-case running time is linear (again as compared to the worst-case linear number of iterations and quadratic time complexity). Finally we also show that when all MDPs are equally likely, the probability that the classical algorithm requires more than a constant number of iterations is exponentially small.},
author = {Chatterjee, Krishnendu and Joglekar, Manas and Shah, Nisarg},
journal = {Theoretical Computer Science},
number = {3},
pages = {71--89},
publisher = {Elsevier},
title = {{Average case analysis of the classical algorithm for Markov decision processes with Büchi objectives}},
doi = {10.1016/j.tcs.2015.01.050},
volume = {573},
year = {2015},
}
@article{1311,
abstract = {In this paper, we develop an energy method to study finite speed of propagation and waiting time phenomena for the stochastic porous media equation with linear multiplicative noise in up to three spatial dimensions. Based on a novel iteration technique and on stochastic counterparts of weighted integral estimates used in the deterministic setting, we formulate a sufficient criterion on the growth of initial data which locally guarantees a waiting time phenomenon to occur almost surely. Up to a logarithmic factor, this criterion coincides with the optimal criterion known from the deterministic setting. Our technique can be modified to prove finite speed of propagation as well.},
author = {Fischer, Julian and Grün, Günther},
journal = {SIAM Journal on Mathematical Analysis},
number = {1},
pages = {825--854},
publisher = {Society for Industrial and Applied Mathematics},
title = {{Finite speed of propagation and waiting times for the stochastic porous medium equation: A unifying approach}},
doi = {10.1137/140960578},
volume = {47},
year = {2015},
}
@article{1316,
abstract = {In the present work we introduce the notion of a renormalized solution for reaction–diffusion systems with entropy-dissipating reactions. We establish the global existence of renormalized solutions. In the case of integrable reaction terms our notion of a renormalized solution reduces to the usual notion of a weak solution. Our existence result in particular covers all reaction–diffusion systems involving a single reversible reaction with mass-action kinetics and (possibly species-dependent) Fick-law diffusion; more generally, it covers the case of systems of reversible reactions with mass-action kinetics which satisfy the detailed balance condition. For such equations the existence of any kind of solution in general was an open problem, thereby motivating the study of renormalized solutions.},
author = {Fischer, Julian},
journal = {Archive for Rational Mechanics and Analysis},
number = {1},
pages = {553--587},
publisher = {Springer},
title = {{Global existence of renormalized solutions to entropy-dissipating reaction–diffusion systems}},
doi = {10.1007/s00205-015-0866-x},
volume = {218},
year = {2015},
}
@inproceedings{1474,
abstract = {Cryptographic access control offers selective access to encrypted data via a combination of key management and functionality-rich cryptographic schemes, such as attribute-based encryption. Using this approach, publicly available meta-data may inadvertently leak information on the access policy that is enforced by cryptography, which renders cryptographic access control unusable in settings where this information is highly sensitive. We begin to address this problem by presenting rigorous definitions for policy privacy in cryptographic access control. For concreteness we set our results in the model of Role-Based Access Control (RBAC), where we identify and formalize several different flavors of privacy, however, our framework should serve as inspiration for other models of access control. Based on our insights we propose a new system which significantly improves on the privacy properties of state-of-the-art constructions. Our design is based on a novel type of privacy-preserving attribute-based encryption, which we introduce and show how to instantiate. We present our results in the context of a cryptographic RBAC system by Ferrara et al. (CSF'13), which uses cryptography to control read access to files, while write access is still delegated to trusted monitors. We give an extension of the construction that permits cryptographic control over write access. Our construction assumes that key management uses out-of-band channels between the policy enforcer and the users but eliminates completely the need for monitoring read/write access to the data.},
author = {Ferrara, Anna and Fuchsbauer, Georg and Liu, Bin and Warinschi, Bogdan},
location = {Verona, Italy},
pages = {46--60},
publisher = {IEEE},
title = {{Policy privacy in cryptographic access control}},
doi = {10.1109/CSF.2015.11},
year = {2015},
}
@inproceedings{1424,
abstract = {We consider the problem of statistical computations with persistence diagrams, a summary representation of topological features in data. These diagrams encode persistent homology, a widely used invariant in topological data analysis. While several avenues towards a statistical treatment of the diagrams have been explored recently, we follow an alternative route that is motivated by the success of methods based on the embedding of probability measures into reproducing kernel Hilbert spaces. In fact, a positive definite kernel on persistence diagrams has recently been proposed, connecting persistent homology to popular kernel-based learning techniques such as support vector machines. However, important properties of that kernel enabling a principled use in the context of probability measure embeddings remain to be explored. Our contribution is to close this gap by proving universality of a variant of the original kernel, and to demonstrate its effective use in twosample hypothesis testing on synthetic as well as real-world data.},
author = {Kwitt, Roland and Huber, Stefan and Niethammer, Marc and Lin, Weili and Bauer, Ulrich},
location = {Montreal, Canada},
pages = {3070--3078},
publisher = {Neural Information Processing Systems},
title = {{Statistical topological data analysis - A kernel perspective}},
volume = {28},
year = {2015},
}
@inbook{1544,
abstract = {Cell division in prokaryotes and eukaryotes is commonly initiated by the well-controlled binding of proteins to the cytoplasmic side of the cell membrane. However, a precise characterization of the spatiotemporal dynamics of membrane-bound proteins is often difficult to achieve in vivo. Here, we present protocols for the use of supported lipid bilayers to rebuild the cytokinetic machineries of cells with greatly different dimensions: the bacterium Escherichia coli and eggs of the vertebrate Xenopus laevis. Combined with total internal reflection fluorescence microscopy, these experimental setups allow for precise quantitative analyses of membrane-bound proteins. The protocols described to obtain glass-supported membranes from bacterial and vertebrate lipids can be used as starting points for other reconstitution experiments. We believe that similar biochemical assays will be instrumental to study the biochemistry and biophysics underlying a variety of complex cellular tasks, such as signaling, vesicle trafficking, and cell motility.},
author = {Nguyen, Phuong and Field, Christine and Groen, Aaron and Mitchison, Timothy and Loose, Martin},
booktitle = {Building a Cell from its Component Parts},
pages = {223 -- 241},
publisher = {Academic Press},
title = {{Using supported bilayers to study the spatiotemporal organization of membrane-bound proteins}},
doi = {10.1016/bs.mcb.2015.01.007},
volume = {128},
year = {2015},
}
@inbook{1549,
abstract = {Nature has incorporated small photochromic molecules, colloquially termed 'photoswitches', in photoreceptor proteins to sense optical cues in photo-taxis and vision. While Nature's ability to employ light-responsive functionalities has long been recognized, it was not until recently that scientists designed, synthesized and applied synthetic photochromes to manipulate biological processes with the temporal and spatial resolution of light. Ion channels in particular, many of which open rapidly and locally in their native cell types, have come to the forefront of proteins that can be put under the designer control of synthetic photochromes. Photochromic ion channel controllers are comprised of three classes, photochromic soluble ligands (PCLs), photochromic tethered ligands (PTLs) and photochromic crosslinkers (PXs), and in each class ion channel functionality is controlled through reversible changes in photochrome structure. By acting as light-dependent ion channel agonists, antagonists or modulators, photochromic controllers effectively converted a wide range of ion channels, including voltage-gated ion channels, 'leak channels', tri-, tetra- and pentameric ligand-gated ion channels, and temperature-sensitive ion channels, into man-made photoreceptors. Control by photochromes can be reversible, unlike in the case of 'caged' compounds, and non-invasive with high spatial precision, unlike pharmacology and electrical manipulation. Here, we introduce design principles of emerging photochromic molecules that act on ion channels and discuss the impact that these molecules are beginning to have on ion channel biophysics and neuronal physiology.},
author = {Mckenzie, Catherine and Sanchez Romero, Inmaculada and Janovjak, Harald L},
booktitle = {Novel chemical tools to study ion channel biology},
isbn = {978-1-4939-2844-6},
pages = {101 -- 117},
publisher = {Springer},
title = {{Flipping the photoswitch: Ion channels under light control}},
doi = {10.1007/978-1-4939-2845-3_6},
volume = {869},
year = {2015},
}
@article{1551,
abstract = {Reciprocal coevolution between host and pathogen is widely seen as a major driver of evolution and biological innovation. Yet, to date, the underlying genetic mechanisms and associated trait functions that are unique to rapid coevolutionary change are generally unknown. We here combined experimental evolution of the bacterial biocontrol agent Bacillus thuringiensis and its nematode host Caenorhabditis elegans with large-scale phenotyping, whole genome analysis, and functional genetics to demonstrate the selective benefit of pathogen virulence and the underlying toxin genes during the adaptation process. We show that: (i) high virulence was specifically favoured during pathogen–host coevolution rather than pathogen one-sided adaptation to a nonchanging host or to an environment without host; (ii) the pathogen genotype BT-679 with known nematocidal toxin genes and high virulence specifically swept to fixation in all of the independent replicate populations under coevolution but only some under one-sided adaptation; (iii) high virulence in the BT-679-dominated populations correlated with elevated copy numbers of the plasmid containing the nematocidal toxin genes; (iv) loss of virulence in a toxin-plasmid lacking BT-679 isolate was reconstituted by genetic reintroduction or external addition of the toxins.We conclude that sustained coevolution is distinct from unidirectional selection in shaping the pathogen's genome and life history characteristics. To our knowledge, this study is the first to characterize the pathogen genes involved in coevolutionary adaptation in an animal host–pathogen interaction system.},
author = {El Masri, Leila and Branca, Antoine and Sheppard, Anna and Papkou, Andrei and Laehnemann, David and Guenther, Patrick and Prahl, Swantje and Saebelfeld, Manja and Hollensteiner, Jacqueline and Liesegang, Heiko and Brzuszkiewicz, Elzbieta and Daniel, Rolf and Michiels, Nico and Schulte, Rebecca and Kurtz, Joachim and Rosenstiel, Philip and Telschow, Arndt and Bornberg Bauer, Erich and Schulenburg, Hinrich},
journal = {PLoS Biology},
number = {6},
pages = {1 -- 30},
publisher = {Public Library of Science},
title = {{Host–pathogen coevolution: The selective advantage of Bacillus thuringiensis virulence and its cry toxin genes}},
doi = {10.1371/journal.pbio.1002169},
volume = {13},
year = {2015},
}
@article{1556,
abstract = {The elongator complex subunit 2 (ELP2) protein, one subunit of an evolutionarily conserved histone acetyltransferase complex, has been shown to participate in leaf patterning, plant immune and abiotic stress responses in Arabidopsis thaliana. Here, its role in root development was explored. Compared to the wild type, the elp2 mutant exhibited an accelerated differentiation of its root stem cells and cell division was more active in its quiescent centre (QC). The key transcription factors responsible for maintaining root stem cell and QC identity, such as AP2 transcription factors PLT1 (PLETHORA1) and PLT2 (PLETHORA2), GRAS transcription factors such as SCR (SCARECROW) and SHR (SHORT ROOT) and WUSCHEL-RELATED HOMEOBOX5 transcription factor WOX5, were all strongly down-regulated in the mutant. On the other hand, expression of the G2/M transition activator CYCB1 was substantially induced in elp2. The auxin efflux transporters PIN1 and PIN2 showed decreased protein levels and PIN1 also displayed mild polarity alterations in elp2, which resulted in a reduced auxin content in the root tip. Either the acetylation or methylation level of each of these genes differed between the mutant and the wild type, suggesting that the ELP2 regulation of root development involves the epigenetic modification of a range of transcription factors and other developmental regulators.},
author = {Jia, Yuebin and Tian, Huiyu and Li, Hongjiang and Yu, Qianqian and Wang, Lei and Friml, Jiří and Ding, Zhaojun},
journal = {Journal of Experimental Botany},
number = {15},
pages = {4631 -- 4642},
publisher = {Oxford University Press},
title = {{The Arabidopsis thaliana elongator complex subunit 2 epigenetically affects root development}},
doi = {10.1093/jxb/erv230},
volume = {66},
year = {2015},
}
@article{1563,
abstract = {For a given self-map $f$ of $M$, a closed smooth connected and simply-connected manifold of dimension $m\geq 4$, we provide an algorithm for estimating the values of the topological invariant $D^m_r[f]$, which equals the minimal number of $r$-periodic points in the smooth homotopy class of $f$. Our results are based on the combinatorial scheme for computing $D^m_r[f]$ introduced by G. Graff and J. Jezierski [J. Fixed Point Theory Appl. 13 (2013), 63-84]. An open-source implementation of the algorithm programmed in C++ is publicly available at {\tt http://www.pawelpilarczyk.com/combtop/}.},
author = {Graff, Grzegorz and Pilarczyk, Pawel},
journal = {Topological Methods in Nonlinear Analysis},
number = {1},
pages = {273 -- 286},
publisher = {Juliusz Schauder Center for Nonlinear Studies},
title = {{An algorithmic approach to estimating the minimal number of periodic points for smooth self-maps of simply-connected manifolds}},
doi = {10.12775/TMNA.2015.014},
volume = {45},
year = {2015},
}
@article{1513,
abstract = {Insects of the order Hemiptera (true bugs) use a wide range of mechanisms of sex determination, including genetic sex determination, paternal genome elimination, and haplodiploidy. Genetic sex determination, the prevalent mode, is generally controlled by a pair of XY sex chromosomes or by an XX/X0 system, but different configurations that include additional sex chromosomes are also present. Although this diversity of sex determining systems has been extensively studied at the cytogenetic level, only the X chromosome of the model pea aphid Acyrthosiphon pisum has been analyzed at the genomic level, and little is known about X chromosome biology in the rest of the order.
In this study, we take advantage of published DNA- and RNA-seq data from three additional Hemiptera species to perform a comparative analysis of the gene content and expression of the X chromosome throughout this clade. We find that, despite showing evidence of dosage compensation, the X chromosomes of these species show female-biased expression, and a deficit of male-biased genes, in direct contrast to the pea aphid X. We further detect an excess of shared gene content between these very distant species, suggesting that despite the diversity of sex determining systems, the same chromosomal element is used as the X throughout a large portion of the order. },
author = {Pal, Arka and Vicoso, Beatriz},
journal = {Genome Biology and Evolution},
number = {12},
pages = {3259 -- 3268},
publisher = {Oxford University Press},
title = {{The X chromosome of hemipteran insects: Conservation, dosage compensation and sex-biased expression}},
doi = {10.1093/gbe/evv215},
volume = {7},
year = {2015},
}
@inproceedings{1520,
abstract = {Creating mechanical automata that can walk in stable and pleasing manners is a challenging task that requires both skill and expertise. We propose to use computational design to offset the technical difficulties of this process. A simple drag-and-drop interface allows casual users to create personalized walking toys from a library of pre-defined template mechanisms. Provided with this input, our method leverages physical simulation and evolutionary optimization to refine the mechanical designs such that the resulting toys are able to walk. The optimization process is guided by an intuitive set of objectives that measure the quality of the walking motions. We demonstrate our approach on a set of simulated mechanical toys with different numbers of legs and various distinct gaits. Two fabricated prototypes showcase the feasibility of our designs.},
author = {Bharaj, Gaurav and Coros, Stelian and Thomaszewski, Bernhard and Tompkin, James and Bickel, Bernd and Pfister, Hanspeter},
isbn = {978-1-4503-3496-9},
location = {Los Angeles, CA, United States},
pages = {93 -- 100},
publisher = {ACM},
title = {{Computational design of walking automata}},
doi = {10.1145/2786784.2786803},
year = {2015},
}
@article{1525,
abstract = {Based on 16 recommendations, efforts should be made to achieve the following goal: By 2025, all scholarly publication activity in Austria should be Open Access. In other words, the final versions of all scholarly publications resulting from the support of public resources must be freely accessible on the Internet without delay (Gold Open Access). The resources required to meet this obligation shall be provided to the authors, or the cost of the publication venues shall be borne directly by the research organisations.},
author = {Bauer, Bruno and Blechl, Guido and Bock, Christoph and Danowski, Patrick and Ferus, Andreas and Graschopf, Anton and König, Thomas and Mayer, Katja and Reckling, Falk and Rieck, Katharina and Seitz, Peter and Stöger, Herwig and Welzig, Elvira},
journal = {VÖB Mitteilungen},
number = {3},
pages = {580 -- 607},
publisher = {Verein Österreichischer Bibliothekare},
title = {{Arbeitsgruppe „Nationale Strategie“ des Open Access Network Austria OANA}},
doi = {10.5281/zenodo.33178},
volume = {68},
year = {2015},
}
@article{1506,
abstract = {Consider the square random matrix $A_n = (a_{ij})_{n,n}$, where $\{a_{ij} := a_{ij}^{(n)}, i, j = 1, \ldots, n\}$ is a collection of independent real random variables with means zero and variances one. Under the additional moment condition $\sup_n \max_{1 \leq i,j \leq n} \mathrm{E}a_{ij}^4 < \infty$, we prove Girko's logarithmic law of $\det A_n$ in the sense that, as $n \to \infty$, $\frac{\log|\det A_n| - \frac{1}{2}\log (n-1)!}{\sqrt{\frac{1}{2}\log n}} \overset{d}{\longrightarrow} N(0, 1)$.},
author = {Bao, Zhigang and Pan, Guangming and Zhou, Wang},
journal = {Bernoulli},
number = {3},
pages = {1600 -- 1628},
publisher = {Bernoulli Society for Mathematical Statistics and Probability},
title = {{The logarithmic law of random determinant}},
doi = {10.3150/14-BEJ615},
volume = {21},
year = {2015},
}
@inproceedings{1568,
abstract = {Aiming at the automatic diagnosis of tumors from narrow band imaging (NBI) magnifying endoscopy (ME) images of the stomach, we combine methods from image processing, computational topology, and machine learning to classify patterns into normal, tubular, vessel. Training the algorithm on a small number of images of each type, we achieve a high rate of correct classifications. The analysis of the learning algorithm reveals that a handful of geometric and topological features are responsible for the overwhelming majority of decisions.},
author = {Dunaeva, Olga and Edelsbrunner, Herbert and Lukyanov, Anton and Machin, Michael and Malkova, Daria},
booktitle = {Proceedings - 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing},
location = {Timisoara, Romania},
pages = {7034731},
publisher = {IEEE},
title = {{The classification of endoscopy images with persistent homology}},
doi = {10.1109/SYNASC.2014.81},
year = {2015},
}
@article{1570,
abstract = {Grounding autonomous behavior in the nervous system is a fundamental challenge for neuroscience. In particular, self-organized behavioral development provides more questions than answers. Are there special functional units for curiosity, motivation, and creativity? This paper argues that these features can be grounded in synaptic plasticity itself, without requiring any higher-level constructs. We propose differential extrinsic plasticity (DEP) as a new synaptic rule for self-learning systems and apply it to a number of complex robotic systems as a test case. Without specifying any purpose or goal, seemingly purposeful and adaptive rhythmic behavior is developed, displaying a certain level of sensorimotor intelligence. These surprising results require no system-specific modifications of the DEP rule. They rather arise from the underlying mechanism of spontaneous symmetry breaking, which is due to the tight brain-body-environment coupling. The new synaptic rule is biologically plausible and would be an interesting target for neurobiological investigation. We also argue that this neuronal mechanism may have been a catalyst in natural evolution.},
author = {Der, Ralf and Martius, Georg S},
journal = {PNAS},
number = {45},
pages = {E6224 -- E6232},
publisher = {National Academy of Sciences},
title = {{Novel plasticity rule can explain the development of sensorimotor intelligence}},
doi = {10.1073/pnas.1508400112},
volume = {112},
year = {2015},
}
@article{1575,
abstract = {The immune response relies on the migration of leukocytes and on their ability to stop in precise anatomical locations to fulfil their task. How leukocyte migration and function are coordinated is unknown. Here we show that in immature dendritic cells, which patrol their environment by engulfing extracellular material, cell migration and antigen capture are antagonistic. This antagonism results from transient enrichment of myosin IIA at the cell front, which disrupts the back-to-front gradient of the motor protein, slowing down locomotion but promoting antigen capture. We further highlight that myosin IIA enrichment at the cell front requires the MHC class II-associated invariant chain (Ii). Thus, by controlling myosin IIA localization, Ii imposes on dendritic cells an intermittent antigen capture behaviour that might facilitate environment patrolling. We propose that the requirement for myosin II in both cell migration and specific cell functions may provide a general mechanism for their coordination in time and space.},
author = {Chabaud, Mélanie and Heuzé, Mélina and Bretou, Marine and Vargas, Pablo and Maiuri, Paolo and Solanes, Paola and Maurin, Mathieu and Terriac, Emmanuel and Le Berre, Maël and Lankar, Danielle and Piolot, Tristan and Adelstein, Robert and Zhang, Yingfan and Sixt, Michael K and Jacobelli, Jordan and Bénichou, Olivier and Voituriez, Raphaël and Piel, Matthieu and Lennon Duménil, Ana},
journal = {Nature Communications},
publisher = {Nature Publishing Group},
title = {{Cell migration and antigen capture are antagonistic processes coupled by myosin II in dendritic cells}},
doi = {10.1038/ncomms8526},
volume = {6},
year = {2015},
}
@article{1582,
abstract = {We investigate weighted straight skeletons from a geometric, graph-theoretical, and combinatorial point of view. We start with a thorough definition and shed light on some ambiguity issues in the procedural definition. We investigate the geometry, combinatorics, and topology of faces and the roof model, and we discuss in which cases a weighted straight skeleton is connected. Finally, we show that the weighted straight skeleton of even a simple polygon may be non-planar and may contain cycles, and we discuss under which restrictions on the weights and/or the input polygon the weighted straight skeleton still behaves similar to its unweighted counterpart. In particular, we obtain a non-procedural description and a linear-time construction algorithm for the straight skeleton of strictly convex polygons with arbitrary weights.},
author = {Biedl, Therese and Held, Martin and Huber, Stefan and Kaaser, Dominik and Palfrader, Peter},
journal = {Computational Geometry: Theory and Applications},
number = {2},
pages = {120 -- 133},
publisher = {Elsevier},
title = {{Weighted straight skeletons in the plane}},
doi = {10.1016/j.comgeo.2014.08.006},
volume = {48},
year = {2015},
}
@article{1587,
abstract = {We investigate the quantum interference shifts between energetically close states, where the state structure is observed by laser spectroscopy. We report a compact and analytical expression that models the quantum interference induced shift for any admixture of circular polarization of the incident laser and angle of observation. An experimental scenario free of quantum interference can thus be predicted with this formula. Although this study is exemplified here for muonic deuterium, it can be applied to any other laser spectroscopy measurement of ns-n′p frequencies of a nonrelativistic atomic system, via an ns→n′p→n′′s scheme.},
author = {Amaro, Pedro and Fratini, Filippo and Safari, Laleh and Antognini, Aldo and Indelicato, Paul and Pohl, Randolf and Santos, José},
journal = {Physical Review A - Atomic, Molecular, and Optical Physics},
number = {6},
publisher = {American Physical Society},
title = {{Quantum interference shifts in laser spectroscopy with elliptical polarization}},
doi = {10.1103/PhysRevA.92.062506},
volume = {92},
year = {2015},
}
@article{1638,
abstract = {The mitochondrial respiratory chain, also known as the electron transport chain (ETC), is crucial to life, and energy production in the form of ATP is the main mitochondrial function. Three proton-translocating enzymes of the ETC, namely complexes I, III and IV, generate proton motive force, which in turn drives ATP synthase (complex V). The atomic structures and basic mechanisms of most respiratory complexes have previously been established, with the exception of complex I, the largest complex in the ETC. Recently, the crystal structure of the entire complex I was solved using a bacterial enzyme. The structure provided novel insights into the core architecture of the complex, the electron transfer and proton translocation pathways, as well as the mechanism that couples these two processes.},
author = {Sazanov, Leonid A},
journal = {Nature Reviews Molecular Cell Biology},
number = {6},
pages = {375 -- 388},
publisher = {Nature Publishing Group},
title = {{A giant molecular proton pump: structure and mechanism of respiratory complex I}},
doi = {10.1038/nrm3997},
volume = {16},
year = {2015},
}
@inproceedings{1645,
abstract = {Secret-key constructions are often proved secure in a model where one or more underlying components are replaced by an idealized oracle accessible to the attacker. This model gives rise to information-theoretic security analyses, and several advances have been made in this area over the last few years. This paper provides a systematic overview of what is achievable in this model, and how existing works fit into this view.},
author = {Gazi, Peter and Tessaro, Stefano},
booktitle = {2015 IEEE Information Theory Workshop},
location = {Jerusalem, Israel},
publisher = {IEEE},
title = {{Secret-key cryptography from ideal primitives: A systematic overview}},
doi = {10.1109/ITW.2015.7133163},
year = {2015},
}
@inproceedings{1652,
abstract = {We develop new theoretical tools for proving lower-bounds on the (amortized) complexity of certain functions in models of parallel computation. We apply the tools to construct a class of functions with high amortized memory complexity in the parallel Random Oracle Model (pROM); a variant of the standard ROM allowing for batches of simultaneous queries. In particular we obtain a new, more robust, type of Memory-Hard Functions (MHF); a security primitive which has recently been gaining acceptance in practice as an effective means of countering brute-force attacks on security relevant functions. Along the way we also demonstrate an important shortcoming of previous definitions of MHFs and give a new definition addressing the problem. The tools we develop represent an adaptation of the powerful pebbling paradigm (initially introduced by Hewitt and Paterson [HP70] and Cook [Coo73]) to a simple and intuitive parallel setting. We define a simple pebbling game Gp over graphs which aims to abstract parallel computation in an intuitive way. As a conceptual contribution we define a measure of pebbling complexity for graphs called cumulative complexity (CC) and show how it overcomes a crucial shortcoming (in the parallel setting) exhibited by more traditional complexity measures used in the past. As a main technical contribution we give an explicit construction of a constant in-degree family of graphs whose CC in Gp approaches maximality to within a polylogarithmic factor for any graph of equal size (analogous to the graphs of Tarjan et. al. [PTC76, LT82] for sequential pebbling games). Finally, for a given graph G and related function fG, we derive a lower-bound on the amortized memory complexity of fG in the pROM in terms of the CC of G in the game Gp.},
author = {Alwen, Joel F and Serbinenko, Vladimir},
booktitle = {Proceedings of the 47th annual ACM symposium on Theory of computing},
location = {Portland, OR, United States},
pages = {595 -- 603},
publisher = {ACM},
title = {{High parallel complexity graphs and memory-hard functions}},
doi = {10.1145/2746539.2746622},
year = {2015},
}
@inproceedings{1626,
abstract = {This paper introduces "OmniAD," a novel data-driven pipeline to model and acquire the aerodynamics of three-dimensional rigid objects. Traditionally, aerodynamics are examined through elaborate wind tunnel experiments or expensive fluid dynamics computations, and are only measured for a small number of discrete wind directions. OmniAD allows the evaluation of aerodynamic forces, such as drag and lift, for any incoming wind direction using a novel representation based on spherical harmonics. Our datadriven technique acquires the aerodynamic properties of an object simply by capturing its falling motion using a single camera. Once model parameters are estimated, OmniAD enables realistic realtime simulation of rigid bodies, such as the tumbling and gliding of leaves, without simulating the surrounding air. In addition, we propose an intuitive user interface based on OmniAD to interactively design three-dimensional kites that actually fly. Various nontraditional kites were designed to demonstrate the physical validity of our model.},
author = {Martin, Tobias and Umetani, Nobuyuki and Bickel, Bernd},
location = {Los Angeles, CA, United States},
number = {4},
publisher = {ACM},
title = {{OmniAD: Data-driven omni-directional aerodynamics}},
doi = {10.1145/2766919},
volume = {34},
year = {2015},
}
@article{1695,
abstract = {We give a comprehensive introduction into a diagrammatic method that allows for the evaluation of Gutzwiller wave functions in finite spatial dimensions. We discuss in detail some numerical schemes that turned out to be useful in the real-space evaluation of the diagrams. The method is applied to the problem of d-wave superconductivity in a two-dimensional single-band Hubbard model. Here, we discuss in particular the role of long-range contributions in our diagrammatic expansion. We further reconsider our previous analysis on the kinetic energy gain in the superconducting state.},
author = {Kaczmarczyk, Jan and Schickling, Tobias and Bünemann, Jörg},
journal = {Physica Status Solidi (B): Basic Solid State Physics},
number = {9},
pages = {2059 -- 2071},
publisher = {Wiley},
title = {{Evaluation techniques for Gutzwiller wave functions in finite dimensions}},
doi = {10.1002/pssb.201552082},
volume = {252},
year = {2015},
}
@article{1703,
abstract = {Vegetation clearing and land-use change have depleted many natural plant communities to the point where restoration is required. A major impediment to the success of rebuilding complex vegetation communities is having regular access to sufficient quantities of high-quality seed. Seed-production areas (SPAs) can help generate this seed, but these must be underpinned by a broad genetic base to maximise the evolutionary potential of restored populations. However, genetic bottlenecks can occur at the collection, establishment and production stages in SPAs, requiring genetic evaluation. This is especially relevant for species that may take many years before a return on SPA investment is realised. Two recently established yellow box (Eucalyptus melliodora A.Cunn. ex Schauer, Myrtaceae) SPAs were evaluated to determine whether genetic bottlenecks had occurred between seed collection and SPA establishment. No evidence was found to suggest that a significant loss of genetic diversity had occurred at this stage, although there was a significant difference in diversity between the two SPAs. Complex population genetic structure was also observed in the seed used to source the SPAs, with up to eight groups identified. Plant survival in the SPAs was influenced by seed collection location but not by SPA location and was not associated with genetic diversity. There were also no associations between genetic diversity and plant growth. These data highlighted the importance of chance events when establishing SPAs and indicated that the two yellow box SPAs are likely to provide genetically diverse seed sources for future restoration projects, especially by pooling seed from both SPAs.},
author = {Broadhurst, Linda and Fifield, Graham and Vanzella, Bindi and Pickup, Melinda},
journal = {Australian Journal of Botany},
number = {5},
pages = {455 -- 466},
publisher = {CSIRO},
title = {{An evaluation of the genetic structure of seed sources and the maintenance of genetic diversity during establishment of two yellow box (Eucalyptus melliodora) seed-production areas}},
doi = {10.1071/BT15023},
volume = {63},
year = {2015},
}
@inproceedings{1669,
abstract = {Computational notions of entropy (a.k.a. pseudoentropy) have found many applications, including leakage-resilient cryptography, deterministic encryption or memory delegation. The most important tools to argue about pseudoentropy are chain rules, which quantify by how much (in terms of quantity and quality) the pseudoentropy of a given random variable X decreases when conditioned on some other variable Z (think for example of X as a secret key and Z as information leaked by a side-channel). In this paper we give a very simple and modular proof of the chain rule for HILL pseudoentropy, improving best known parameters. Our version allows for increasing the acceptable length of leakage in applications up to a constant factor compared to the best previous bounds. As a contribution of independent interest, we provide a comprehensive study of all known versions of the chain rule, comparing their worst-case strength and limitations.},
author = {Pietrzak, Krzysztof Z and Skórski, Maciej},
location = {Guadalajara, Mexico},
pages = {81 -- 98},
publisher = {Springer},
title = {{The chain rule for HILL pseudoentropy, revisited}},
doi = {10.1007/978-3-319-22174-8_5},
volume = {9230},
year = {2015},
}
@inproceedings{1671,
abstract = {This paper studies the concrete security of PRFs and MACs obtained by keying hash functions based on the sponge paradigm. One such hash function is KECCAK, selected as NIST’s new SHA-3 standard. In contrast to other approaches like HMAC, the exact security of keyed sponges is not well understood. Indeed, recent security analyses delivered concrete security bounds which are far from existing attacks. This paper aims to close this gap. We prove (nearly) exact bounds on the concrete PRF security of keyed sponges using a random permutation. These bounds are tight for the most relevant ranges of parameters, i.e., for messages of length (roughly) l ≤ min{2n/4, 2r} blocks, where n is the state size and r is the desired output length; and for l ≤ q queries (to the construction or the underlying permutation). Moreover, we also improve standard-model bounds. As an intermediate step of independent interest, we prove tight bounds on the PRF security of the truncated CBC-MAC construction, which operates as plain CBC-MAC, but only returns a prefix of the output.},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Tessaro, Stefano},
location = {Santa Barbara, CA, United States},
pages = {368 -- 387},
publisher = {Springer},
title = {{The exact PRF security of truncation: Tight bounds for keyed sponges and truncated CBC}},
doi = {10.1007/978-3-662-47989-6_18},
volume = {9215},
year = {2015},
}
@article{1683,
abstract = {The 1 MDa, 45-subunit proton-pumping NADH-ubiquinone oxidoreductase (complex I) is the largest complex of the mitochondrial electron transport chain. The molecular mechanism of complex I is central to the metabolism of cells, but has yet to be fully characterized. The last two years have seen steady progress towards this goal with the first atomic-resolution structure of the entire bacterial complex I, a 5 Å cryo-electron microscopy map of bovine mitochondrial complex I and a ∼3.8 Å resolution X-ray crystallographic study of mitochondrial complex I from yeast Yarrowia lipotytica. In this review we will discuss what we have learned from these studies and what remains to be elucidated.},
author = {Letts, James A and Sazanov, Leonid A},
journal = {Current Opinion in Structural Biology},
number = {8},
pages = {135 -- 145},
publisher = {Elsevier},
title = {{Gaining mass: The structure of respiratory complex I---from bacterial towards mitochondrial versions}},
doi = {10.1016/j.sbi.2015.08.008},
volume = {33},
year = {2015},
}
@article{1688,
abstract = {We estimate the selection constant in the following geometric selection theorem by Pach: For every positive integer d, there is a constant (Formula presented.) such that whenever (Formula presented.) are n-element subsets of (Formula presented.), we can find a point (Formula presented.) and subsets (Formula presented.) for every i∈[d+1], each of size at least cdn, such that p belongs to all rainbowd-simplices determined by (Formula presented.) simplices with one vertex in each Yi. We show a super-exponentially decreasing upper bound (Formula presented.). The ideas used in the proof of the upper bound also help us to prove Pach’s theorem with (Formula presented.), which is a lower bound doubly exponentially decreasing in d (up to some polynomial in the exponent). For comparison, Pach’s original approach yields a triply exponentially decreasing lower bound. On the other hand, Fox, Pach, and Suk recently obtained a hypergraph density result implying a proof of Pach’s theorem with (Formula presented.). In our construction for the upper bound, we use the fact that the minimum solid angle of every d-simplex is super-exponentially small. This fact was previously unknown and might be of independent interest. For the lower bound, we improve the ‘separation’ part of the argument by showing that in one of the key steps only d+1 separations are necessary, compared to 2d separations in the original proof. We also provide a measure version of Pach’s theorem.},
author = {Karasev, Roman and Kynčl, Jan and Paták, Pavel and Patáková, Zuzana and Tancer, Martin},
journal = {Discrete \& Computational Geometry},
number = {3},
pages = {610 -- 636},
publisher = {Springer},
title = {{Bounds for Pach's selection theorem and for the minimum solid angle in a simplex}},
doi = {10.1007/s00454-015-9720-z},
volume = {54},
year = {2015},
}
@article{1664,
abstract = {Over a century of research into the origin of turbulence in wall-bounded shear flows has resulted in a puzzling picture in which turbulence appears in a variety of different states competing with laminar background flow. At moderate flow speeds, turbulence is confined to localized patches; it is only at higher speeds that the entire flow becomes turbulent. The origin of the different states encountered during this transition, the front dynamics of the turbulent regions and the transformation to full turbulence have yet to be explained. By combining experiments, theory and computer simulations, here we uncover a bifurcation scenario that explains the transformation to fully turbulent pipe flow and describe the front dynamics of the different states encountered in the process. Key to resolving this problem is the interpretation of the flow as a bistable system with nonlinear propagation (advection) of turbulent fronts. These findings bridge the gap between our understanding of the onset of turbulence and fully turbulent flows.},
author = {Barkley, Dwight and Song, Baofang and Vasudevan, Mukund and Lemoult, Grégoire M and Avila, Marc and Hof, Björn},
journal = {Nature},
number = {7574},
pages = {550 -- 553},
publisher = {Nature Publishing Group},
title = {{The rise of fully turbulent flow}},
doi = {10.1038/nature15701},
volume = {526},
year = {2015},
}
@article{1710,
abstract = {We consider the hollow on the half-plane {(x, y) : y ≤ 0} ⊂ ℝ2 defined by a function u : (-1, 1) → ℝ, u(x) < 0, and a vertical flow of point particles incident on the hollow. It is assumed that u satisfies the so-called single impact condition (SIC): each incident particle is elastically reflected by graph(u) and goes away without hitting the graph of u anymore. We solve the problem: find the function u minimizing the force of resistance created by the flow. We show that the graph of the minimizer is formed by two arcs of parabolas symmetric to each other with respect to the y-axis. Assuming that the resistance of u ≡ 0 equals 1, we show that the minimal resistance equals π/2 - 2arctan(1/2) ≈ 0.6435. This result completes the previously obtained result [SIAM J. Math. Anal., 46 (2014), pp. 2730-2742] stating in particular that the minimal resistance of a hollow in higher dimensions equals 0.5. We additionally consider a similar problem of minimal resistance, where the hollow in the half-space {(x1,...,xd,y) : y ≤ 0} ⊂ ℝd+1 is defined by a radial function U satisfying the SIC, U(x) = u(|x|), with x = (x1,...,xd), u(ξ) < 0 for 0 ≤ ξ < 1, and u(ξ) = 0 for ξ ≥ 1, and the flow is parallel to the y-axis. The minimal resistance is greater than 0.5 (and coincides with 0.6435 when d = 1) and converges to 0.5 as d → ∞.},
author = {Akopyan, Arseniy and Plakhov, Alexander},
journal = {SIAM Journal on Mathematical Analysis},
number = {4},
pages = {2754 -- 2769},
publisher = {SIAM},
title = {{Minimal resistance of curves under the single impact assumption}},
doi = {10.1137/140993843},
volume = {47},
year = {2015},
}
@article{1676,
author = {Sixt, Michael K and Raz, Erez},
journal = {Current Opinion in Cell Biology},
number = {10},
pages = {4 -- 6},
publisher = {Elsevier},
title = {{Editorial overview: Cell adhesion and migration}},
doi = {10.1016/j.ceb.2015.09.004},
volume = {36},
year = {2015},
}
@article{1734,
abstract = {Facial appearance capture is now firmly established within academic research and used extensively across various application domains, perhaps most prominently in the entertainment industry through the design of virtual characters in video games and films. While significant progress has occurred over the last two decades, no single survey currently exists that discusses the similarities, differences, and practical considerations of the available appearance capture techniques as applied to human faces. A central difficulty of facial appearance capture is the way light interacts with skin-which has a complex multi-layered structure-and the interactions that occur below the skin surface can, by definition, only be observed indirectly. In this report, we distinguish between two broad strategies for dealing with this complexity. "Image-based methods" try to exhaustively capture the exact face appearance under different lighting and viewing conditions, and then render the face through weighted image combinations. "Parametric methods" instead fit the captured reflectance data to some parametric appearance model used during rendering, allowing for a more lightweight and flexible representation but at the cost of potentially increased rendering complexity or inexact reproduction. The goal of this report is to provide an overview that can guide practitioners and researchers in assessing the tradeoffs between current approaches and identifying directions for future advances in facial appearance capture.},
author = {Klehm, Oliver and Rousselle, Fabrice and Papas, Marios and Bradley, Derek and Hery, Christophe and Bickel, Bernd and Jarosz, Wojciech and Beeler, Thabo},
journal = {Computer Graphics Forum},
number = {2},
pages = {709 -- 733},
publisher = {Wiley-Blackwell},
title = {{Recent advances in facial appearance capture}},
doi = {10.1111/cgf.12594},
volume = {34},
year = {2015},
}
@article{1789,
abstract = {Intellectual disability (ID) has an estimated prevalence of 2-3%. Due to its extreme heterogeneity, the genetic basis of ID remains elusive in many cases. Recently, whole exome sequencing (WES) studies revealed that a large proportion of sporadic cases are caused by de novo gene variants. To identify further genes involved in ID, we performed WES in 250 patients with unexplained ID and their unaffected parents and included exomes of 51 previously sequenced child-parents trios in the analysis. Exome analysis revealed de novo intragenic variants in SET domain-containing 5 (SETD5) in two patients. One patient carried a nonsense variant, and the other an 81 bp deletion located across a splice-donor site. Chromosomal microarray diagnostics further identified four de novo non-recurrent microdeletions encompassing SETD5. CRISPR/Cas9 mutation modelling of the two intragenic variants demonstrated nonsense-mediated decay of the resulting transcripts, pointing to a loss-of-function (LoF) and haploinsufficiency as the common disease-causing mechanism of intragenic SETD5 sequence variants and SETD5-containing microdeletions. In silico domain prediction of SETD5, a predicted SET domain-containing histone methyltransferase (HMT), substantiated the presence of a SET domain and identified a novel putative PHD domain, strengthening a functional link to well-known histone-modifying ID genes. All six patients presented with ID and certain facial dysmorphisms, suggesting that SETD5 sequence variants contribute substantially to the microdeletion 3p25.3 phenotype. The present report of two SETD5 LoF variants in 301 patients demonstrates a prevalence of 0.7% and thus SETD5 variants as a relatively frequent cause of ID.},
author = {Kuechler, Alma and Zink, Alexander and Wieland, Thomas and Lüdecke, Hermann and Cremer, Kirsten and Salviati, Leonardo and Magini, Pamela and Najafi, Kimia and Zweier, Christiane and Czeschik, Johanna and Aretz, Stefan and Endele, Sabine and Tamburrino, Federica and Pinato, Claudia and Clementi, Maurizio and Gundlach, Jasmin and Maylahn, Carina and Mazzanti, Laura and Wohlleber, Eva and Schwarzmayr, Thomas and Kariminejad, Roxana and Schlessinger, Avner and Wieczorek, Dagmar and Strom, Tim and Novarino, Gaia and Engels, Hartmut},
journal = {European Journal of Human Genetics},
number = {6},
pages = {753 -- 760},
publisher = {Nature Publishing Group},
title = {{Loss-of-function variants of SETD5 cause intellectual disability and the core phenotype of microdeletion 3p25.3 syndrome}},
doi = {10.1038/ejhg.2014.165},
volume = {23},
year = {2015},
}