@article{822,
abstract = {Polymicrobial infections constitute small ecosystems that accommodate several bacterial species. Commonly, these bacteria are investigated in isolation. However, it is unknown to what extent the isolates interact and whether their interactions alter bacterial growth and ecosystem resilience in the presence and absence of antibiotics. We quantified the complete ecological interaction network for 72 bacterial isolates collected from 23 individuals diagnosed with polymicrobial urinary tract infections and found that most interactions cluster based on evolutionary relatedness. Statistical network analysis revealed that competitive and cooperative reciprocal interactions are enriched in the global network, while cooperative interactions are depleted in the individual host community networks. A population dynamics model parameterized by our measurements suggests that interactions restrict community stability, explaining the observed species diversity of these communities. We further show that the clinical isolates frequently protect each other from clinically relevant antibiotics. Together, these results highlight that ecological interactions are crucial for the growth and survival of bacteria in polymicrobial infection communities and affect their assembly and resilience.},
author = {de Vos, Marjon and Zagórski, Marcin P and McNally, Alan and Bollenbach, Mark Tobias},
issn = {00278424},
journal = {Proceedings of the National Academy of Sciences},
number = {40},
pages = {10666 -- 10671},
publisher = {National Academy of Sciences},
title = {{Interaction networks, ecological stability, and collective antibiotic tolerance in polymicrobial infections}},
doi = {10.1073/pnas.1713372114},
volume = {114},
year = {2017},
}
@article{823,
abstract = {The resolution of a linear system with positive integer variables is a basic yet difficult computational problem with many applications. We consider sparse uncorrelated random systems parametrised by the density c and the ratio α=N/M between number of variables N and number of constraints M. By means of ensemble calculations we show that the space of feasible solutions endows a Van-Der-Waals phase diagram in the plane (c, α). We give numerical evidence that the associated computational problems become more difficult across the critical point and in particular in the coexistence region.},
author = {Colabrese, Simona and De Martino, Daniele and Leuzzi, Luca and Marinari, Enzo},
issn = {17425468},
journal = {Journal of Statistical Mechanics: Theory and Experiment},
number = {9},
publisher = {IOP Publishing},
title = {{Phase transitions in integer linear problems}},
doi = {10.1088/1742-5468/aa85c3},
volume = {2017},
year = {2017},
}
@article{824,
author = {Budanur, Nazmi B and Hof, Björn},
title = {{Heteroclinic path to spatially localized chaos in pipe flow}},
journal = {Journal of Fluid Mechanics},
volume = {827},
publisher = {Cambridge University Press},
issn = {00221120},
doi = {10.1017/jfm.2017.516},
year = {2017},
abstract = {In shear flows at transitional Reynolds numbers, localized patches of turbulence, known as puffs, coexist with the laminar flow. Recently, Avila et al. (Phys. Rev. Lett., vol. 110, 2013, 224502) discovered two spatially localized relative periodic solutions for pipe flow, which appeared in a saddle-node bifurcation at low Reynolds number. Combining slicing methods for continuous symmetry reduction with Poincaré sections for the first time in a shear flow setting, we compute and visualize the unstable manifold of the lower-branch solution and show that it extends towards the neighbourhood of the upper-branch solution. Surprisingly, this connection even persists far above the bifurcation point and appears to mediate the first stage of the puff generation: amplification of streamwise localized fluctuations. When the state-space trajectories on the unstable manifold reach the vicinity of the upper branch, corresponding fluctuations expand in space and eventually take the usual shape of a puff.},
}
@article{825,
abstract = {What data is needed about data? Describing the process to answer this question for the institutional data repository IST DataRep.},
author = {Petritsch, Barbara},
issn = {10222588},
journal = {Mitteilungen der Vereinigung Österreichischer Bibliothekarinnen \& Bibliothekare},
number = {2},
pages = {200 -- 207},
publisher = {VÖB},
title = {{Metadata for research data in practice}},
doi = {10.31263/voebm.v70i2.1678},
volume = {70},
year = {2017},
}
@inproceedings{833,
abstract = {We present an efficient algorithm to compute Euler characteristic curves of gray scale images of arbitrary dimension. In various applications the Euler characteristic curve is used as a descriptor of an image. Our algorithm is the first streaming algorithm for Euler characteristic curves. The usage of streaming removes the necessity to store the entire image in RAM. Experiments show that our implementation handles terabyte scale images on commodity hardware. Due to lock-free parallelism, it scales well with the number of processor cores. Additionally, we put the concept of the Euler characteristic curve in the wider context of computational topology. In particular, we explain the connection with persistence diagrams.},
author = {Heiss, Teresa and Wagner, Hubert},
booktitle = {Image Analysis},
editor = {Felsberg, Michael and Heyden, Anders and Krüger, Norbert},
issn = {03029743},
location = {Ystad, Sweden},
pages = {397 -- 409},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{Streaming algorithm for Euler characteristic curves of multidimensional images}},
doi = {10.1007/978-3-319-64689-3_32},
volume = {10424},
year = {2017},
}
@article{834,
abstract = {Thermal and many-body localized phases are separated by a dynamical phase transition of a new kind. We analyze the distribution of off-diagonal matrix elements of local operators across this transition in two different models of disordered spin chains. We show that the behavior of matrix elements can be used to characterize the breakdown of thermalization and to extract the many-body Thouless energy. We find that upon increasing the disorder strength the system enters a critical region around the many-body localization transition. The properties of the system in this region are: (i) the Thouless energy becomes smaller than the level spacing, (ii) the matrix elements show critical dependence on the energy difference, and (iii) the matrix elements, viewed as amplitudes of a fictitious wave function, exhibit strong multifractality. This critical region decreases with the system size, which we interpret as evidence for a diverging correlation length at the many-body localization transition. Our findings show that the correlation length becomes larger than the accessible system sizes in a broad range of disorder strength values and shed light on the critical behavior near the many-body localization transition.},
author = {Serbyn, Maksym and Papic, Zlatko and Abanin, Dmitry},
issn = {24699950},
journal = {Physical Review B},
number = {10},
publisher = {American Physical Society},
title = {{Thouless energy and multifractality across the many-body localization transition}},
doi = {10.1103/PhysRevB.96.104201},
volume = {96},
year = {2017},
}
@inproceedings{836,
author = {Ethier, Marc and Jablonski, Grzegorz and Mrozek, Marian},
title = {{Finding eigenvalues of self-maps with the Kronecker canonical form}},
booktitle = {Special Sessions in Applications of Computer Algebra},
volume = {198},
pages = {119 -- 136},
location = {Kalamata, Greece},
publisher = {Springer},
isbn = {978-331956930-7},
doi = {10.1007/978-3-319-56932-1_8},
year = {2017},
abstract = {Recent research has examined how to study the topological features of a continuous self-map by means of the persistence of the eigenspaces, for given eigenvalues, of the endomorphism induced in homology over a field. This raised the question of how to select dynamically significant eigenvalues. The present paper aims to answer this question, giving an algorithm that computes the persistence of eigenspaces for every eigenvalue simultaneously, also expressing said eigenspaces as direct sums of “finite” and “singular” subspaces.},
}
@phdthesis{837,
abstract = {The hippocampus is a key brain region for memory and notably for spatial memory, and is needed for both spatial working and reference memories. Hippocampal place cells selectively discharge in specific locations of the environment to form mnemonic representations of space. Several behavioral protocols have been designed to test spatial memory which requires the experimental subject to utilize working memory and reference memory. However, less is known about how these memory traces are presented in the hippocampus, especially considering tasks that require both spatial working and long-term reference memory demand. The aim of my thesis was to elucidate how spatial working memory, reference memory, and the combination of both are represented in the hippocampus. In this thesis, using a radial eight-arm maze, I examined how the combined demand on these memories influenced place cell assemblies while reference memories were partially updated by changing some of the reward-arms. This was contrasted with task variants requiring working or reference memories only. Reference memory update led to gradual place field shifts towards the rewards on the switched arms. Cells developed enhanced firing in passes between newly-rewarded arms as compared to those containing an unchanged reward. The working memory task did not show such gradual changes. Place assemblies on occasions replayed trajectories of the maze; at decision points the next arm choice was preferentially replayed in tasks needing reference memory while in the pure working memory task the previously visited arm was replayed. Hence trajectory replay only reflected the decision of the animal in tasks needing reference memory update. At the reward locations, in all three tasks outbound trajectories of the current arm were preferentially replayed, showing the animals’ next path to the center. At reward locations trajectories were replayed preferentially in reverse temporal order.
Moreover, in the center reverse replay was seen in the working memory task but in the other tasks forward replay was seen. Hence, the direction of reactivation was determined by the goal locations so that part of the trajectory which was closer to the goal was reactivated later in an HSE while places further away from the goal were reactivated earlier. Altogether my work demonstrated that reference memory update triggers several levels of reorganization of the hippocampal cognitive map which are not seen in simpler working memory demands. Moreover, hippocampus is likely to be involved in spatial decisions through reactivating planned trajectories when reference memory recall is required for such a decision.},
author = {Xu, Haibing},
pages = {93},
publisher = {IST Austria},
title = {{Reactivation of the hippocampal cognitive map in goal-directed spatial tasks}},
doi = {10.15479/AT:ISTA:th_858},
year = {2017},
}
@phdthesis{838,
abstract = {In this thesis we discuss the exact security of message authentication codes HMAC, NMAC, and PMAC. NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). PMAC is a block-cipher based mode of operation, which also happens to be the most famous fully parallel MAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto’96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, under two assumptions. Unfortunately, for many instantiations of HMAC one of them has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto’06] showed its security without this assumption. PMAC was introduced by Black and Rogaway at Eurocrypt 2002. If instantiated with a pseudorandom permutation over n-bit strings, PMAC constitutes a provably secure variable input-length PRF. For adversaries making q queries, each of length at most ℓ (in n-bit blocks), and of total length σ ≤ qℓ, the original paper proves an upper bound on the distinguishing advantage of O(σ^2/2^n), while the currently best bound is O(qσ/2^n). In this work we show that this bound is tight by giving an attack with advantage Ω(q^2 ℓ/2^n). In the PMAC construction one initially XORs a mask to every message block, where the mask for the i-th block is computed as τ_i := γ_i · L, where L is a (secret) random value, and γ_i is the i-th codeword of the Gray code. Our attack applies more generally to any sequence of γ_i’s which contains a large coset of a subgroup of GF(2^n). As for NMAC, our first contribution is a simpler and uniform proof: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC f is an (ε + ℓqδ)-secure PRF against q queries of length at most ℓ blocks each.
We also show that this ε + ℓqδ bound is basically tight by constructing an f for which an attack with advantage ℓqδ exists. Moreover, we analyze the PRF-security of a modification of NMAC called NI by An and Bellare that avoids the constant rekeying on multi-block messages in NMAC and allows for an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq^2/2^c bound for this step, improving over the trivial bound of ℓ^2q^2/2^c. Finally, we investigate, if the security of PMAC can be further improved by using τ_i’s that are k-wise independent, for k > 1 (the original has k = 1). We observe that the security of PMAC will not increase in general if k = 2, and then prove that the security increases to O(q^2/2^n), if k = 4. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether k = 3 is already sufficient to get this level of security is left as an open problem. Keywords: Message authentication codes, Pseudorandom functions, HMAC, PMAC.},
author = {Rybar, Michal},
pages = {86},
publisher = {IST Austria},
title = {{(The exact security of) Message authentication codes}},
doi = {10.15479/AT:ISTA:th_828},
year = {2017},
}
@phdthesis{839,
author = {Hahn, David},
title = {{Brittle fracture simulation with boundary elements for computer graphics}},
publisher = {IST Austria},
pages = {124},
doi = {10.15479/AT:ISTA:th_855},
year = {2017},
abstract = {This thesis describes a brittle fracture simulation method for visual effects applications. Building upon a symmetric Galerkin boundary element method, we first compute stress intensity factors following the theory of linear elastic fracture mechanics. We then use these stress intensities to simulate the motion of a propagating crack front at a significantly higher resolution than the overall deformation of the breaking object. Allowing for spatial variations of the material's toughness during crack propagation produces visually realistic, highly-detailed fracture surfaces. Furthermore, we introduce approximations for stress intensities and crack opening displacements, resulting in both practical speed-up and theoretically superior runtime complexity compared to previous methods. While we choose a quasi-static approach to fracture mechanics, ignoring dynamic deformations, we also couple our fracture simulation framework to a standard rigid-body dynamics solver, enabling visual effects artists to simulate both large scale motion, as well as fracturing due to collision forces in a combined system. As fractures inside of an object grow, their geometry must be represented both in the coarse boundary element mesh, as well as at the desired fine output resolution. Using a boundary element method, we avoid complicated volumetric meshing operations. Instead we describe a simple set of surface meshing operations that allow us to progressively add cracks to the mesh of an object and still re-use all previously computed entries of the linear boundary element system matrix. On the high resolution level, we opt for an implicit surface representation. We then describe how to capture fracture surfaces during crack propagation, as well as separate the individual fragments resulting from the fracture process, based on this implicit representation.
We show results obtained with our method, either solving the full boundary element system in every time step, or alternatively using our fast approximations. These results demonstrate that both of these methods perform well in basic test cases and produce realistic fracture surfaces. Furthermore we show that our fast approximations substantially out-perform the standard approach in more demanding scenarios. Finally, these two methods naturally combine, using the full solution while the problem size is manageably small and switching to the fast approximations later on. The resulting hybrid method gives the user a direct way to choose between speed and accuracy of the simulation.},
}
@incollection{84,
abstract = {The advent of high-throughput technologies and the concurrent advances in information sciences have led to a data revolution in biology. This revolution is most significant in molecular biology, with an increase in the number and scale of the “omics” projects over the last decade. Genomics projects, for example, have produced impressive advances in our knowledge of the information concealed into genomes, from the many genes that encode for the proteins that are responsible for most if not all cellular functions, to the noncoding regions that are now known to provide regulatory functions. Proteomics initiatives help to decipher the role of post-translation modifications on the protein structures and provide maps of protein-protein interactions, while functional genomics is the field that attempts to make use of the data produced by these projects to understand protein functions. The biggest challenge today is to assimilate the wealth of information provided by these initiatives into a conceptual framework that will help us decipher life. For example, the current views of the relationship between protein structure and function remain fragmented. We know of their sequences, more and more about their structures, we have information on their biological activities, but we have difficulties connecting this dotted line into an informed whole. We lack the experimental and computational tools for directly studying protein structure, function, and dynamics at the molecular and supra-molecular levels. In this chapter, we review some of the current developments in building the computational tools that are needed, focusing on the role that geometry and topology play in these efforts. One of our goals is to raise the general awareness about the importance of geometric methods in elucidating the mysterious foundations of our very existence. Another goal is the broadening of what we consider a geometric algorithm.
There is plenty of valuable no-man’s-land between combinatorial and numerical algorithms, and it seems opportune to explore this land with a computational-geometric frame of mind.},
author = {Edelsbrunner, Herbert and Koehl, Patrice},
booktitle = {Handbook of Discrete and Computational Geometry, Third Edition},
editor = {Toth, Csaba and O'Rourke, Joseph and Goodman, Jacob},
pages = {1709 -- 1735},
publisher = {CRC Press},
title = {{Computational topology for structural molecular biology}},
doi = {10.1201/9781315119601},
year = {2017},
}
@article{840,
author = {Vukusic, Lada and Kukucka, Josip and Watzinger, Hannes and Katsaros, Georgios},
title = {{Fast hole tunneling times in germanium hut wires probed by single-shot reflectometry}},
journal = {Nano Letters},
volume = {17},
number = {9},
pages = {5706 -- 5710},
publisher = {American Chemical Society},
issn = {15306984},
doi = {10.1021/acs.nanolett.7b02627},
year = {2017},
abstract = {Heavy holes confined in quantum dots are predicted to be promising candidates for the realization of spin qubits with long coherence times. Here we focus on such heavy-hole states confined in germanium hut wires. By tuning the growth density of the latter we can realize a T-like structure between two neighboring wires. Such a structure allows the realization of a charge sensor, which is electrostatically and tunnel coupled to a quantum dot, with charge-transfer signals as high as 0.3 e. By integrating the T-like structure into a radiofrequency reflectometry setup, single-shot measurements allowing the extraction of hole tunneling times are performed. The extracted tunneling times of less than 10 μs are attributed to the small effective mass of Ge heavy-hole states and pave the way toward projective spin readout measurements.},
}
@incollection{8450,
abstract = {Methyl groups are very useful probes of structure, dynamics, and interactions in protein NMR spectroscopy. In particular, methyl-directed experiments provide high sensitivity even in very large proteins, such as membrane proteins in a membrane-mimicking environment. In this chapter, we discuss the approach for labeling methyl groups in E. coli-based protein expression, as exemplified with the mitochondrial carrier GGC.},
author = {Kurauskas, Vilius and Schanda, Paul and Sounier, Remy},
booktitle = {Membrane protein structure and function characterization},
isbn = {9781493971497},
issn = {1064-3745},
pages = {109 -- 123},
publisher = {Springer Nature},
title = {{Methyl-specific isotope labeling strategies for NMR studies of membrane proteins}},
doi = {10.1007/978-1-4939-7151-0_6},
volume = {1635},
year = {2017},
}
@article{2016,
author = {Martin Del Campo Sanchez, Abraham and Cepeda Humerez, Sarah A and Uhler, Caroline},
title = {{Exact goodness-of-fit testing for the Ising model}},
journal = {Scandinavian Journal of Statistics},
volume = {44},
number = {2},
pages = {285 -- 306},
publisher = {Wiley-Blackwell},
issn = {03036898},
doi = {10.1111/sjos.12251},
year = {2017},
abstract = {The Ising model is one of the simplest and most famous models of interacting systems. It was originally proposed to model ferromagnetic interactions in statistical physics and is now widely used to model spatial processes in many areas such as ecology, sociology, and genetics, usually without testing its goodness-of-fit. Here, we propose an exact goodness-of-fit test for the finite-lattice Ising model. The theory of Markov bases has been developed in algebraic statistics for exact goodness-of-fit testing using a Monte Carlo approach. However, this beautiful theory has fallen short of its promise for applications, because finding a Markov basis is usually computationally intractable. We develop a Monte Carlo method for exact goodness-of-fit testing for the Ising model which avoids computing a Markov basis and also leads to a better connectivity of the Markov chain and hence to a faster convergence. We show how this method can be applied to analyze the spatial organization of receptors on the cell membrane.},
}
@article{715,
author = {Novarino, Gaia},
title = {{More excitation for Rett syndrome}},
journal = {Science Translational Medicine},
volume = {9},
number = {405},
publisher = {American Association for the Advancement of Science},
issn = {19466234},
doi = {10.1126/scitranslmed.aao4218},
year = {2017},
abstract = {D-cycloserine ameliorates breathing abnormalities and survival rate in a mouse model of Rett syndrome.},
}
@article{716,
abstract = {Two-player games on graphs are central in many problems in formal verification and program analysis, such as synthesis and verification of open systems. In this work, we consider solving recursive game graphs (or pushdown game graphs) that model the control flow of sequential programs with recursion. While pushdown games have been studied before with qualitative objectives—such as reachability and ω-regular objectives—in this work, we study for the first time such games with the most well-studied quantitative objective, the mean-payoff objective. In pushdown games, two types of strategies are relevant: (1) global strategies, which depend on the entire global history; and (2) modular strategies, which have only local memory and thus do not depend on the context of invocation but rather only on the history of the current invocation of the module. Our main results are as follows: (1) One-player pushdown games with mean-payoff objectives under global strategies are decidable in polynomial time. (2) Two-player pushdown games with mean-payoff objectives under global strategies are undecidable. (3) One-player pushdown games with mean-payoff objectives under modular strategies are NP-hard. (4) Two-player pushdown games with mean-payoff objectives under modular strategies can be solved in NP (i.e., both one-player and two-player pushdown games with mean-payoff objectives under modular strategies are NP-complete). We also establish the optimal strategy complexity by showing that global strategies for mean-payoff objectives require infinite memory even in one-player pushdown games and memoryless modular strategies are sufficient in two-player pushdown games. Finally, we also show that all the problems have the same complexity if the stack boundedness condition is added, where along with the mean-payoff objective the player must also ensure that the stack height is bounded.},
author = {Chatterjee, Krishnendu and Velner, Yaron},
issn = {00045411},
journal = {Journal of the ACM},
number = {5},
pages = {34},
publisher = {ACM},
title = {{The complexity of mean-payoff pushdown games}},
doi = {10.1145/3121408},
volume = {64},
year = {2017},
}
@misc{7163,
abstract = {The de novo genome assemblies generated for this study, and the associated metadata.},
author = {Fraisse, Christelle},
publisher = {IST Austria},
title = {{Supplementary Files for ``The deep conservation of the Lepidoptera Z chromosome suggests a non canonical origin of the W''}},
doi = {10.15479/AT:ISTA:7163},
year = {2017},
}
@article{717,
author = {Chatterjee, Krishnendu and Velner, Yaron},
title = {{Hyperplane separation technique for multidimensional mean-payoff games}},
journal = {Journal of Computer and System Sciences},
volume = {88},
pages = {236 -- 259},
publisher = {Academic Press},
doi = {10.1016/j.jcss.2017.04.005},
year = {2017},
abstract = {We consider finite-state and recursive game graphs with multidimensional mean-payoff objectives. In recursive games two types of strategies are relevant: global strategies and modular strategies. Our contributions are: (1) We show that finite-state multidimensional mean-payoff games can be solved in polynomial time if the number of dimensions and the maximal absolute value of weights are fixed; whereas for arbitrary dimensions the problem is coNP-complete. (2) We show that one-player recursive games with multidimensional mean-payoff objectives can be solved in polynomial time. Both above algorithms are based on hyperplane separation technique. (3) For recursive games we show that under modular strategies the multidimensional problem is undecidable. We show that if the number of modules, exits, and the maximal absolute value of the weights are fixed, then one-dimensional recursive mean-payoff games under modular strategies can be solved in polynomial time, whereas for unbounded number of exits or modules the problem is NP-hard.},
}
@article{718,
abstract = {Mapping every simplex in the Delaunay mosaic of a discrete point set to the radius of the smallest empty circumsphere gives a generalized discrete Morse function. Choosing the points from a Poisson point process in ℝ^n, we study the expected number of simplices in the Delaunay mosaic as well as the expected number of critical simplices and nonsingular intervals in the corresponding generalized discrete gradient. Observing connections with other probabilistic models, we obtain precise expressions for the expected numbers in low dimensions. In particular, we obtain the expected numbers of simplices in the Poisson–Delaunay mosaic in dimensions n ≤ 4.},
author = {Edelsbrunner, Herbert and Nikitenko, Anton and Reitzner, Matthias},
issn = {00018678},
journal = {Advances in Applied Probability},
number = {3},
pages = {745 -- 767},
publisher = {Cambridge University Press},
title = {{Expected sizes of Poisson–Delaunay mosaics and their discrete Morse functions}},
doi = {10.1017/apr.2017.20},
volume = {49},
year = {2017},
}
@article{719,
author = {Chatterjee, Krishnendu and Ehlers, Rüdiger},
title = {{Special issue: Synthesis and SYNT 2014}},
journal = {Acta Informatica},
volume = {54},
number = {6},
pages = {543 -- 544},
publisher = {Springer},
issn = {00015903},
doi = {10.1007/s00236-017-0299-0},
year = {2017},
abstract = {The ubiquity of computation in modern machines and devices imposes a need to assert the correctness of their behavior. Especially in the case of safety-critical systems, their designers need to take measures that enforce their safe operation. Formal methods has emerged as a research field that addresses this challenge: by rigorously proving that all system executions adhere to their specifications, the correctness of an implementation under concern can be assured. To achieve this goal, a plethora of techniques are nowadays available, all of which are optimized for different system types and application domains.},
}