@phdthesis{68,
abstract = {The most common assumption made in statistical learning theory is the assumption of the independent and identically distributed (i.i.d.) data. While being very convenient mathematically, it is often very clearly violated in practice. This disparity between the machine learning theory and applications underlies a growing demand in the development of algorithms that learn from dependent data and theory that can provide generalization guarantees similar to the independent situations. This thesis is dedicated to two variants of dependencies that can arise in practice. One is a dependence on the level of samples in a single learning task. Another dependency type arises in the multi-task setting when the tasks are dependent on each other even though the data for them can be i.i.d. In both cases we model the data (samples or tasks) as stochastic processes and introduce new algorithms for both settings that take into account and exploit the resulting dependencies. We prove the theoretical guarantees on the performance of the introduced algorithms under different evaluation criteria and, in addition, we complement the theoretical study by the empirical one, where we evaluate some of the algorithms on two real world datasets to highlight their practical applicability.},
author = {Zimin, Alexander},
pages = {92},
publisher = {IST Austria},
title = {{Learning from dependent data}},
doi = {10.15479/AT:ISTA:TH1048},
year = {2018},
}
@phdthesis{69,
abstract = {A qubit, a unit of quantum information, is essentially any quantum mechanical two-level system which can be coherently controlled. Still, to be used for computation, it has to fulfill criteria. Qubits, regardless of the system in which they are realized, suffer from decoherence. This leads to loss of the information stored in the qubit. The upper bound of the time scale on which decoherence happens is set by the spin relaxation time. In this thesis I studied a two-level system consisting of a Zeeman-split hole spin confined in a quantum dot formed in a Ge hut wire. Such Ge hut wires have emerged as a promising material system for the realization of spin qubits, due to the combination of two significant properties: long spin coherence time as expected for group IV semiconductors due to the low hyperfine interaction and a strong valence band spin-orbit coupling. Here, I present how to fabricate quantum dot devices suitable for electrical transport measurements. Coupled quantum dot devices allowed the realization of a charge sensor, which is electrostatically and tunnel coupled to a quantum dot. By integrating the charge sensor into a radio-frequency reflectometry setup, I performed for the first time single-shot readout measurements of hole spins and extracted the hole spin relaxation times in Ge hut wires.},
author = {Vukušić, Lada},
pages = {103},
publisher = {IST Austria},
title = {{Charge sensing and spin relaxation times of holes in Ge hut wires}},
doi = {10.15479/AT:ISTA:TH_1047},
year = {2018},
}
@phdthesis{83,
abstract = {A proof system is a protocol between a prover and a verifier over a common input in which an honest prover convinces the verifier of the validity of true statements. Motivated by the success of decentralized cryptocurrencies, exemplified by Bitcoin, the focus of this thesis will be on proof systems which found applications in some sustainable alternatives to Bitcoin, such as the Spacemint and Chia cryptocurrencies. In particular, we focus on proofs of space and proofs of sequential work.
Proofs of space (PoSpace) were suggested as more ecological, economical, and egalitarian alternative to the energy-wasteful proof-of-work mining of Bitcoin. However, the state-of-the-art constructions of PoSpace are based on sophisticated graph pebbling lower bounds, and are therefore complex. Moreover, when these PoSpace are used in cryptocurrencies like Spacemint, miners can only start mining after ensuring that a commitment to their space is already added in a special transaction to the blockchain. Proofs of sequential work (PoSW) are proof systems in which a prover, upon receiving a statement x and a time parameter T, computes a proof which convinces the verifier that T time units had passed since x was received. Whereas Spacemint assumes synchrony to retain some interesting Bitcoin dynamics, Chia requires PoSW with unique proofs, i.e., PoSW in which it is hard to come up with more than one accepting proof for any true statement. In this thesis we construct simple and practically-efficient PoSpace and PoSW. When using our PoSpace in cryptocurrencies, miners can start mining on the fly, like in Bitcoin, and unlike current constructions of PoSW, which either achieve efficient verification of sequential work, or faster-than-recomputing verification of correctness of proofs, but not both at the same time, ours achieve the best of these two worlds.},
author = {Abusalah, Hamza M.},
pages = {59},
publisher = {IST Austria},
title = {{Proof systems for sustainable decentralized cryptocurrencies}},
doi = {10.15479/AT:ISTA:TH_1046},
year = {2018},
}
@phdthesis{9,
abstract = {Immune cells migrating to the sites of infection navigate through diverse tissue architectures and switch their migratory mechanisms upon demand. However, little is known about systemic regulators that could allow the acquisition of these mechanisms. We performed a genetic screen in Drosophila melanogaster to identify regulators of germband invasion by embryonic macrophages into the confined space between the ectoderm and mesoderm. We have found that bZIP circadian transcription factors (TFs) Kayak (dFos) and Vrille (dNFIL3) have opposite effects on macrophage germband infiltration: Kayak facilitated and Vrille inhibited it. These TFs are enriched in the macrophages during migration and genetically interact to control it. Kayak sets a less coordinated mode of migration of the macrophage group and increases the probability and length of Lévy walks. Intriguingly, the motility of kayak mutant macrophages was also strongly affected during initial germband invasion but not along another less confined route. Inhibiting Rho1 signaling within the tail ectoderm partially rescued the Kayak mutant phenotype, strongly suggesting that migrating macrophages have to overcome a barrier imposed by the stiffness of the ectoderm. Also, Kayak appeared to be important for the maintenance of the round cell shape and the rear edge translocation of the macrophages invading the germband. Complementary to this, the cortical actin cytoskeleton of Kayak-deficient macrophages was strongly affected. RNA sequencing revealed the filamin Cheerio and tetraspanin TM4SF to be downstream of Kayak. Chromatin immunoprecipitation and immunostaining revealed that the formin Diaphanous is another downstream target of Kayak.
Indeed, Cheerio, TM4SF and Diaphanous are required within macrophages for germband invasion, and expression of constitutively active Diaphanous in macrophages was able to rescue the kayak mutant phenotype. Moreover, Cher and Diaphanous are also reduced in the macrophages overexpressing Vrille. We hypothesize that Kayak, through its targets, increases actin polymerization and cortical tension in macrophages and thus allows extra force generation necessary for macrophage dissemination and migration through confined stiff tissues, while Vrille counterbalances it.},
author = {Belyaeva, Vera},
pages = {96},
publisher = {IST Austria},
title = {{Transcriptional regulation of macrophage migration in the Drosophila melanogaster embryo}},
doi = {10.15479/AT:ISTA:th1064},
year = {2018},
}
@phdthesis{10,
abstract = {Genomic imprinting is an epigenetic process that leads to parent of origin-specific gene expression in a subset of genes. Imprinted genes are essential for brain development, and deregulation of imprinting is associated with neurodevelopmental diseases and the pathogenesis of psychiatric disorders. However, the cell-type specificity of imprinting at single cell resolution, and how imprinting and thus gene dosage regulates neuronal circuit assembly is still largely unknown. Here, MADM (Mosaic Analysis with Double Markers) technology was employed to assess genomic imprinting at single cell level. By visualizing MADM-induced uniparental disomies (UPDs) in distinct colors at single cell level in genetic mosaic animals, this experimental paradigm provides a unique quantitative platform to systematically assay the UPD-mediated imbalances in imprinted gene expression at unprecedented resolution. An experimental pipeline based on FACS, RNA-seq and bioinformatics analysis was established and applied to systematically map cell-type-specific ‘imprintomes’ in the mouse brain. The results revealed that parental-specific expression of imprinted genes per se is rarely cell-type-specific even at the individual cell level. Conversely, when we extended the comparison to downstream responses resulting from imbalanced imprinted gene expression, we discovered an unexpectedly high degree of cell-type specificity. Furthermore, we determined a novel function of genomic imprinting in cortical astrocyte production and in olfactory bulb (OB) granule cell generation. These results suggest important functional implication of genomic imprinting for generating cell-type diversity in the brain. In addition, MADM provides a powerful tool to study candidate genes by concomitant genetic manipulation and fluorescent labelling of single cells. 
MADM-based candidate gene approach was utilized to identify potential imprinted genes involved in the generation of cortical astrocytes and OB granule cells. We investigated p57Kip2, a maternally expressed gene and known cell cycle regulator. Although we found that p57Kip2 does not play a role in these processes, we detected an unexpected function of the paternal allele previously thought to be silent. Finally, we took advantage of a key property of MADM which is to allow unambiguous investigation of environmental impact on single cells. The experimental pipeline based on FACS and RNA-seq analysis of MADM-labeled cells was established to probe the functional differences of single cell loss of gene function compared to global loss of function on a transcriptional level. With this method, both common and distinct responses were isolated due to cell-autonomous and non-autonomous effects acting on genotypically identical cells. As a result, transcriptional changes were identified which result solely from the surrounding environment. Using the MADM technology to study genomic imprinting at single cell resolution, we have identified cell-type-specific gene expression, novel gene function and the impact of environment on single cell transcriptomes. Together, these provide important insights to the understanding of mechanisms regulating cell-type specificity and thus diversity in the brain.},
author = {Laukoter, Susanne},
pages = {1--139},
publisher = {IST Austria},
title = {{Role of genomic imprinting in cerebral cortex development}},
doi = {10.15479/AT:ISTA:th1057},
year = {2018},
}
@phdthesis{149,
abstract = {The eigenvalue density of many large random matrices is well approximated by a deterministic measure, the self-consistent density of states. In the present work, we show this behaviour for several classes of random matrices. In fact, we establish that, in each of these classes, the self-consistent density of states approximates the eigenvalue density of the random matrix on all scales slightly above the typical eigenvalue spacing. For large classes of random matrices, the self-consistent density of states exhibits several universal features. We prove that, under suitable assumptions, random Gram matrices and Hermitian random matrices with decaying correlations have a 1/3-Hölder continuous self-consistent density of states ρ on R, which is analytic, where it is positive, and has either a square root edge or a cubic root cusp, where it vanishes. We, thus, extend the validity of the corresponding result for Wigner-type matrices from [4, 5, 7]. We show that ρ is determined as the inverse Stieltjes transform of the normalized trace of the unique solution m(z) to the Dyson equation −m(z)^{−1} = z − a + S[m(z)] on C^{N×N} with the constraint Im m(z) ≥ 0. Here, z lies in the complex upper half-plane, a is a self-adjoint element of C^{N×N} and S is a positivity-preserving operator on C^{N×N} encoding the first two moments of the random matrix. In order to analyze a possible limit of ρ for N → ∞ and address some applications in free probability theory, we also consider the Dyson equation on infinite dimensional von Neumann algebras. We present two applications to random matrices. We first establish that, under certain assumptions, large random matrices with independent entries have a rotationally symmetric self-consistent density of states which is supported on a centered disk in C. Moreover, it is infinitely often differentiable apart from a jump on the boundary of this disk.
Second, we show edge universality at all regular (not necessarily extreme) spectral edges for Hermitian random matrices with decaying correlations.},
author = {Alt, Johannes},
pages = {456},
publisher = {IST Austria},
title = {{Dyson equation and eigenvalue statistics of random matrices}},
doi = {10.15479/AT:ISTA:TH_1040},
year = {2018},
}
@phdthesis{202,
abstract = {Restriction-modification (RM) represents the simplest and possibly the most widespread mechanism of self/non-self discrimination in nature. In order to provide bacteria with immunity against bacteriophages and other parasitic genetic elements, RM systems rely on a balance between two enzymes: the restriction enzyme, which cleaves non-self DNA at specific restriction sites, and the modification enzyme, which tags the host’s DNA as self and thus protects it from cleavage. In this thesis, I use population and single-cell level experiments in combination with mathematical modeling to study different aspects of the interplay between RM systems, bacteria and bacteriophages. First, I analyze how mutations in phage restriction sites affect the probability of phage escape – an inherently stochastic process, during which phages accidently get modified instead of restricted. Next, I use single-cell experiments to show that RM systems can, with a low probability, attack the genome of their bacterial host and that this primitive form of autoimmunity leads to a tradeoff between the evolutionary cost and benefit of RM systems. Finally, I investigate the nature of interactions between bacteria, RM systems and temperate bacteriophages to find that, as a consequence of phage escape and its impact on population dynamics, RM systems can promote acquisition of symbiotic bacteriophages, rather than limit it. The results presented here uncover new fundamental biological properties of RM systems and highlight their importance in the ecology and evolution of bacteria, bacteriophages and their interactions.},
author = {Pleska, Maros},
pages = {126},
publisher = {IST Austria},
title = {{Biology of restriction-modification systems at the single-cell and population level}},
doi = {10.15479/AT:ISTA:th_916},
year = {2017},
}
@phdthesis{6287,
abstract = {The main objects considered in the present work are simplicial and CW-complexes with vertices forming a random point cloud. In particular, we consider a Poisson point process in R^n and study Delaunay and Voronoi complexes of the first and higher orders and weighted Delaunay complexes obtained as sections of Delaunay complexes, as well as the Čech complex. Further, we examine the Delaunay complex of a Poisson point process on the sphere S^n, as well as of a uniform point cloud, which is equivalent to the convex hull, providing a connection to the theory of random polytopes. Each of the complexes in question can be endowed with a radius function, which maps its cells to the radii of appropriately chosen circumspheres, called the radius of the cell. Applying and developing discrete Morse theory for these functions, joining it together with probabilistic and sometimes analytic machinery, and developing several integral geometric tools, we aim at getting the distributions of circumradii of typical cells. For all considered complexes, we are able to generalize and obtain up to constants the distribution of radii of typical intervals of all types. In low dimensions the constants can be computed explicitly, thus providing the explicit expressions for the expected numbers of cells. In particular, it allows us to find the expected density of simplices of every dimension for a Poisson point process in R^4, whereas the result for R^3 was known already in the 1970s.},
author = {Nikitenko, Anton},
pages = {86},
publisher = {IST Austria},
title = {{Discrete Morse theory for random complexes}},
doi = {10.15479/AT:ISTA:th_873},
year = {2017},
}
@phdthesis{6291,
abstract = {Bacteria and their pathogens – phages – are the most abundant living entities on Earth. Throughout their coevolution, bacteria have evolved multiple immune systems to overcome the ubiquitous threat from the phages. Although the molecular details of these immune systems’ functions are relatively well understood, their epidemiological consequences for the phage-bacterial communities have been largely neglected. In this thesis we employed both experimental and theoretical methods to explore whether herd and social immunity may arise in bacterial populations. Using our experimental system consisting of Escherichia coli strains with a CRISPR based immunity to the T7 phage we show that herd immunity arises in phage-bacterial communities and that it is accentuated when the populations are spatially structured. By fitting a mathematical model, we inferred expressions for the herd immunity threshold and the velocity of spread of a phage epidemic in partially resistant bacterial populations, which both depend on the bacterial growth rate, phage burst size and phage latent period. We also investigated the potential for social immunity in Streptococcus thermophilus and its phage 2972 using a bioinformatic analysis of potentially coding short open reading frames with a signalling signature, encoded within the CRISPR associated genes. Subsequently, we tested one identified potentially signalling peptide and found that its addition to a phage-challenged culture increases probability of survival of bacteria two fold, although the results were only marginally significant. Together, these results demonstrate that the ubiquitous arms races between bacteria and phages have further consequences at the level of the population.},
author = {Payne, Pavel},
pages = {83},
publisher = {IST Austria},
title = {{Bacterial herd and social immunity to phages}},
year = {2017},
}
@phdthesis{818,
abstract = {Antibiotics have diverse effects on bacteria, including massive changes in bacterial gene expression. Whereas the gene expression changes under many antibiotics have been measured, the temporal organization of these responses and their dependence on the bacterial growth rate are unclear. As described in Chapter 1, we quantified the temporal gene expression changes in the bacterium Escherichia coli in response to the sudden exposure to antibiotics using a fluorescent reporter library and a robotic system. Our data show temporally structured gene expression responses, with response times for individual genes ranging from tens of minutes to several hours. We observed that many stress response genes were activated in response to antibiotics. As certain stress responses cross-protect bacteria from other stressors, we then asked whether cellular responses to antibiotics have a similar protective role in Chapter 2. Indeed, we found that the trimethoprim-induced acid stress response protects bacteria from subsequent acid stress. We combined microfluidics with time-lapse imaging to monitor survival, intracellular pH, and acid stress response in single cells. This approach revealed that the variable expression of the acid resistance operon gadBC strongly correlates with single-cell survival time. Cells with higher gadBC expression following trimethoprim maintain higher intracellular pH and survive the acid stress longer. Overall, we provide a way to identify single-cell cross-protection between antibiotics and environmental stressors from temporal gene expression data, and show how antibiotics can increase bacterial fitness in changing environments. While gene expression changes to antibiotics show a clear temporal structure at the population-level, it is unclear whether this clear temporal order is followed by every single cell. 
Using dual-reporter strains described in Chapter 3, we measured gene expression dynamics of promoter pairs in the same cells using microfluidics and microscopy. Chapter 4 shows that the oxidative stress response and the DNA stress response showed little timing variability and a clear temporal order under the antibiotic nitrofurantoin. In contrast, the acid stress response under trimethoprim ran independently from all other activated response programs including the DNA stress response, which showed particularly high timing variability in this stress condition. In summary, this approach provides insight into the temporal organization of gene expression programs at the single-cell level and suggests dependencies between response programs and the underlying variability-introducing mechanisms. Altogether, this work advances our understanding of the diverse effects that antibiotics have on bacteria. These results were obtained by taking into account gene expression dynamics, which allowed us to identify general principles, molecular mechanisms, and dependencies between genes. Our findings may have implications for infectious disease treatments, and microbial communities in the human body and in nature. },
author = {Mitosch, Karin},
pages = {113},
publisher = {IST Austria},
title = {{Timing, variability and cross-protection in bacteria – insights from dynamic gene expression responses to antibiotics}},
doi = {10.15479/AT:ISTA:th_862},
year = {2017},
}
@phdthesis{819,
abstract = {Contagious diseases must transmit from infectious to susceptible hosts in order to reproduce. Whilst vectored pathogens can rely on intermediaries to find new hosts for them, many infectious pathogens require close contact or direct interaction between hosts for transmission. Hence, this means that conspecifics are often the main source of infection for most animals and so, in theory, animals should avoid conspecifics to reduce their risk of infection. Of course, in reality animals must interact with one another, as a bare minimum, to mate. However, being social provides many additional benefits and group living has become a taxonomically diverse and widespread trait. How then do social animals overcome the issue of increased disease? Over the last few decades, the social insects (ants, termites and some bees and wasps) have become a model system for studying disease in social animals. On paper, a social insect colony should be particularly susceptible to disease, given that they often contain thousands of potential hosts that are closely related and frequently interact, as well as exhibiting stable environmental conditions that encourage microbial growth. Yet, disease outbreaks appear to be rare and attempts to eradicate pest species using pathogens have failed time and again. Evolutionary biologists investigating this observation have discovered that the reduced disease susceptibility in social insects is, in part, due to collectively performed disease defences of the workers. These defences act like a “social immune system” for the colony, resulting in a per capita decrease in disease, termed social immunity. Our understanding of social immunity, and its importance in relation to the immunological defences of each insect, continues to grow, but there remain many open questions. In this thesis I have studied disease defence in garden ants. 
In the first data chapter, I use the invasive garden ant, Lasius neglectus, to investigate how colonies mitigate lethal infections and prevent them from spreading systemically. I find that ants have evolved ‘destructive disinfection’ – a behaviour that uses endogenously produced acidic poison to kill diseased brood and to prevent the pathogen from replicating. In the second experimental chapter, I continue to study the use of poison in invasive garden ant colonies, finding that it is sprayed prophylactically within the nest. However, this spraying has negative effects on developing pupae when they have had their cocoons artificially removed. Hence, I suggest that acidic nest sanitation may be maintaining larval cocoon spinning in this species. In the next experimental chapter, I investigated how colony founding black garden ant queens (Lasius niger) prevent disease when a co-foundress dies. I show that ant queens prophylactically perform undertaking behaviours, similar to those performed by the workers in mature nests. When a co-foundress was infected, these undertaking behaviours improved the survival of the healthy queen. In the final data chapter, I explored how immunocompetence (measured as antifungal activity) changes as incipient black garden ant colonies grow and mature, from the solitary queen phase to colonies with several hundred workers. Queen and worker antifungal activity varied throughout this time period, but despite social immunity, did not decrease as colonies matured. In addition to the above data chapters, this thesis includes two co-authored reviews. In the first, we examine the state of the art in the field of social immunity and how it might develop in the future. In the second, we identify several challenges and open questions in the study of disease defence in animals. We highlight how social insects offer a unique model to tackle some of these problems, as disease defence can be studied from the cell to the society. },
author = {Pull, Christopher},
pages = {122},
publisher = {IST Austria},
title = {{Disease defence in garden ants}},
doi = {10.15479/AT:ISTA:th_861},
year = {2017},
}
@phdthesis{820,
abstract = {The lac operon is a classic model system for bacterial gene regulation, and has been studied extensively in E. coli, a classic model organism. However, not much is known about E. coli’s ecology and life outside the laboratory, in particular in soil and water environments. The natural diversity of the lac operon outside the laboratory, its role in the ecology of E. coli and the selection pressures it is exposed to, are similarly unknown.
In Chapter Two of this thesis, I explore the genetic diversity, phylogenetic history and signatures of selection of the lac operon across 20 natural isolates of E. coli and divergent clades of Escherichia. I found that complete lac operons were present in all isolates examined, which in all but one case were functional. The lac operon phylogeny conformed to the whole-genome phylogeny of the divergent Escherichia clades, which excludes horizontal gene transfer as an explanation for the presence of functional lac operons in these clades. All lac operon genes showed a signature of purifying selection; this signature was strongest for the lacY gene. Lac operon genes of human and environmental isolates showed similar signatures of selection, except the lacZ gene, which showed a stronger signature of selection in environmental isolates.
In Chapter Three, I try to identify the natural genetic variation relevant for phenotype and fitness in the lac operon, comparing growth rate on lactose and LacZ activity of the lac operons of these wild isolates in a common genetic background. Sequence variation in the lac promoter region, upstream of the -10 and -35 RNA polymerase binding motif, predicted variation in LacZ activity at full induction, using a thermodynamic model of polymerase binding (Tugrul, 2016). However, neither variation in LacZ activity, nor RNA polymerase binding predicted by the model correlated with variation in growth rate. Lac operons of human and environmental isolates did not differ systematically in either growth rate on lactose or LacZ protein activity, suggesting that these lac operons have been exposed to similar selection pressures. We thus have no evidence that the phenotypic variation we measured is relevant for fitness.
To start assessing the effect of genomic background on the growth phenotype conferred by the lac operon, I compared growth on minimal medium with lactose between lac operon constructs and the corresponding original isolates, I found that maximal growth rate was determined by genomic background, with almost all backgrounds conferring higher growth rates than lab strain K12 MG1655. However, I found no evidence that the lactose concentration at which growth was half maximal depended on genomic background.},
author = {Jesse, Fabienne},
pages = {87},
publisher = {IST Austria},
title = {{The lac operon in the wild}},
doi = {10.15479/AT:ISTA:th_857},
year = {2017},
}
@phdthesis{821,
abstract = {This dissertation focuses on algorithmic aspects of program verification, and presents modeling and complexity advances on several problems related to the
static analysis of programs, the stateless model checking of concurrent programs, and the competitive analysis of real-time scheduling algorithms.
Our contributions can be broadly grouped into five categories.
Our first contribution is a set of new algorithms and data structures for the quantitative and data-flow analysis of programs, based on the graph-theoretic notion of treewidth.
It has been observed that the control-flow graphs of typical programs have special structure, and are characterized as graphs of small treewidth.
We utilize this structural property to provide faster algorithms for the quantitative and data-flow analysis of recursive and concurrent programs.
In most cases we make an algebraic treatment of the considered problem,
where several interesting analyses, such as the reachability, shortest path, and certain kind of data-flow analysis problems follow as special cases.
We exploit the constant-treewidth property to obtain algorithmic improvements for on-demand versions of the problems,
and provide data structures with various tradeoffs between the resources spent in the preprocessing and querying phase.
We also improve on the algorithmic complexity of quantitative problems outside the algebraic path framework,
namely of the minimum mean-payoff, minimum ratio, and minimum initial credit for energy problems.
Our second contribution is a set of algorithms for Dyck reachability with applications to data-dependence analysis and alias analysis.
In particular, we develop an optimal algorithm for Dyck reachability on bidirected graphs, which are ubiquitous in context-insensitive, field-sensitive points-to analysis.
Additionally, we develop an efficient algorithm for context-sensitive data-dependence analysis via Dyck reachability,
where the task is to obtain analysis summaries of library code in the presence of callbacks.
Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is (i)~linear in the number of call sites and (ii)~only logarithmic in the size of the whole library, as opposed to linear in the size of the whole library.
Finally, we prove that Dyck reachability is Boolean Matrix Multiplication-hard in general, and the hardness also holds for graphs of constant treewidth.
This hardness result strongly indicates that there exist no combinatorial algorithms for Dyck reachability with truly subcubic complexity.
Our third contribution is the formalization and algorithmic treatment of the Quantitative Interprocedural Analysis framework.
In this framework, the transitions of a recursive program are annotated as good, bad or neutral, and receive a weight which measures
the magnitude of their respective effect.
The Quantitative Interprocedural Analysis problem asks to determine whether there exists an infinite run of the program where the long-run ratio of the bad weights over the good weights is above a given threshold.
We illustrate how several quantitative problems related to static analysis of recursive programs can be instantiated in this framework,
and present some case studies to this direction.
Our fourth contribution is a new dynamic partial-order reduction for the stateless model checking of concurrent programs. Traditional approaches rely on the standard Mazurkiewicz equivalence between traces, by means of partitioning the trace space into equivalence classes, and attempting to explore a few representatives from each class.
We present a new dynamic partial-order reduction method called the Data-centric Partial Order Reduction (DC-DPOR).
Our algorithm is based on a new equivalence between traces, called the observation equivalence.
DC-DPOR explores a coarser partitioning of the trace space than any exploration method based on the standard Mazurkiewicz equivalence.
Depending on the program, the new partitioning can be even exponentially coarser.
Additionally, DC-DPOR spends only polynomial time in each explored class.
Our fifth contribution is the use of automata and game-theoretic verification techniques in the competitive analysis and synthesis of real-time scheduling algorithms for firm-deadline tasks.
On the analysis side, we leverage automata on infinite words to compute the competitive ratio of real-time schedulers subject to various environmental constraints.
On the synthesis side, we introduce a new instance of two-player mean-payoff partial-information games, and show
how the synthesis of an optimal real-time scheduler can be reduced to computing winning strategies in this new type of games.},
author = {Pavlogiannis, Andreas},
pages = {418},
publisher = {IST Austria},
title = {{Algorithmic advances in program analysis and their applications}},
doi = {10.15479/AT:ISTA:th_854},
year = {2017},
}
@phdthesis{838,
abstract = {In this thesis we discuss the exact security of message authentication codes HMAC, NMAC, and PMAC. NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). PMAC is a block-cipher based mode of operation, which also happens to be the most famous fully parallel MAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto’96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, under two assumptions. Unfortunately, for many instantiations of HMAC one of them has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto’06] showed its security without this assumption. PMAC was introduced by Black and Rogaway at Eurocrypt 2002. If instantiated with a pseudorandom permutation over n-bit strings, PMAC constitutes a provably secure variable input-length PRF. For adversaries making q queries, each of length at most ℓ (in n-bit blocks), and of total length σ ≤ qℓ, the original paper proves an upper bound on the distinguishing advantage of O(σ^2/2^n), while the currently best bound is O(qσ/2^n). In this work we show that this bound is tight by giving an attack with advantage Ω(q^2 ℓ/2^n). In the PMAC construction one initially XORs a mask to every message block, where the mask for the i-th block is computed as τ_i := γ_i · L, where L is a (secret) random value, and γ_i is the i-th codeword of the Gray code. Our attack applies more generally to any sequence of γ_i’s which contains a large coset of a subgroup of GF(2^n). As for NMAC, our first contribution is a simpler and uniform proof: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC_f is an (ε + ℓqδ)-secure PRF against q queries of length at most ℓ blocks each.
We also show that this ε + ℓqδ bound is basically tight by constructing an f for which an attack with advantage ℓqδ exists. Moreover, we analyze the PRF-security of a modification of NMAC called NI by An and Bellare that avoids the constant rekeying on multi-block messages in NMAC and allows for an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq^2/2^c bound for this step, improving over the trivial bound of ℓ^2 q^2/2^c. Finally, we investigate if the security of PMAC can be further improved by using τ_i’s that are k-wise independent, for k > 1 (the original has k = 1). We observe that the security of PMAC will not increase in general if k = 2, and then prove that the security increases to O(q^2/2^n) if k = 4. Due to simple extension attacks, this is the best bound one can hope for, using any distribution on the masks. Whether k = 3 is already sufficient to get this level of security is left as an open problem. Keywords: Message authentication codes, Pseudorandom functions, HMAC, PMAC.},
author = {Rybar, Michal},
pages = {86},
publisher = {IST Austria},
title = {{(The exact security of) Message authentication codes}},
doi = {10.15479/AT:ISTA:th_828},
year = {2017},
}
@phdthesis{839,
abstract = {This thesis describes a brittle fracture simulation method for visual effects applications. Building upon a symmetric Galerkin boundary element method, we first compute stress intensity factors following the theory of linear elastic fracture mechanics. We then use these stress intensities to simulate the motion of a propagating crack front at a significantly higher resolution than the overall deformation of the breaking object. Allowing for spatial variations of the material's toughness during crack propagation produces visually realistic, highly-detailed fracture surfaces. Furthermore, we introduce approximations for stress intensities and crack opening displacements, resulting in both practical speed-up and theoretically superior runtime complexity compared to previous methods. While we choose a quasi-static approach to fracture mechanics, ignoring dynamic deformations, we also couple our fracture simulation framework to a standard rigid-body dynamics solver, enabling visual effects artists to simulate both large scale motion, as well as fracturing due to collision forces in a combined system. As fractures inside of an object grow, their geometry must be represented both in the coarse boundary element mesh, as well as at the desired fine output resolution. Using a boundary element method, we avoid complicated volumetric meshing operations. Instead we describe a simple set of surface meshing operations that allow us to progressively add cracks to the mesh of an object and still re-use all previously computed entries of the linear boundary element system matrix. On the high resolution level, we opt for an implicit surface representation. We then describe how to capture fracture surfaces during crack propagation, as well as separate the individual fragments resulting from the fracture process, based on this implicit representation. 
We show results obtained with our method, either solving the full boundary element system in every time step, or alternatively using our fast approximations. These results demonstrate that both of these methods perform well in basic test cases and produce realistic fracture surfaces. Furthermore we show that our fast approximations substantially out-perform the standard approach in more demanding scenarios. Finally, these two methods naturally combine, using the full solution while the problem size is manageably small and switching to the fast approximations later on. The resulting hybrid method gives the user a direct way to choose between speed and accuracy of the simulation. },
author = {Hahn, David},
pages = {124},
publisher = {IST Austria},
title = {{Brittle fracture simulation with boundary elements for computer graphics}},
doi = {10.15479/AT:ISTA:th_855},
year = {2017},
}
@phdthesis{938,
abstract = {The thesis encompasses several topics of plant cell biology which were studied in the model plant Arabidopsis thaliana. Chapter 1 concerns the plant hormone auxin and its polar transport through cells and tissues. The highly controlled, directional transport of auxin is facilitated by plasma membrane-localized transporters. Transporters from the PIN family direct auxin transport due to their polarized localizations at cell membranes. Substantial effort has been put into research on cellular trafficking of PIN proteins, which is thought to underlie their polar distribution. I participated in a forward genetic screen aimed at identifying novel regulators of PIN polarity. The screen yielded several genes which may be involved in PIN polarity regulation or participate in polar auxin transport by other means. Chapter 2 focuses on the endomembrane system, with particular attention to clathrin-mediated endocytosis. The project started with identification of several proteins that interact with clathrin light chains. Among them, I focused on two putative homologues of auxilin, which in non-plant systems is an endocytotic factor known for uncoating clathrin-coated vesicles in the final step of endocytosis. The body of my work consisted of an in-depth characterization of transgenic A. thaliana lines overexpressing these putative auxilins in an inducible manner. Overexpression of these proteins leads to an inhibition of endocytosis, as documented by imaging of cargoes and clathrin-related endocytic machinery. An extension of this work is an investigation into a concept of homeostatic regulation acting between distinct transport processes in the endomembrane system. With auxilin overexpressing lines, where endocytosis is blocked specifically, I made observations on the mutual relationship between two opposite trafficking processes of secretion and endocytosis. 
In Chapter 3, I analyze cortical microtubule arrays and their relationship to auxin signaling and polarized growth in elongating cells. In plants, microtubules are organized into arrays just below the plasma membrane, and it is thought that their function is to guide membrane-docked cellulose synthase complexes. These, in turn, influence cell wall structure and cell shape by directed deposition of cellulose fibres. In elongating cells, cortical microtubule arrays are able to reorient in relation to long cell axis, and these reorientations have been linked to cell growth and to signaling of growth-regulating factors such as auxin or light. In this chapter, I am addressing the causal relationship between microtubule array reorientation, growth, and auxin signaling. I arrive at a model where array reorientation is not guided by auxin directly, but instead is only controlled by growth, which, in turn, is regulated by auxin.},
author = {Adamowski, Maciek},
pages = {117},
publisher = {IST Austria},
title = {{Investigations into cell polarity and trafficking in the plant model Arabidopsis thaliana}},
doi = {10.15479/AT:ISTA:th_842},
year = {2017},
}
@phdthesis{961,
abstract = {Cell-cell contact formation constitutes the first step in the emergence of multicellularity in evolution, thereby allowing the differentiation of specialized cell types. In metazoan development, cell-cell contact formation is thought to influence cell fate specification, and cell fate specification has been implicated in cell-cell contact formation. However, remarkably little is yet known about whether and how the interaction and feedback between cell-cell contact formation and cell fate specification affect development. Here we identify a positive feedback loop between cell-cell contact duration, morphogen signaling and mesendoderm cell fate specification during zebrafish gastrulation. We show that long lasting cell-cell contacts enhance the competence of prechordal plate (ppl) progenitor cells to respond to Nodal signaling, required for proper ppl cell fate specification. We further show that Nodal signalling promotes ppl cell-cell contact duration, thereby generating an effective positive feedback loop between ppl cell-cell contact duration and cell fate specification. Finally, by using a combination of theoretical modeling and experimentation, we show that this feedback loop determines whether anterior axial mesendoderm cells become ppl progenitors or, instead, turn into endoderm progenitors. Our findings reveal that the gene regulatory networks leading to cell fate diversification within the developing embryo are controlled by the interdependent activities of cell-cell signaling and contact formation.},
author = {Barone, Vanessa},
pages = {109},
publisher = {IST Austria},
title = {{Cell adhesion and cell fate: An effective feedback loop during zebrafish gastrulation}},
doi = {10.15479/AT:ISTA:th_825},
year = {2017},
}
@phdthesis{992,
abstract = {An instance of the Constraint Satisfaction Problem (CSP) is given by a finite set of
variables, a finite domain of labels, and a set of constraints, each constraint acting on
a subset of the variables. The goal is to find an assignment of labels to its variables
that satisfies all constraints (or decide whether one exists). If we allow more general
“soft” constraints, which come with (possibly infinite) costs of particular assignments,
we obtain instances from a richer class called Valued Constraint Satisfaction Problem
(VCSP). There the goal is to find an assignment with minimum total cost.
In this thesis, we focus (assuming that P ≠ NP) on classifying computational com-
plexity of CSPs and VCSPs under certain restricting conditions. Two results are the core
content of the work. In one of them, we consider VCSPs parametrized by a constraint
language, that is the set of “soft” constraints allowed to form the instances, and finish
the complexity classification modulo (missing pieces of) complexity classification for
analogously parametrized CSP. The other result is a generalization of Edmonds’ perfect
matching algorithm. This generalization contributes to complexity classfications in two
ways. First, it gives a new (largest known) polynomial-time solvable class of Boolean
CSPs in which every variable may appear in at most two constraints and second, it
settles full classification of Boolean CSPs with planar drawing (again parametrized by a
constraint language).},
author = {Rolinek, Michal},
pages = {97},
publisher = {IST Austria},
title = {{Complexity of constraint satisfaction}},
doi = {10.15479/AT:ISTA:th_815},
year = {2017},
}
@phdthesis{1127,
abstract = {Plant hormone auxin and its transport between cells belong to the most important
mechanisms controlling plant development. Auxin itself could change localization of PINs and
thereby control direction of its own flow. We performed an expression profiling experiment
in Arabidopsis roots to identify potential regulators of PIN polarity which are transcriptionally
regulated by auxin signalling. We identified several novel regulators and performed a detailed
characterization of the transcription factor WRKY23 (At2g47260) and its role in auxin
feedback on PIN polarity. Gain-of-function and dominant-negative mutants revealed that
WRKY23 plays a crucial role in mediating the auxin effect on PIN polarity. In concordance,
typical polar auxin transport processes such as gravitropism and leaf vascular pattern
formation were disturbed by interfering with WRKY23 function.
In order to identify direct targets of WRKY23, we performed consequential expression
profiling experiments using a WRKY23 inducible gain-of-function line and dominant-negative
WRKY23 line that is defunct in PIN re-arrangement. Among several genes mostly related to
the groups of cell wall and defense process regulators, we identified LYSINE-HISTIDINE
TRANSPORTER 1 (LHT1; At5g40780), a small amino acid permease gene from the amino
acid/auxin permease family (AAAP), we present its detailed characterisation in auxin feedback
on PIN repolarization, identified its transcriptional regulation, we propose a potential
mechanism of its action. Moreover, we identified also a member of receptor-like protein
kinase LRR-RLK (LEUCINE-RICH REPEAT TRANSMEMBRANE PROTEIN KINASE PROTEIN 1;
LRRK1; At1g05700), which also affects auxin-dependent PIN re-arrangement. We described
its transcriptional behaviour, subcellular localization. Based on global expression data, we
tried to identify ligand responsible for mechanism of signalling and suggest signalling partner
and interactors. Additionally, we described role of novel phytohormone group, strigolactone,
in auxin-dependent PIN re-arrangement, that could be a fundament for future studies in this
field.
Our results provide first insights into an auxin transcriptional network targeting PIN
localization and thus regulating plant development. We highlighted WRKY23 transcriptional
network and characterised its mediatory role in plant development. We identified direct
effectors of this network, LHT1 and LRRK1, and describe their roles in PIN re-arrangement and
PIN-dependent auxin transport processes.},
author = {Prat, Tomas},
pages = {131},
publisher = {IST Austria},
title = {{Identification of novel regulators of PIN polarity and development of novel auxin sensor}},
year = {2017},
}
@phdthesis{1155,
abstract = {This dissertation concerns the automatic verification of probabilistic systems and programs with arrays by statistical and logical methods. Although statistical and logical methods are different in nature, we show that they can be successfully combined for system analysis. In the first part of the dissertation we present a new statistical algorithm for the verification of probabilistic systems with respect to unbounded properties, including linear temporal logic. Our algorithm often performs faster than the previous approaches, and at the same time requires less information about the system. In addition, our method can be generalized to unbounded quantitative properties such as mean-payoff bounds. In the second part, we introduce two techniques for comparing probabilistic systems. Probabilistic systems are typically compared using the notion of equivalence, which requires the systems to have the equal probability of all behaviors. However, this notion is often too strict, since probabilities are typically only empirically estimated, and any imprecision may break the relation between processes. On the one hand, we propose to replace the Boolean notion of equivalence by a quantitative distance of similarity. For this purpose, we introduce a statistical framework for estimating distances between Markov chains based on their simulation runs, and we investigate which distances can be approximated in our framework. On the other hand, we propose to compare systems with respect to a new qualitative logic, which expresses that behaviors occur with probability one or a positive probability. This qualitative analysis is robust with respect to modeling errors and applicable to many domains. In the last part, we present a new quantifier-free logic for integer arrays, which allows us to express counting. Counting properties are prevalent in array-manipulating programs, however they cannot be expressed in the quantified fragments of the theory of arrays. 
We present a decision procedure for our logic, and provide several complexity results.},
author = {Daca, Przemyslaw},
pages = {163},
publisher = {IST Austria},
title = {{Statistical and logical methods for property checking}},
doi = {10.15479/AT:ISTA:TH_730},
year = {2017},
}
@phdthesis{837,
abstract = {The hippocampus is a key brain region for memory and notably for spatial memory, and is needed for both spatial working and reference memories. Hippocampal place cells selectively discharge in specific locations of the environment to form mnemonic representations of space. Several behavioral protocols have been designed to test spatial memory which requires the experimental subject to utilize working memory and reference memory. However, less is known about how these memory traces are presented in the hippocampus, especially considering tasks that require both spatial working and long-term reference memory demand. The aim of my thesis was to elucidate how spatial working memory, reference memory, and the combination of both are represented in the hippocampus. In this thesis, using a radial eight-arm maze, I examined how the combined demand on these memories influenced place cell assemblies while reference memories were partially updated by changing some of the reward-arms. This was contrasted with task variants requiring working or reference memories only. Reference memory update led to gradual place field shifts towards the rewards on the switched arms. Cells developed enhanced firing in passes between newly-rewarded arms as compared to those containing an unchanged reward. The working memory task did not show such gradual changes. Place assemblies on occasions replayed trajectories of the maze; at decision points the next arm choice was preferentially replayed in tasks needing reference memory while in the pure working memory task the previously visited arm was replayed. Hence trajectory replay only reflected the decision of the animal in tasks needing reference memory update. At the reward locations, in all three tasks outbound trajectories of the current arm were preferentially replayed, showing the animals’ next path to the center. At reward locations trajectories were replayed preferentially in reverse temporal order.
Moreover, in the center reverse replay was seen in the working memory task but in the other tasks forward replay was seen. Hence, the direction of reactivation was determined by the goal locations so that part of the trajectory which was closer to the goal was reactivated later in an HSE while places further away from the goal were reactivated earlier. Altogether my work demonstrated that reference memory update triggers several levels of reorganization of the hippocampal cognitive map which are not seen in simpler working memory demands. Moreover, hippocampus is likely to be involved in spatial decisions through reactivating planned trajectories when reference memory recall is required for such a decision.},
author = {Xu, Haibing},
pages = {93},
publisher = {IST Austria},
title = {{Reactivation of the hippocampal cognitive map in goal-directed spatial tasks}},
doi = {10.15479/AT:ISTA:th_858},
year = {2017},
}
@phdthesis{1121,
abstract = {Horizontal gene transfer (HGT), the lateral acquisition of genes across existing species
boundaries, is a major evolutionary force shaping microbial genomes that facilitates
adaptation to new environments as well as resistance to antimicrobial drugs. As such,
understanding the mechanisms and constraints that determine the outcomes of HGT
events is crucial to understand the dynamics of HGT and to design better strategies to
overcome the challenges that originate from it.
Following the insertion and expression of a newly transferred gene, the success of an
HGT event will depend on the fitness effect it has on the recipient (host) cell. Therefore,
predicting the impact of HGT on the genetic composition of a population critically
depends on the distribution of fitness effects (DFE) of horizontally transferred genes.
However, to date, we have little knowledge of the DFE of newly transferred genes, and
hence little is known about the shape and scale of this distribution.
It is particularly important to better understand the selective barriers that determine
the fitness effects of newly transferred genes. In spite of substantial bioinformatics
efforts to identify horizontally transferred genes and selective barriers, a systematic
experimental approach to elucidate the roles of different selective barriers in defining
the fate of a transfer event has largely been absent. Similarly, although the fact that
environment might alter the fitness effect of a horizontally transferred gene may seem
obvious, little attention has been given to it in a systematic experimental manner.
In this study, we developed a systematic experimental approach that consists of
transferring 44 arbitrarily selected Salmonella typhimurium orthologous genes into an
Escherichia coli host, and estimating the fitness effects of these transferred genes at a
constant expression level by performing competition assays against the wild type.
In chapter 2, we performed one-to-one competition assays between a mutant strain
carrying a transferred gene and the wild type strain. By using flow cytometry we
estimated selection coefficients for the transferred genes with a precision level of 10^-3, and obtained the DFE of horizontally transferred genes. We then investigated if these
fitness effects could be predicted by any of the intrinsic properties of the genes, namely,
functional category, degree of complexity (protein-protein interactions), GC content,
codon usage and length. Our analyses revealed that the functional category and length
of the genes act as potential selective barriers. Finally, using the same procedure with
the endogenous E. coli orthologs of these 44 genes, we demonstrated that gene dosage is
the most prominent selective barrier to HGT.
In chapter 3, using the same set of genes we investigated the role of environment on the
success of HGT events. Under six different environments with different levels of stress
we performed more complex competition assays, where we mixed all 44 mutant strains
carrying transferred genes with the wild type strain. To estimate the fitness effects of
genes relative to wild type we used next generation sequencing. We found that the DFEs
of horizontally transferred genes are highly dependent on the environment, with
abundant gene–by-environment interactions. Furthermore, we demonstrated a
relationship between average fitness effect of a gene across all environments and its
environmental variance, and thus its predictability. Finally, in spite of the fitness effects
of genes being highly environment-dependent, we still observed a common shape of
DFEs across all tested environments.},
author = {Acar, Hande},
pages = {75},
publisher = {IST Austria},
title = {{Selective barriers to horizontal gene transfer}},
year = {2016},
}
@phdthesis{1122,
abstract = {Computer graphics is an extremely exciting field for two reasons. On the one hand,
there is a healthy injection of pragmatism coming from the visual effects industry
that want robust algorithms that work so they can produce results at an increasingly
frantic pace. On the other hand, they must always try to push the envelope and
achieve the impossible to wow their audiences in the next blockbuster, which means
that the industry has not succumbed to conservatism, and there is plenty of room to
try out new and crazy ideas if there is a chance that it will pan out into something
useful.
Water simulation has been in visual effects for decades, however it still remains
extremely challenging because of its high computational cost and difficult art-directability.
The work in this thesis tries to address some of these difficulties.
Specifically, we make the following three novel contributions to the state-of-the-art
in water simulation for visual effects.
First, we develop the first algorithm that can convert any sequence of closed
surfaces in time into a moving triangle mesh. State-of-the-art methods at the time
could only handle surfaces with fixed connectivity, but we are the first to be able to
handle surfaces that merge and split apart. This is important for water simulation
practitioners, because it allows them to convert splashy water surfaces extracted
from particles or simulated using grid-based level sets into triangle meshes that can
be either textured and enhanced with extra surface dynamics as a post-process.
We also apply our algorithm to other phenomena that merge and split apart, such
as morphs and noisy reconstructions of human performances.
Second, we formulate a surface-based energy that measures the deviation of a
water surface from a physically valid state. Such discrepancies arise when there is a
mismatch in the degrees of freedom between the water surface and the underlying
physics solver. This commonly happens when practitioners use a moving triangle
mesh with a grid-based physics solver, or when high-resolution grid-based surfaces
are combined with low-resolution physics. Following the direction of steepest
descent on our surface-based energy, we can either smooth these artifacts or turn
them into high-resolution waves by interpreting the energy as a physical potential.
Third, we extend state-of-the-art techniques in non-reflecting boundaries to handle spatially and time-varying background flows. This allows a novel
workflow where practitioners can re-simulate part of an existing simulation, such
as removing a solid obstacle, adding a new splash or locally changing the resolution.
Such changes can easily lead to new waves in the re-simulated region that would
reflect off of the new simulation boundary, effectively ruining the illusion of a
seamless simulation boundary between the existing and new simulations. Our
non-reflecting boundaries make sure that such waves are absorbed.},
author = {Bojsen-Hansen, Morten},
pages = {114},
publisher = {IST Austria},
title = {{Tracking, correcting and absorbing water surface waves}},
doi = {10.15479/AT:ISTA:th_640},
year = {2016},
}
@phdthesis{1123,
abstract = {Motivated by topological Tverberg-type problems in topological combinatorics and by classical
results about embeddings (maps without double points), we study the question whether a finite
simplicial complex K can be mapped into Rd without triple, quadruple, or, more generally, r-fold points (image points with at least r distinct preimages), for a given multiplicity r ≥ 2. In particular, we are interested in maps f : K → Rd that have no global r-fold intersection points, i.e., no r-fold points with preimages in r pairwise disjoint simplices of K, and we seek necessary and sufficient conditions for the existence of such maps.
We present higher-multiplicity analogues of several classical results for embeddings, in particular of the completeness of the Van Kampen obstruction for embeddability of k -dimensional
complexes into R2k, k ≥ 3. Specifically, we show that under suitable restrictions on the dimensions (viz., if dim K = (r − 1)k and d = rk for some k ≥ 3), a well-known deleted product criterion (DPC) is not only necessary but also sufficient for the existence of maps without global r-fold points. Our main technical tool is a higher-multiplicity version of the classical Whitney trick, by which pairs of isolated r-fold points of opposite sign can be eliminated by local modifications of the map, assuming codimension d − dim K ≥ 3.
An important guiding idea for our work was that sufficiency of the DPC, together with an old
result of Özaydin's on the existence of equivariant maps, might yield an approach to disproving the remaining open cases of the long-standing topological Tverberg conjecture, i.e., to construct maps from the N-simplex σN to Rd without r-Tverberg points when r is not a prime power and
N = (d + 1)(r − 1). Unfortunately, our proof of the sufficiency of the DPC requires codimension d − dim K ≥ 3, which is not satisfied for K = σN.
In 2015, Frick [16] found a very elegant way to overcome this "codimension 3 obstacle" and
to construct the first counterexamples to the topological Tverberg conjecture for all parameters (d, r) with d ≥ 3r + 1 and r not a prime power, by a reduction to a suitable lower-dimensional skeleton, for which the codimension 3 restriction is satisfied and maps without r-Tverberg points exist by Özaydin's result and sufficiency of the DPC.
In this thesis, we present a different construction (which does not use the constraint method) that yields counterexamples for d ≥ 3r , r not a prime power. },
author = {Mabillard, Isaac},
pages = {55},
publisher = {IST Austria},
title = {{Eliminating higher-multiplicity intersections: an r-fold Whitney trick for the topological Tverberg conjecture}},
year = {2016},
}
@phdthesis{1124,
author = {Morri, Maurizio},
pages = {129},
publisher = {IST Austria},
title = {{Optical functionalization of human class A orphan G-protein coupled receptors}},
year = {2016},
}
@phdthesis{1125,
abstract = {Natural environments are never constant but subject to spatial and temporal change on
all scales, increasingly so due to human activity. Hence, it is crucial to understand the
impact of environmental variation on evolutionary processes. In this thesis, I present
three topics that share the common theme of environmental variation, yet illustrate its
effect from different perspectives.
First, I show how a temporally fluctuating environment gives rise to second-order
selection on a modifier for stress-induced mutagenesis. Without fluctuations, when
populations are adapted to their environment, mutation rates are minimized. I argue
that a stress-induced mutator mechanism may only be maintained if the population is
repeatedly subjected to diverse environmental challenges, and I outline implications of
the presented results to antibiotic treatment strategies.
Second, I discuss my work on the evolution of dispersal. Besides reproducing
known results about the effect of heterogeneous habitats on dispersal, it identifies
spatial changes in dispersal type frequencies as a source for selection for increased
propensities to disperse. This concept contains effects of relatedness that are known
to promote dispersal, and I explain how it identifies other forces selecting for dispersal
and puts them on a common scale.
Third, I analyse genetic variances of phenotypic traits under multivariate stabilizing
selection. For the case of constant environments, I generalize known formulae of
equilibrium variances to multiple traits and discuss how the genetic variance of a focal
trait is influenced by selection on background traits. I conclude by presenting ideas and
preliminary work aiming at including environmental fluctuations in the form of moving
trait optima into the model.},
author = {Novak, Sebastian},
pages = {124},
publisher = {IST Austria},
title = {{Evolutionary processes in variable environments}},
year = {2016},
}
@phdthesis{1126,
abstract = {Traditionally machine learning has been focusing on the problem of solving a single
task in isolation. While being quite well understood, this approach disregards an
important aspect of human learning: when facing a new problem, humans are able to
exploit knowledge acquired from previously learned tasks. Intuitively, access to several
problems simultaneously or sequentially could also be advantageous for a machine
learning system, especially if these tasks are closely related. Indeed, results of many
empirical studies have provided justification for this intuition. However, theoretical
justifications of this idea are rather limited.
The focus of this thesis is to expand the understanding of potential benefits of information
transfer between several related learning problems. We provide theoretical
analysis for three scenarios of multi-task learning - multiple kernel learning, sequential
learning and active task selection. We also provide a PAC-Bayesian perspective on
lifelong learning and investigate how the task generation process influences the generalization
guarantees in this scenario. In addition, we show how some of the obtained
theoretical results can be used to derive principled multi-task and lifelong learning
algorithms and illustrate their performance on various synthetic and real-world datasets.},
author = {Pentina, Anastasia},
pages = {127},
publisher = {IST Austria},
title = {{Theoretical foundations of multi-task lifelong learning}},
doi = {10.15479/AT:ISTA:TH_776},
year = {2016},
}
@phdthesis{1128,
abstract = {The process of gene expression is central to the modern understanding of how cellular systems
function. In this process, a special kind of regulatory proteins, called transcription factors,
are important to determine how much protein is produced from a given gene. As biological
information is transmitted from transcription factor concentration to mRNA levels to amounts of
protein, various sources of noise arise and pose limits to the fidelity of intracellular signaling.
This thesis concerns itself with several aspects of stochastic gene expression: (i) the mathematical
description of complex promoters responsible for the stochastic production of biomolecules,
(ii) fundamental limits to information processing the cell faces due to the interference from multiple
fluctuating signals, (iii) how the presence of gene expression noise influences the evolution
of regulatory sequences, (iv) and tools for the experimental study of origins and consequences
of cell-cell heterogeneity, including an application to bacterial stress response systems.},
author = {Rieckh, Georg},
pages = {114},
publisher = {IST Austria},
title = {{Studying the complexities of transcriptional regulation}},
year = {2016},
}
@phdthesis{1129,
abstract = {Directed cell migration is a hallmark feature, present in almost all multi-cellular
organisms. Despite its importance, basic questions regarding force transduction
or directional sensing are still heavily investigated. Directed migration of cells
guided by immobilized guidance cues - haptotaxis - occurs in key-processes,
such as embryonic development and immunity (Middleton et al., 1997; Nguyen
et al., 2000; Thiery, 1984; Weber et al., 2013). Immobilized guidance cues
comprise adhesive ligands, such as collagen and fibronectin (Barczyk et al.,
2009), or chemokines - the main guidance cues for migratory leukocytes
(Middleton et al., 1997; Weber et al., 2013). While adhesive ligands serve as
attachment sites guiding cell migration (Carter, 1965), chemokines instruct
haptotactic migration by inducing adhesion to adhesive ligands and directional
guidance (Rot and Andrian, 2004; Schumann et al., 2010). Quantitative analysis
of the cellular response to immobilized guidance cues requires in vitro assays
that foster cell migration, offer accurate control of the immobilized cues on a
subcellular scale and in the ideal case closely reproduce in vivo conditions. The
exploration of haptotactic cell migration through design and employment of such
assays represents the main focus of this work.
Dendritic cells (DCs) are leukocytes, which after encountering danger
signals such as pathogens in peripheral organs instruct naïve T-cells and
consequently the adaptive immune response in the lymph node (Mellman and
Steinman, 2001). To reach the lymph node from the periphery, DCs follow
haptotactic gradients of the chemokine CCL21 towards lymphatic vessels
(Weber et al., 2013). Questions about how DCs interpret haptotactic CCL21
gradients have not yet been addressed. The main reason for this is the lack of
an assay that offers diverse haptotactic environments, hence allowing the study
of DC migration as a response to different signals of immobilized guidance cue.
In this work, we developed an in vitro assay that enables us to
quantitatively assess DC haptotaxis, by combining precisely controllable
chemokine photo-patterning with physically confining migration conditions. With this tool at hand, we studied the influence of CCL21 gradient properties and
concentration on DC haptotaxis. We found that haptotactic gradient sensing
depends on the absolute CCL21 concentration in combination with the local
steepness of the gradient. Our analysis suggests that the directionality of
migrating DCs is governed by the signal-to-noise ratio of CCL21 binding to its
receptor CCR7. Moreover, the haptotactic CCL21 gradient formed in vivo
provides an optimal shape for DCs to recognize haptotactic guidance cue.
By reconstitution of the CCL21 gradient in vitro we were also able to
study the influence of CCR7 signal termination on DC haptotaxis. To this end,
we used DCs lacking the G-protein coupled receptor kinase GRK6, which is
responsible for CCL21 induced CCR7 receptor phosphorylation and
desensitization (Zidar et al., 2009). We found that CCR7 desensitization by
GRK6 is crucial for maintenance of haptotactic CCL21 gradient sensing in vitro
and confirm those observations in vivo.
In the context of the organism, immobilized haptotactic guidance cues
often coincide and compete with soluble chemotactic guidance cues. During
wound healing, fibroblasts are exposed and influenced by adhesive cues and
soluble factors at the same time (Wu et al., 2012; Wynn, 2008). Similarly,
migrating DCs are exposed to both, soluble chemokines (CCL19 and truncated
CCL21) inducing chemotactic behavior as well as the immobilized CCL21. To
quantitatively assess these complex coinciding immobilized and soluble
guidance cues, we implemented our chemokine photo-patterning technique in a
microfluidic system allowing for chemotactic gradient generation. To validate
the assay, we observed DC migration in competing CCL19/CCL21
environments.
Adhesiveness guided haptotaxis has been studied intensively over the
last century. However, quantitative studies leading to conceptual models are
largely missing, again due to the lack of a precisely controllable in vitro assay. A
requirement for such an in vitro assay is that it must prevent any uncontrolled
cell adhesion. This can be accomplished by stable passivation of the surface. In
addition, controlled adhesion must be sustainable, quantifiable and dose
dependent in order to create homogenous gradients. Therefore, we developed a novel covalent photo-patterning technique satisfying all these needs. In
combination with a sustainable poly-vinyl alcohol (PVA) surface coating we
were able to generate gradients of adhesive cue to direct cell migration. This
approach allowed us to characterize the haptotactic migratory behavior of
zebrafish keratocytes in vitro. Furthermore, defined patterns of adhesive cue
allowed us to control for cell shape and growth on a subcellular scale.},
author = {Schwarz, Jan},
pages = {178},
publisher = {IST Austria},
title = {{Quantitative analysis of haptotactic cell migration}},
year = {2016},
}
@phdthesis{1130,
abstract = {In this thesis we present a computer-aided programming approach to concurrency. Our approach
helps the programmer by automatically fixing concurrency-related bugs, i.e. bugs that occur
when the program is executed using an aggressive preemptive scheduler, but not when using a
non-preemptive (cooperative) scheduler. Bugs are program behaviours that are incorrect w.r.t.
a specification. We consider both user-provided explicit specifications in the form of assertion
statements in the code as well as an implicit specification. The implicit specification is inferred
from the non-preemptive behaviour. Let us consider sequences of calls that the program makes
to an external interface. The implicit specification requires that any such sequence produced
under a preemptive scheduler should be included in the set of sequences produced under a
non-preemptive scheduler.
We consider several semantics-preserving fixes that go beyond atomic sections typically
explored in the synchronisation synthesis literature. Our synthesis is able to place locks, barriers
and wait-signal statements and last, but not least reorder independent statements. The latter
may be useful if a thread is released too early, e.g., before some initialisation is completed. We
guarantee that our synthesis does not introduce deadlocks and that the synchronisation inserted
is optimal w.r.t. a given objective function.
We dub our solution trace-based synchronisation synthesis and it is loosely based on
counterexample-guided inductive synthesis (CEGIS). The synthesis works by discovering a
trace that is incorrect w.r.t. the specification and identifying ordering constraints crucial to trigger
the specification violation. Synchronisation may be placed immediately (greedy approach) or
delayed until all incorrect traces are found (non-greedy approach). For the non-greedy approach
we construct a set of global constraints over synchronisation placements. Each model of the
global constraints set corresponds to a correctness-ensuring synchronisation placement. The
placement that is optimal w.r.t. the given objective function is chosen as the synchronisation
solution.
We evaluate our approach on a number of realistic (albeit simplified) Linux device-driver
benchmarks. The benchmarks are versions of the drivers with known concurrency-related bugs.
For the experiments with an explicit specification we added assertions that would detect the bugs
in the experiments. Device drivers lend themselves to implicit specification, where the device and
the operating system are the external interfaces. Our experiments demonstrate that our synthesis
method is precise and efficient. We implemented objective functions for coarse-grained and
fine-grained locking and observed that different synchronisation placements are produced for
our experiments, favouring e.g. a minimal number of synchronisation operations or maximum
concurrency.},
author = {Tarrach, Thorsten},
pages = {151},
publisher = {IST Austria},
title = {{Automatic synthesis of synchronisation primitives for concurrent programs}},
year = {2016},
}
@phdthesis{1131,
abstract = {Evolution of gene regulation is important for phenotypic evolution and diversity. Sequence-specific binding of regulatory proteins is one of the key regulatory mechanisms determining gene expression. Although there has been intense interest in evolution of regulatory binding sites in the last decades, a theoretical understanding is far from being complete. In this thesis, I aim at a better understanding of the evolution of transcriptional regulatory binding sequences by using biophysical and population genetic models.
In the first part of the thesis, I discuss how to formulate the evolutionary dynamics of binding sequences in a single isolated binding site and in promoter/enhancer regions. I develop a theoretical framework bridging between a thermodynamical model for transcription and a mutation-selection-drift model for monomorphic populations. I mainly address the typical evolutionary rates, and how they depend on biophysical parameters (e.g. binding length and specificity) and population genetic parameters (e.g. population size and selection strength).
In the second part of the thesis, I analyse empirical data for a better evolutionary and biophysical understanding of sequence-specific binding of bacterial RNA polymerase. First, I infer selection on regulatory and non-regulatory binding sites of RNA polymerase in the E. coli K12 genome. Second, I infer the chemical potential of RNA polymerase, an important but unknown physical parameter defining the threshold energy for strong binding. Furthermore, I try to understand the relation between the lac promoter sequence diversity and the LacZ activity variation among 20 bacterial isolates by constructing a simple but biophysically motivated gene expression model. Lastly, I lay out a statistical framework to predict adaptive point mutations in de novo promoter evolution in a selection experiment.},
author = {Tugrul, Murat},
pages = {89},
publisher = {IST Austria},
title = {{Evolution of transcriptional regulatory sequences}},
year = {2016},
}
@phdthesis{1189,
abstract = {Within the scope of this thesis, we show that a driven-dissipative system with
few ultracold atoms can exhibit dissipatively bound states, even if the atom-atom
interaction is purely repulsive. This bond arises due to the dipole-dipole interaction,
which is restricted to one of the lower electronic energy states, resulting
in the distance-dependent coherent population trapping. The quality of this
already established method of dissipative binding is improved and the application
is extended to higher dimensions and a larger number of atoms. Here, we
simulate two- and three-atom systems using an adapted approach to the Monte Carlo
wave-function method and analyse the results. Finally, we examine the
possibility of finding a setting allowing trimer states but prohibiting dimer states.
In the context of open quantum systems, such a three-body bound state
corresponds to the driven-dissipative analogue of a Borromean state. These states can
be detected in modern experiments with dipolar and Rydberg-dressed ultracold
atomic gases.
},
author = {Jochum, Clemens},
pages = {1 -- 77},
publisher = {Technical University Vienna},
title = {{Dissipative Few-Body Quantum Systems}},
year = {2016},
}
@phdthesis{1396,
abstract = {CA3 pyramidal neurons are thought to play a key role in memory storage and pattern completion by activity-dependent synaptic plasticity between CA3-CA3 recurrent excitatory synapses. To examine the induction rules of synaptic plasticity at CA3-CA3 synapses, we performed whole-cell patch-clamp recordings in acute hippocampal slices from rats (postnatal 21-24 days) at room temperature. Compound excitatory postsynaptic potentials (EPSPs) were recorded by tract stimulation in stratum oriens in the presence of 10 µM gabazine. High-frequency stimulation (HFS) induced N-methyl-D-aspartate (NMDA) receptor-dependent long-term potentiation (LTP). Although LTP by HFS did not require postsynaptic spikes, it was blocked by Na+-channel blockers suggesting that local active processes (e.g., dendritic spikes) may contribute to LTP induction without requirement of a somatic action potential (AP). We next examined the properties of spike timing-dependent plasticity (STDP) at CA3-CA3 synapses. Unexpectedly, low-frequency pairing of EPSPs and backpropagated action potentials (bAPs) induced LTP, independent of temporal order. The STDP curve was symmetric and broad, with a half-width of ~150 ms. Consistent with these specific STDP induction properties, post-presynaptic sequences led to a supralinear summation of spine [Ca2+] transients. Furthermore, in autoassociative network models, storage and recall was substantially more robust with symmetric than with asymmetric STDP rules. In conclusion, we found associative forms of LTP at CA3-CA3 recurrent collateral synapses with distinct induction rules. LTP induced by HFS may be associated with dendritic spikes. In contrast, low frequency pairing of pre- and postsynaptic activity induced LTP only if EPSP-AP were temporally very close. Together, these induction mechanisms of synaptic plasticity may contribute to memory storage in the CA3-CA3 microcircuit at different ranges of activity.},
author = {Mishra, Rajiv Kumar},
pages = {83},
publisher = {IST Austria},
title = {{Synaptic plasticity rules at CA3-CA3 recurrent synapses in hippocampus}},
year = {2016},
}
@phdthesis{1397,
abstract = {We study partially observable Markov decision processes (POMDPs) with objectives used in verification and artificial intelligence. The qualitative analysis problem given a POMDP and an objective asks whether there is a strategy (policy) to ensure that the objective is satisfied almost surely (with probability 1), resp. with positive probability (with probability greater than 0). For POMDPs with limit-average payoff, where a reward value in the interval [0,1] is associated to every transition, and the payoff of an infinite path is the long-run average of the rewards, we consider two types of path constraints: (i) a quantitative limit-average constraint defines the set of paths where the payoff is at least a given threshold λ1 ∈ (0, 1]; and (ii) a qualitative limit-average constraint, which is the special case with threshold 1. Our main results for qualitative limit-average constraint under almost-sure winning are as follows: (i) the problem of deciding the existence of a finite-memory controller is EXPTIME-complete; and (ii) the problem of deciding the existence of an infinite-memory controller is undecidable. For quantitative limit-average constraints we show that the problem of deciding the existence of a finite-memory controller is undecidable. We present a prototype implementation of our EXPTIME algorithm. For POMDPs with ω-regular conditions specified as parity objectives, while the qualitative analysis problems are known to be undecidable even for the very special case of parity objectives, we establish decidability (with optimal complexity) of the qualitative analysis problems for POMDPs with parity objectives under finite-memory strategies. We establish optimal (exponential) memory bounds and EXPTIME-completeness of the qualitative analysis problems under finite-memory strategies for POMDPs with parity objectives. 
Based on our theoretical algorithms we also present a practical approach, where we design heuristics to deal with the exponential complexity, and have applied our implementation on a number of well-known POMDP examples for robotics applications. For POMDPs with a set of target states and an integer cost associated with every transition, we study the optimization objective that asks to minimize the expected total cost of reaching a state in the target set, while ensuring that the target set is reached almost surely. We show that for general integer costs approximating the optimal cost is undecidable. For positive costs, our results are as follows: (i) we establish matching lower and upper bounds for the optimal cost, both double and exponential in the POMDP state space size; (ii) we show that the problem of approximating the optimal cost is decidable and present approximation algorithms that extend existing algorithms for POMDPs with finite-horizon objectives. We show experimentally that it performs well in many examples of interest. We study more deeply the problem of almost-sure reachability, where given a set of target states, the question is to decide whether there is a strategy to ensure that the target set is reached almost surely. While in general the problem EXPTIME-complete, in many practical cases strategies with a small amount of memory suffice. Moreover, the existing solution to the problem is explicit, which first requires to construct explicitly an exponential reduction to a belief-support MDP. We first study the existence of observation-stationary strategies, which is NP-complete, and then small-memory strategies. We present a symbolic algorithm by an efficient encoding to SAT and using a SAT solver for the problem. We report experimental results demonstrating the scalability of our symbolic (SAT-based) approach. 
Decentralized POMDPs (DEC-POMDPs) extend POMDPs to a multi-agent setting, where several agents operate in an uncertain environment independently to achieve a joint objective. In this work we consider Goal DEC-POMDPs, where given a set of target states, the objective is to ensure that the target set is reached with minimal cost. We consider the indefinite-horizon (infinite-horizon with either discounted-sum, or undiscounted-sum, where absorbing goal states have zero-cost) problem. We present a new and novel method to solve the problem that extends methods for finite-horizon DEC-POMDPs and the real-time dynamic programming approach for POMDPs. We present experimental results on several examples, and show that our approach presents promising results. In the end we present a short summary of a few other results related to verification of MDPs and POMDPs.},
author = {Chmelik, Martin},
pages = {232},
publisher = {IST Austria},
title = {{Algorithms for partially observable markov decision processes}},
year = {2016},
}
@phdthesis{1398,
abstract = {Hybrid zones represent evolutionary laboratories, where recombination brings together alleles in combinations which have not previously been tested by selection. This provides an excellent opportunity to test the effect of molecular variation on fitness, and how this variation is able to spread through populations in a natural context. The snapdragon Antirrhinum majus is polymorphic in the wild for two loci controlling the distribution of yellow and magenta floral pigments. Where the yellow A. m. striatum and the magenta A. m. pseudomajus meet along a valley in the Spanish Pyrenees they form a stable hybrid zone. Alleles at these loci recombine to give striking transgressive variation for flower colour. The sharp transition in phenotype over ~1km implies strong selection maintaining the hybrid zone. An indirect assay of pollinator visitation in the field found that pollinators forage in a positive-frequency dependent manner on Antirrhinum, matching previous data on fruit set. Experimental arrays and paternity analysis of wild-pollinated seeds demonstrated assortative mating for pigmentation alleles, and that pollinator behaviour alone is sufficient to explain this pattern. Selection by pollinators should be sufficiently strong to maintain the hybrid zone, although other mechanisms may be at work. At a broader scale I examined evolutionary transitions between yellow and anthocyanin pigmentation in the tribe Antirrhinae, and found that selection has acted on these transitions. Together, these results demonstrate that pollinators are a major determinant of reproductive success and mating patterns in wild Antirrhinum.},
author = {Ellis, Thomas},
pages = {130},
publisher = {IST Austria},
title = {{The role of pollinator-mediated selection in the maintenance of a flower color polymorphism in an Antirrhinum majus hybrid zone}},
doi = {10.15479/AT:ISTA:TH_526},
year = {2016},
}
@phdthesis{1399,
abstract = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.},
author = {Pausinger, Florian},
pages = {144},
publisher = {IST Austria},
title = {{On the approximation of intrinsic volumes}},
year = {2015},
}
@phdthesis{1400,
abstract = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.},
author = {Reiter, Johannes},
pages = {183},
publisher = {IST Austria},
title = {{The subclonal evolution of cancer}},
year = {2015},
}
@phdthesis{1401,
abstract = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over a couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of object recognition task: learning augmented attributes as mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.},
author = {Sharmanska, Viktoriia},
pages = {144},
publisher = {IST Austria},
title = {{Learning with attributes for object recognition: Parametric and non-parametric views}},
year = {2015},
}
@phdthesis{1395,
abstract = {In this thesis I studied various individual and social immune defences employed by the invasive garden ant Lasius neglectus mostly against entomopathogenic fungi. The first two chapters of this thesis address the phenomenon of 'social immunisation'. Social immunisation, that is the immunological protection of group members due to social contact to a pathogen-exposed nestmate, has been described in various social insect species against different types of pathogens. However, in the case of entomopathogenic fungi it has, so far, only been demonstrated that social immunisation exists at all. Its underlying mechanisms or any other properties were, however, unknown. In the first chapter of this thesis I identified the mechanistic basis of social immunisation in L. neglectus against the entomopathogenic fungus Metarhizium. I could show that nestmates of a pathogen-exposed individual contract low-level infections due to social interactions. These low-level infections are, however, non-lethal and cause an active stimulation of the immune system, which protects the nestmates upon subsequent pathogen encounters. In the second chapter of this thesis I investigated the specificity and colony level effects of social immunisation. I demonstrated that the protection conferred by social immunisation is highly specific, protecting ants only against the same pathogen strain. In addition, depending on the respective context, social immunisation may even cause fitness costs. I further showed that social immunisation crucially affects sanitary behaviour and disease dynamics within ant groups. In the third chapter of this thesis I studied the effects of the ectosymbiotic fungus Laboulbenia formicarum on its host L. neglectus. Although Laboulbeniales are the largest order of insect-parasitic fungi, research concerning host fitness consequence is sparse. 
I showed that highly Laboulbenia-infected ants sustain fitness costs under resource limitation, however, gain fitness benefits when exposed to an entomopathogenic fungus. These effects are probably caused by a prophylactic upregulation of behavioural as well as physiological immune defences in highly infected ants.},
author = {Konrad, Matthias},
pages = {131},
publisher = {IST Austria},
title = {{Immune defences in ants: Effects of social immunisation and a fungal ectosymbiont in the ant Lasius neglectus}},
year = {2014},
}
@phdthesis{1402,
abstract = {Phosphatidylinositol (PtdIns) is a structural phospholipid that can be phosphorylated into various lipid signaling molecules, designated polyphosphoinositides (PPIs). The reversible phosphorylation of PPIs on the 3, 4, or 5 position of inositol is performed by a set of organelle-specific kinases and phosphatases, and the characteristic head groups make these molecules ideal for regulating biological processes in time and space. In yeast and mammals, PtdIns3P and PtdIns(3,5)P2 play crucial roles in trafficking toward the lytic compartments, whereas the role in plants is not yet fully understood. Here we identified the role of a land plant-specific subgroup of PPI phosphatases, the suppressor of actin 2 (SAC2) to SAC5, during vacuolar trafficking and morphogenesis in Arabidopsis thaliana. SAC2-SAC5 localize to the tonoplast along with PtdIns3P, the presumable product of their activity. In SAC gain- and loss-of-function mutants, the levels of PtdIns monophosphates and bisphosphates were changed, with opposite effects on the morphology of storage and lytic vacuoles, and the trafficking toward the vacuoles was defective. Moreover, multiple sac knockout mutants had an increased number of smaller storage and lytic vacuoles, whereas extralarge vacuoles were observed in the overexpression lines, correlating with various growth and developmental defects. The fragmented vacuolar phenotype of sac mutants could be mimicked by treating wild-type seedlings with PtdIns(3,5)P2, corroborating that this PPI is important for vacuole morphology. Taken together, these results provide evidence that PPIs, together with their metabolic enzymes SAC2-SAC5, are crucial for vacuolar trafficking and for vacuolar morphology and function in plants.},
author = {Marhavá, Petra},
pages = {90},
publisher = {IST Austria},
title = {{Molecular mechanisms of patterning and subcellular trafficking in Arabidopsis thaliana}},
year = {2014},
}
@phdthesis{1403,
abstract = {A variety of developmental and disease related processes depend on epithelial cell sheet spreading. In order to gain insight into the biophysical mechanism(s) underlying the tissue morphogenesis we studied the spreading of an epithelium during the early development of the zebrafish embryo. In zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the yolk cell to completely engulf it at the end of gastrulation. Previous studies have proposed that an actomyosin ring forming within the yolk syncytial layer (YSL) acts as purse string that through constriction along its circumference pulls on the margin of the EVL. Direct biophysical evidence for this hypothesis has however been missing. The aim of the thesis was to understand how the actomyosin ring may generate pulling forces onto the EVL and what cellular mechanism(s) may facilitate the spreading of the epithelium. Using laser ablation to measure cortical tension within the actomyosin ring we found an anisotropic tension distribution, which was highest along the circumference of the ring. However the low degree of anisotropy was incompatible with the actomyosin ring functioning as a purse string only. Additionally, we observed retrograde cortical flow from vegetal parts of the ring into the EVL margin. Interpreting the experimental data using a theoretical distribution that models the tissues as active viscous gels led us to propose that the actomyosin ring has a twofold contribution to EVL epiboly. It not only acts as a purse string through constriction along its circumference, but in addition constriction along the width of the ring generates pulling forces through friction-resisted cortical flow. Moreover, when rendering the purse string mechanism unproductive EVL epiboly proceeded normally indicating that the flow-friction mechanism is sufficient to drive the process. 
Aiming to understand what cellular mechanism(s) may facilitate the spreading of the epithelium we found that tension-oriented EVL cell divisions limit tissue anisotropy by releasing tension along the division axis and promote epithelial spreading. Notably, EVL cells undergo ectopic cell fusion in conditions in which oriented-cell division is impaired or the epithelium is mechanically challenged. Taken together our study of EVL epiboly suggests a novel mechanism of force generation for actomyosin rings through friction-resisted cortical flow and highlights the importance of tension-oriented cell divisions in epithelial morphogenesis.},
author = {Behrndt, Martin},
pages = {91},
publisher = {IST Austria},
title = {{Forces driving epithelial spreading in zebrafish epiboly}},
year = {2014},
}
@phdthesis{1404,
abstract = {The co-evolution of hosts and pathogens is characterized by continuous adaptations of both parties. Pathogens of social insects need to adapt towards disease defences at two levels: 1) individual immunity of each colony member consisting of behavioural defence strategies as well as humoral and cellular immune responses and 2) social immunity that is collectively performed by all group members comprising behavioural, physiological and organisational defence strategies.
To disentangle the selection pressure on pathogens by the collective versus individual level of disease defence in social insects, we performed an evolution experiment using the Argentine Ant, Linepithema humile, as a host and a mixture of the general insect pathogenic fungus Metarhizium spp. (6 strains) as a pathogen. We allowed pathogen evolution over 10 serial host passages to two different evolution host treatments: (1) only individual host immunity in a single host treatment, and (2) simultaneously acting individual and social immunity in a social host treatment, in which an exposed ant was accompanied by two untreated nestmates.
Before starting the pathogen evolution experiment, the 6 Metarhizium spp. strains were characterised concerning conidiospore size, killing rates in singly and socially reared ants, their competitiveness under coinfecting conditions and their influence on ant behaviour. We analysed how the ancestral strain mixture changed in conidiospore size, killing rate and strain composition dependent on host treatment (single or social hosts) during 10 passages and found that killing rate and conidiospore size of the pathogen increased under both evolution regimes, but differently depending on host treatment.
Testing the evolved strain mixtures that evolved under either the single or social host treatment under both single and social current rearing conditions in a full factorial design experiment revealed that the additional collective defences in insect societies add new selection pressure for their coevolving pathogens that compromise their ability to adapt to its host at the group level. To our knowledge, this is the first study directly measuring the influence of social immunity on pathogen evolution.},
author = {Stock, Miriam},
pages = {101},
publisher = {IST Austria},
title = {{Evolution of a fungal pathogen towards individual versus social immunity in ants}},
year = {2014},
}
@phdthesis{1405,
abstract = {Motivated by the analysis of highly dynamic message-passing systems, i.e. unbounded thread creation, mobility, etc. we present a framework for the analysis of depth-bounded systems. Depth-bounded systems are one of the most expressive known fragments of the π-calculus for which interesting verification problems are still decidable. Even though they are infinite state systems depth-bounded systems are well-structured, thus can be analyzed algorithmically. We give an interpretation of depth-bounded systems as graph-rewriting systems. This gives more flexibility and ease of use to apply depth-bounded systems to other types of systems like shared memory concurrency.
First, we develop an adequate domain of limits for depth-bounded systems, a prerequisite for the effective representation of downward-closed sets. Downward-closed sets are needed by forward saturation-based algorithms to represent potentially infinite sets of states. Then, we present an abstract interpretation framework to compute the covering set of well-structured transition systems. Because, in general, the covering set is not computable, our abstraction over-approximates the actual covering set. Our abstraction captures the essence of acceleration-based algorithms while giving up enough precision to ensure convergence. We have implemented the analysis in the PICASSO tool and show that it is accurate in practice. Finally, we build some further analyses like termination using the covering set as starting point.},
author = {Zufferey, Damien},
pages = {134},
publisher = {IST Austria},
title = {{Analysis of dynamic message passing programs}},
year = {2013},
}
@phdthesis{1406,
abstract = {Epithelial spreading is a critical part of various developmental and wound repair processes. Here we use zebrafish epiboly as a model system to study the cellular and molecular mechanisms underlying the spreading of epithelial sheets. During zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the embryo to eventually cover the entire yolk cell by the end of gastrulation. The EVL leading edge is anchored through tight junctions to the yolk syncytial layer (YSL), where directly adjacent to the EVL margin a contractile actomyosin ring is formed that is thought to drive EVL epiboly. The prevalent view in the field was that the contractile ring exerts a pulling force on the EVL margin, which pulls the EVL towards the vegetal pole. However, how this force is generated and how it affects EVL morphology still remains elusive. Moreover, the cellular mechanisms mediating the increase in EVL surface area, while maintaining tissue integrity and function are still unclear. Here we show that the YSL actomyosin ring pulls on the EVL margin by two distinct force-generating mechanisms. One mechanism is based on contraction of the ring around its circumference, as previously proposed. The second mechanism is based on actomyosin retrograde flows, generating force through resistance against the substrate. The latter can function at any epiboly stage even in situations where the contraction-based mechanism is unproductive. Additionally, we demonstrate that during epiboly the EVL is subjected to anisotropic tension, which guides the orientation of EVL cell division along the main axis (animal-vegetal) of tension. The influence of tension in cell division orientation involves cell elongation and requires myosin-2 activity for proper spindle alignment. Strikingly, we reveal that tension-oriented cell divisions release anisotropic tension within the EVL and that in the absence of such divisions, EVL cells undergo ectopic fusions. 
We conclude that forces applied to the EVL by the action of the YSL actomyosin ring generate a tension anisotropy in the EVL that orients cell divisions, which in turn limit tissue tension increase thereby facilitating tissue spreading.},
author = {Campinho, Pedro},
pages = {123},
publisher = {IST Austria},
title = {{Mechanics of zebrafish epiboly: Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading}},
year = {2013},
}
@phdthesis{2964,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. These neurons receive multiple excitatory inputs from numerous sources. Therefore, the rules of spatiotemporal integration of multiple synaptic inputs and propagation of action potentials are important to understand how CA3 neurons contribute to higher brain functions at cellular level. By using confocally targeted patch-clamp recording techniques, we investigated the biophysical properties of rat CA3 pyramidal neuron dendrites. We found two distinct dendritic domains critical for action potential initiation and propagation: In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+-channel mediated dendritic spikes are efficiently evoked by local dendritic depolarization or waveforms mimicking synaptic events. These findings can be explained by a high Na+-to-K+ conductance density ratio of CA3 pyramidal neuron dendrites. The results challenge the prevailing view that proximal mossy fiber inputs activate CA3 pyramidal neurons more efficiently than distal perforant inputs by showing that the distal synapses trigger a different form of activity represented by dendritic spikes. The high probability of dendritic spike initiation in the distal area may enhance the computational power of CA3 pyramidal neurons in the hippocampal network.},
author = {Kim, Sooyun},
pages = {65},
publisher = {IST Austria},
title = {{Active properties of hippocampal CA3 pyramidal neuron dendrites}},
year = {2012},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de-adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by inside-out activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19. 
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in three-dimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined three-dimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP. 
The result was that chemotactic DCs can switch between integrin-dependent and integrin- independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblast cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrin-mediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue.},
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@phdthesis{2075,
abstract = {This thesis investigates the combination of data-driven and physically based techniques for acquiring, modeling, and animating deformable materials, with a special focus on human faces. Furthermore, based on these techniques, we introduce a data-driven process for designing and fabricating materials with desired deformation behavior.
Realistic simulation behavior, surface details, and appearance are still demanding tasks. Neither pure data-driven, pure procedural, nor pure physical methods are best suited for accurate synthesis of facial motion and details (both for appearance and geometry), due to the difficulties in model design, parameter estimation, and desired controllability for animators. Capturing of a small but representative amount of real data, and then synthesizing diverse on-demand examples with physically-based models and real data as input benefits from both sides: Highly realistic model behavior due to real-world data and controllability due to physically-based models.
To model the face and its behavior, hybrid physically-based and data-driven approaches are elaborated. We investigate surface-based representations as well as a solid representation based on FEM. To achieve realistic behavior, we propose to build light-weighted data capture devices to acquire real-world data to estimate model parameters and to employ concepts from data-driven modeling techniques and machine learning. The resulting models support simple acquisition systems, offer techniques to process and extract model parameters from real-world data, provide a compact representation of the facial geometry and its motion, and allow intuitive editing. We demonstrate applications such as capture of facial geometry and motion and real-time animation and transfer of facial details, and show that our soft tissue model can react to external forces and produce realistic deformations beyond facial expressions.
Based on this model, we furthermore introduce a data-driven process for designing and fabricating materials with desired deformation behavior. The process starts with measuring deformation properties of base materials. Each material is represented as a non-linear stress-strain relationship in a finite-element model. For material design and fabrication, we introduce an optimization process that finds the best combination of base materials that meets a user’s criteria specified by example deformations. Our algorithm employs a number of strategies to prune poor solutions from the combinatorial search space. We finally demonstrate the complete process by designing and fabricating objects with complex heterogeneous materials using modern multi-material 3D printers.
},
author = {Bickel, Bernd},
number = {7458},
publisher = {ETH Zurich},
title = {{Measurement-based modeling and fabrication of deformable materials for human faces}},
doi = {10.3929/ethz-a-006354908},
volume = {499},
year = {2010},
}
@phdthesis{3296,
abstract = {Accurate computational representations of highly deformable surfaces are indispensable in the fields of computer animation, medical simulation, computer vision, digital modeling, and computational physics. The focus of this dissertation is on the animation of physics-based phenomena with highly detailed deformable surfaces represented by triangle meshes.
We first present results from an algorithm that generates continuum mechanics animations with intricate surface features. This method combines a finite element method with a tetrahedral mesh generator and a high resolution surface mesh, and it is orders of magnitude more efficient than previous approaches. Next, we present an efficient solution for the challenging problem of computing topological changes in detailed dynamic surface meshes. We then introduce a new physics-inspired surface tracking algorithm that is capable of preserving arbitrarily thin features and reproducing realistic fine-scale topological changes like Rayleigh-Plateau instabilities. This physics-inspired surface tracking technique also opens the door for a unique coupling between surficial finite element methods and volumetric finite difference methods, in order to simulate liquid surface tension phenomena more efficiently than any previous method. Due to its dramatic increase in computational resolution and efficiency, this method yielded the first computer simulations of a fully developed crown splash with droplet pinch off.},
author = {Wojtan, Chris},
pages = {1--175},
publisher = {Georgia Institute of Technology},
title = {{Animating physical phenomena with embedded surface meshes}},
year = {2010},
}
@phdthesis{3962,
  author    = {Pflicke, Holger},
  publisher = {IST Austria},
  title     = {{Dendritic cell migration across basement membranes in the skin}},
  year      = {2010},
}
@phdthesis{4232,
author = {de Vladar, Harold},
publisher = {Faculty of Mathematical and Natural Sciences, University of Groningen},
title = {{Stochasticity and variability in the dynamics and genetics of populations}},
year = {2009},
}
@phdthesis{4363,
author = {Singh, Vasu},
publisher = {EPFL Lausanne},
title = {{Formalizing and Verifying Transactional Memories}},
year = {2009},
}
@phdthesis{3400,
abstract = {Invasive fungal infections pose a serious threat to immunocompromised people. Most of these infections are caused by either Candida or Aspergillus species, with A. fumigatus being the predominant causative agent of Invasive Aspergillosis. Affected people comprise mainly haematopoietic stem cell or solid organ transplant patients who receive either high-dose corticosteroids or immunosuppressants. These risk factors predispose to the development of Invasive
Aspergillosis which is lethal in 20 to 80 % of the cases, largely due to insufficient efficacy of current antifungal therapy. Thus one major aim in current mycological research is the identification of new drug targets.
The polysaccharide-based fungal cell wall is both essential to fungi and absent from human cells which makes it appear an attractive new target. Notably, many components of the A. fumigatus cell wall, including the polysaccharide galactomannan, glycoproteins, and glycolipids, contain the unusual sugar galactofuranose (Galf). In contrast to the other cell wall monosaccharides, Galf does not occur on human cells but is known as component of cell surface molecules of many pathogenic bacteria and protozoa, such as Mycobacterium tuberculosis or Leishmania major. These molecules are often essential for virulence or viability of these organisms which suggested a possible role of Galf in the pathogenicity of A. fumigatus.
To address the importance of Galf in A. fumigatus, the key biosynthesis gene glfA, encoding UDP-galactopyranose mutase (UGM), was deleted. In different experimental approaches it was demonstrated that the absence of the glfA gene led to a complete loss of Galf-containing glycans.
Analysis of the DeltaglfA phenotype revealed growth and sporulation defects, reduced thermotolerance and an increased susceptibility to antifungal drugs. Electron Microscopy indicated a cell wall defect as a likely cause for the observed impairments. Furthermore, the virulence of the DeltaglfA mutant was found to be severely attenuated in a murine model of Invasive Aspergillosis.
The second focus of this study was laid on further elucidation of the galactofuranosylation pathway in A. fumigatus. In eukaryotes, a UDP-Galf transporter is likely required to transport UDP-Galf from the
cytosol into the organelles of the secretory pathway, but no such activity had been described. Sixteen candidate genes were identified in the A. fumigatus genome of which one, glfB, was found in close proximity to the glfA gene. In vitro transport assays revealed specificity of GlfB for UDP-Galf suggesting that glfB encoded indeed a UDP-Galf transporter. The influence of glfB on
galactofuranosylation was determined by a DeltaglfB deletion mutant, which closely recapitulated the DeltaglfA phenotype and was likewise found to be completely devoid of Galf. It could be concluded that all galactofuranosylation processes in A. fumigatus occur in the secretory pathway, including the biosynthesis of the cell wall polysaccharide galactomannan whose subcellular origin was previously disputed.
Thus in the course of this study the first UDP-Galf specific nucleotide sugar transporter was identified and its requirement for galactofuranosylation in A. fumigatus demonstrated. Moreover, it was shown that blocking the galactofuranosylation pathway impaired virulence of A. fumigatus which suggests the UDP-Galf biosynthesis enzyme UGM as a target for new antifungal drugs.},
author = {Schmalhorst, Philipp},
pages = {1--72},
publisher = {Gottfried Wilhelm Leibniz Universität Hannover},
title = {{Biosynthesis of Galactofuranose Containing Glycans and Their Relevance for the Pathogenic Fungus Aspergillus fumigatus}},
year = {2009},
}
@phdthesis{4409,
abstract = {Models of timed systems must incorporate not only the sequence of system events, but the timings of these events as well to capture the real-time aspects of physical systems. Timed automata are models of real-time systems in which states consist of discrete locations and values for real-time clocks. The presence of real-time clocks leads to an uncountable state space. This thesis studies verification problems on timed automata in a game theoretic framework.
For untimed systems, two systems are close if every sequence of events of one system is also observable in the second system. For timed systems, the difference in timings of the two corresponding sequences is also of importance. We propose the notion of bisimulation distance which quantifies timing differences; if the bisimulation distance between two systems is epsilon, then (a) every sequence of events of one system has a corresponding matching sequence in the other, and (b) the timings of matching events in between the two corresponding traces do not differ by more than epsilon. We show that we can compute the bisimulation distance between two timed automata to within any desired degree of accuracy. We also show that the timed verification logic TCTL is robust with respect to our notion of quantitative bisimilarity, in particular, if a system satisfies a formula, then every close system satisfies a close formula.
Timed games are used for distinguishing between the actions of several agents, typically a controller and an environment. The controller must achieve its objective against all possible choices of the environment. The modeling of the passage of time leads to the presence of zeno executions, and corresponding unrealizable strategies of the controller which may achieve objectives by blocking time. We disallow such unreasonable strategies by restricting all agents to use only receptive strategies --strategies which while not being required to ensure time divergence by any agent, are such that no agent is responsible for blocking time. Time divergence is guaranteed when all players use receptive strategies. We show that timed automaton games with receptive strategies can be solved by a reduction to finite state turn based game graphs. We define the logic timed alternating-time temporal logic for verification of timed automaton games and show that the logic can be model checked in EXPTIME. We also show that the minimum time required by an agent to reach a desired location, and the maximum time an agent can stay safe within a set of locations, against all possible actions of its adversaries are both computable.
We next study the memory requirements of winning strategies for timed automaton games. We prove that finite memory strategies suffice for safety objectives, and that winning strategies for reachability objectives may require infinite memory in general. We introduce randomized strategies in which an agent can propose a probabilistic distribution of moves and show that finite memory randomized strategies suffice for all omega-regular objectives. We also show that while randomization helps in simplifying winning strategies, and thus allows the construction of simpler controllers, it does not help a player in winning at more states, and thus does not allow the construction of more powerful controllers.
Finally we study robust winning strategies in timed games. In a physical system, a controller may propose an action together with a time delay, but the action cannot be assumed to be executed at the exact proposed time delay. We present robust strategies which incorporate such jitters and show that the set of states from which an agent can win robustly is computable.},
author = {Prabhu, Vinayak S},
pages = {1 -- 137},
publisher = {University of California, Berkeley},
title = {{Games for the verification of timed systems}},
year = {2008},
}
@phdthesis{4415,
abstract = {Many computing applications, especially those in safety critical embedded systems, require highly predictable timing properties. However, time is often not present in the prevailing computing and networking abstractions. In fact, most advances in computer architecture, software, and networking favor average-case performance over timing predictability. This thesis studies several methods for the design of concurrent and/or distributed embedded systems with precise timing guarantees. The focus is on flexible and compositional methods for programming and verification of the timing properties. The presented methods together with related formalisms cover two levels of design: (1) Programming language/model level. We propose the distributed variant of Giotto, a coordination programming language with an explicit temporal semantics—the logical execution time (LET) semantics. The LET of a task is an interval of time that specifies the time instants at which task inputs and outputs become available (task release and termination instants). The LET of a task is always non-zero. This allows us to communicate values across the network without changing the timing information of the task, and without introducing nondeterminism. We show how this methodology supports distributed code generation for distributed real-time systems. The method gives up some performance in favor of composability and predictability. We characterize the tradeoff by comparing the LET semantics with the semantics used in Simulink. (2) Abstract task graph level. We study interface-based design and verification of applications represented with task graphs. We consider task sequence graphs with general event models, and cyclic graphs with periodic event models with jitter and phase. Here an interface of a component exposes time and resource constraints of the component. Together with interfaces we formally define interface composition operations and the refinement relation. 
For efficient and flexible composability checking two properties are important: incremental design and independent refinement. According to the incremental design property the composition of interfaces can be performed in any order, even if interfaces for some components are not known. The refinement relation is defined such that in a design we can always substitute a refined interface for an abstract one. We show that the framework supports independent refinement, i.e., the refinement relation is preserved under composition operations.},
author = {Matic, Slobodan},
pages = {1 -- 148},
publisher = {University of California, Berkeley},
title = {{Compositionality in deterministic real-time embedded systems}},
year = {2008},
}
@phdthesis{4524,
abstract = {Complex requirements, time-to-market pressure and regulatory constraints have made the designing of embedded systems extremely challenging. This is evident by the increase in effort and expenditure for design of safety-driven real-time control-dominated applications like automotive and avionic controllers. Design processes are often challenged by lack of proper programming tools for specifying and verifying critical requirements (e.g. timing and reliability) of such applications. Platform based design, an approach for designing embedded systems, addresses the above concerns by separating requirement from architecture. The requirement specifies the intended behavior of an application while the architecture specifies the guarantees (e.g. execution speed, failure rate etc). An implementation, a mapping of the requirement on the architecture, is then analyzed for correctness. The orthogonalization of concerns makes the specification and analyses simpler. An effective use of such design methodology has been proposed in Logical Execution Time (LET) model of real-time tasks. The model separates the timing requirements (specified by release and termination instances of a task) from the architecture guarantees (specified by worst-case execution time of the task).
This dissertation proposes a coordination language, Hierarchical Timing Language (HTL), that captures the timing and reliability requirements of real-time applications. An implementation of the program on an architecture is then analyzed to check whether desired timing and reliability requirements are met or not. The core framework extends the LET model by accounting for reliability and refinement. The reliability model separates the reliability requirements of tasks from the reliability guarantees of the architecture. The requirement expresses the desired long-term reliability while the architecture provides a short-term reliability guarantee (e.g. failure rate for each iteration). The analysis checks if the short-term guarantee ensures the desired long-term reliability. The refinement model allows replacing a task by another task during program execution. Refinement preserves schedulability and reliability, i.e., if a refined task is schedulable and reliable for an implementation, then the refining task is also schedulable and reliable for the implementation. Refinement helps in concise specification without overloading analysis.
The work presents the formal model, the analyses (both with and without refinement), and a compiler for HTL programs. The compiler checks composition and refinement constraints, performs schedulability and reliability analyses, and generates code for implementation of an HTL program on a virtual machine. Three real-time controllers, one each from automatic control, automotive control and avionic control, are used to illustrate the steps in modeling and analyzing HTL programs.},
author = {Ghosal, Arkadeb},
pages = {1 -- 210},
publisher = {University of California, Berkeley},
title = {{A hierarchical coordination language for reliable real-time tasks}},
year = {2008},
}
@phdthesis{4559,
abstract = {We study games played on graphs with omega-regular conditions specified as parity, Rabin, Streett or Muller conditions. These games have applications in the verification, synthesis, modeling, testing, and compatibility checking of reactive systems. Important distinctions between graph games are as follows: (a) turn-based vs. concurrent games, depending on whether at a state of the game only a single player makes a move, or players make moves simultaneously; (b) deterministic vs. stochastic, depending on whether the transition function is a deterministic or a probabilistic function over successor states; and (c) zero-sum vs. non-zero-sum, depending on whether the objectives of the players are strictly conflicting or not.
We establish that the decision problems for turn-based stochastic zero-sum games with Rabin, Streett, and Muller objectives are NP-complete, coNP-complete, and PSPACE-complete, respectively, substantially improving the previously known 3EXPTIME bound. We also present strategy improvement style algorithms for turn-based stochastic Rabin and Streett games. In the case of concurrent stochastic zero-sum games with parity objectives we obtain a PSPACE bound, again improving the previously known 3EXPTIME bound. As a consequence, concurrent stochastic zero-sum games with Rabin, Streett, and Muller objectives can be solved in EXPSPACE, improving the previously known 4EXPTIME bound. We also present an elementary and combinatorial proof of the existence of memoryless \epsilon-optimal strategies in concurrent stochastic games with reachability objectives, for all real \epsilon>0, where an \epsilon-optimal strategy achieves the value of the game within \epsilon against all strategies of the opponent. We also use the proof techniques to present a strategy improvement style algorithm for concurrent stochastic reachability games.
We then go beyond \omega-regular objectives and study the complexity of an important class of quantitative objectives, namely, limit-average objectives. In the case of limit-average games, the states of the graph are labeled with rewards and the goal is to maximize the long-run average of the rewards. We show that concurrent stochastic zero-sum games with limit-average objectives can be solved in EXPTIME.
Finally, we introduce a new notion of equilibrium, called secure equilibrium, in non-zero-sum games which captures the notion of conditional competitiveness. We prove the existence of unique maximal secure equilibrium payoff profiles in turn-based deterministic games, and present algorithms to compute such payoff profiles. We also show how the notion of secure equilibrium extends the assume-guarantee style of reasoning in the game theoretic framework.},
author = {Chatterjee, Krishnendu},
pages = {1 -- 247},
publisher = {University of California, Berkeley},
title = {{Stochastic ω-Regular Games}},
year = {2007},
}
@phdthesis{4566,
abstract = {Complex system design today calls for compositional design and implementation. However each component is designed with certain assumptions about the environment it is meant to operate in, and delivering certain guarantees if those assumptions are satisfied; numerous inter-component interaction errors are introduced in the manual and error-prone integration process as there is little support in design environments for machine-readably representing these assumptions and guarantees and automatically checking consistency during integration.
Based on Interface Automata we propose a framework for compositional design and analysis of systems: a set of domain-specific automata-theoretic type systems for compositional system specification and analysis by behavioral specification of open systems. We focus on three different domains: component-based hardware systems communicating on bidirectional wires, concurrent distributed recursive message-passing software systems, and embedded software system components operating in resource-constrained environments. For these domains we present approaches to formally represent the assumptions and conditional guarantees between interacting open system components. Composition of such components produces new components with the appropriate assumptions and guarantees. We check satisfaction of temporal logic specifications by such components, and the substitutability of one component with another in an arbitrary context. Using this framework one can analyze large systems incrementally without needing extensive summary information to close the system at each stage. Furthermore, we focus only on the inter-component interaction behavior without dealing with the full implementation details of each component. Many of the merits of automata-theoretic model-checking are combined with the compositionality afforded by type-system based techniques. We also present an integer-based extension of the conventional boolean verification framework motivated by our interface formalism for embedded software components.
Our algorithms for checking the behavioral compatibility of component interfaces are available in our tool Chic, which can be used as a plug-in for the Java IDE JBuilder and the heterogeneous modeling and design environment Ptolemy II.
Finally, we address the complementary problem of partitioning a large system into meaningful coherent components by analyzing the interaction patterns between its basic elements. We demonstrate the usefulness of our partitioning approach by evaluating its efficacy in improving unit-test branch coverage for a large software system implemented in C.},
author = {Chakrabarti, Arindam},
pages = {1 -- 244},
publisher = {University of California, Berkeley},
title = {{A framework for compositional design and analysis of systems}},
year = {2007},
}
@phdthesis{4236,
author = {de Vladar, Harold Paul},
publisher = {Centro de Estudios Avanzados, IVIC},
title = {{Métodos no lineales y sus aplicaciones en dinámicas aleatorias de poblaciones celulares}},
doi = {3810},
year = {2004},
}
@phdthesis{4424,
abstract = {The enormous cost and ubiquity of software errors necessitates techniques and tools that can precisely analyze large systems and prove that they meet given specifications, or if they don't, return counterexample behaviors showing how the system fails. Recent advances in model checking, decision procedures, program analysis and type systems, and a shift of focus to partial specifications common to several systems (e.g., memory safety and race freedom) have resulted in several practical verification methods. However, these methods are either precise or they are scalable, depending on whether they track the values of variables or only a fixed small set of dataflow facts (e.g., types), and are usually insufficient for precisely verifying large programs.
We describe a new technique called Lazy Abstraction (LA) which achieves both precision and scalability by localizing the use of precise information. LA automatically builds, explores and refines a single abstract model of the program in a way that different parts of the model exhibit different degrees of precision, namely just enough to verify the desired property. The algorithm automatically mines the information required by partitioning mechanical proofs of unsatisfiability of spurious counterexamples into Craig Interpolants. For multithreaded systems, we give a new technique based on analyzing the behavior of a single thread executing in a context which is an abstraction of the other (arbitrarily many) threads. We define novel context models and show how to automatically infer them and analyze the full system (thread + context) using LA.
LA is implemented in BLAST. We have run BLAST on Windows and Linux Device Drivers to verify API conformance properties, and have used it to find (or guarantee the absence of) data races in multithreaded Networked Embedded Systems (NESC) applications. BLAST is able to prove the absence of races in several cases where earlier methods, which depend on lock-based synchronization, fail.},
author = {Jhala, Ranjit},
pages = {1 -- 165},
publisher = {University of California, Berkeley},
title = {{Program verification by lazy abstraction}},
year = {2004},
}
@phdthesis{2414,
author = {Wagner, Uli},
publisher = {ETH Zurich},
title = {{On k-Sets and Their Applications}},
doi = {10.3929/ethz-a-004708408},
year = {2003},
}
@phdthesis{4416,
abstract = {Methods for the formal specification and verification of systems are indispensible for the development of complex yet correct systems. In formal verification, the designer describes the system in a modeling language with a well-defined semantics, and this system description is analyzed against a set of correctness requirements. Model checking is an algorithmic technique to check that a system description indeed satisfies correctness requirements given as logical specifications. While successful in hardware verification, the potential for model checking for software and embedded systems has not yet been realized. This is because traditional model checking focuses on systems modeled as finite state-transition graphs. While a natural model for hardware (especially synchronous hardware), state-transition graphs often do not capture software and embedded systems at an appropriate level of granularity. This dissertation considers two orthogonal extensions to finite state-transition graphs making model checking techniques applicable to both a wider class of systems and a wider class of properties.
The first direction is an extension to infinite-state structures finitely represented using constraints and operations on constraints. Infinite state arises when we wish to model variables with unbounded range (e.g., integers), or data structures, or real time. We provide a uniform framework of symbolic region algebras to study model checking of infinite-state systems. We also provide sufficient language-independent termination conditions for symbolic model checking algorithms on infinite state systems.
The second direction supplements verification with game theoretic reasoning. Games are natural models for interactions between components. We study game theoretic behavior with winning conditions given by temporal logic objectives both in the deterministic and in the probabilistic context. For deterministic games, we provide an extremal model characterization of fixpoint algorithms that link solutions of verification problems to solutions for games. For probabilistic games we study fixpoint characterization of winning probabilities for games with omega-regular winning objectives, and construct (epsilon-)optimal winning strategies.},
author = {Majumdar, Ritankar S},
pages = {1 -- 201},
publisher = {University of California, Berkeley},
title = {{Symbolic algorithms for verification and control}},
year = {2003},
}
@phdthesis{4425,
abstract = {Giotto provides a time-triggered programmer’s model for the implementation of embedded control systems with hard real-time constraints. Giotto’s precise semantics and predictability make it suitable for safety-critical applications.
Giotto is based around the idea that time-triggered task invocation together with time-triggered mode switching can form a useful programming model for real-time systems. To substantiate this claim, we describe the use of Giotto to refactor the software of a small, autonomous helicopter. The ease with which Giotto expresses the existing software provides evidence that Giotto is an appropriate programming language for control systems.
Since Giotto is a real-time programming language, ensuring that Giotto programs meet their deadlines is crucial. To study precedence-constrained Giotto scheduling, we first examine single-mode, single-processor scheduling. We extend to an infinite, periodic setting the classical problem of meeting deadlines for a set of tasks with release times, deadlines, precedence constraints, and preemption. We then develop an algorithm for scheduling Giotto programs on a single processor by representing Giotto programs as instances of the extended scheduling problem.
Next, we study multi-mode, single-processor Giotto scheduling. This problem is different from classical scheduling problems, since in our precedence-constrained approach, the deadlines of tasks may vary depending on the mode switching behavior of the program. We present conditional scheduling models which capture this varying-deadline behavior. We develop polynomial-time algorithms for some conditional scheduling models, and prove others to be computationally hard. We show how to represent multi-mode Giotto programs as instances of the model, resulting in an algorithm for scheduling multi-mode Giotto programs on a single processor.
Finally, we show that the problem of scheduling Giotto programs for multiple networked processors is strongly NP-hard.},
author = {Horowitz, Benjamin},
pages = {1 -- 237},
publisher = {University of California, Berkeley},
title = {{Giotto: A time-triggered language for embedded programming}},
year = {2003},
}
@phdthesis{3678,
author = {Lampert, Christoph},
booktitle = {Bonner Mathematische Schriften},
pages = {1 -- 165},
publisher = {Universität Bonn, Fachbibliothek Mathematik},
title = {{The Neumann operator in strictly pseudoconvex domains with weighted Bergman metric}},
volume = {356},
year = {2003},
}
@phdthesis{4414,
abstract = {This dissertation investigates game-theoretic approaches to the algorithmic analysis of concurrent, reactive systems. A concurrent system comprises a number of components working concurrently; a reactive system maintains an ongoing interaction with its environment. Traditional approaches to the formal analysis of concurrent reactive systems usually view the system as an unstructured state-transition graph; instead, we view them as collections of interacting components, where each one is an open system which accepts inputs from the other components. The interactions among the components are naturally modeled as games.
Adopting this game-theoretic view, we study three related problems pertaining to the verification and synthesis of systems. Firstly, we propose two novel game-theoretic techniques for the model-checking of concurrent reactive systems, and improve the performance of model-checking. The first technique discovers an error as soon as it cannot be prevented, which can be long before it actually occurs. This technique is based on the key observation that "unpreventability" is a local property to a module: an error is unpreventable in a module state if no environment can prevent it. The second technique attempts to decompose a model-checking proof into smaller proof obligations by constructing abstract modules automatically, using reachability and "unpreventability" information about the concrete modules. Three increasingly powerful proof decomposition rules are proposed and we show that in practice, the resulting abstract modules are often significantly smaller than the concrete modules and can drastically reduce the space and time requirements for verification. Both techniques fall into the category of compositional reasoning.
Secondly, we investigate the composition and control of synchronous systems. An essential property of synchronous systems for compositional reasoning is non-blocking. In the composition of synchronous systems, however, due to circular causal dependency of input and output signals, non-blocking is not always guaranteed. Blocking compositions of systems can be ruled out semantically, by insisting on the existence of certain fixed points, or syntactically, by equipping systems with types, which make the dependencies between input and output signals transparent. We characterize various typing mechanisms in game-theoretic terms, and study their effects on the controller synthesis problem. We show that our typing systems are general enough to capture interesting real-life synchronous systems such as all delay-insensitive digital circuits. We then study their corresponding single-step control problems --a restricted form of controller synthesis problem whose solutions can be iterated in appropriate manners to solve all LTL controller synthesis problems. We also consider versions of the controller synthesis problem in which the type of the controller is given. We show that the solution of these fixed-type control problems requires the evaluation of partially ordered (Henkin) quantifiers on boolean formulas, and is therefore harder (nondeterministic exponential time) than more traditional control questions.
Thirdly, we study the synthesis of a class of open systems, namely, uninitialized state machines. The sequential synthesis problem, which is closely related to Church's solvability problem, asks, given a specification in the form of a binary relation between input and output streams, for the construction of a finite-state stream transducer that converts inputs to appropriate outputs. For efficiency reasons, practical sequential hardware is often designed to operate without prior initialization. Such hardware designs can be modeled by uninitialized state machines, which are required to satisfy their specification if started from any state. We solve the sequential synthesis problem for uninitialized systems, that is, we construct uninitialized finite-state stream transducers. We consider specifications given by LTL formulas, deterministic, nondeterministic, universal, and alternating Buechi automata. We solve this uninitialized synthesis problem by reducing it to the well-understood initialized synthesis problem. While our solution is straightforward, it leads, for some specification formalisms, to upper bounds that are exponentially worse than the complexity of the corresponding initialized problems. However, we prove lower bounds to show that our simple solutions are optimal for all considered specification formalisms. The lower bound proofs require nontrivial generic reductions.},
author = {Mang, Freddy Y},
pages = {1 -- 116},
publisher = {University of California, Berkeley},
title = {{Games in open systems verification and synthesis}},
year = {2002},
}
@phdthesis{4411,
abstract = {Model checking algorithms for the verification of reactive systems proceed by a systematic and exhaustive exploration of the system state space. They do not scale to large designs because of the state explosion problem --the number of states grows exponentially with the number of components in the design. Consequently, the model checking problem is PSPACE-hard in the size of the design description. This dissertation proposes three novel techniques to combat the state explosion problem.
One of the most important advances in model checking in recent years has been the discovery of symbolic methods, which use a calculus of expressions, such as binary decision diagrams, to represent the state sets encountered during state space exploration. Symbolic model checking has proved to be effective for verifying hardware designs. Traditionally, symbolic checking of temporal logic specifications is performed by backward fixpoint reasoning with the operator Pre. Backward reasoning can be wasteful since unreachable states are explored. We suggest the use of forward fixpoint reasoning based on the operator Post. We show how all linear temporal logic specifications can be model checked symbolically by forward reasoning. In contrast to backward reasoning, forward reasoning performs computations only on the reachable states.
Heuristics that improve algorithms for application domains, such as symbolic methods for hardware designs, are useful but not enough to make model checking feasible on industrial designs. Currently, exhaustive state exploration is possible only on designs with about 50-100 boolean state variables. Assume-guarantee verification attempts to combat the state explosion problem by using the principle of "divide and conquer," where the components of the implementation are analyzed one at a time. Typically, an implementation component refines its specification only when its inputs are suitably constrained by other components in the implementation. The assume-guarantee principle states that instead of constraining the inputs by implementation components, it is sound to constrain them by the corresponding specification components, which can be significantly smaller. We extend the assume-guarantee proof rule to deal with the case where the specification operates at a coarser time scale than the implementation. Using our model checker Mocha, which implements this methodology, we verify VGI, a parallel DSP processor chip with 64 compute processors each containing approximately 800 state variables and 30K gates.
Our third contribution is a systematic model checking methodology for verifying the abstract shared-memory interface of sequential consistency on multiprocessor systems with three parameters --number of processors, number of memory locations, and number of data values. Sequential consistency requires that some interleaving of the local temporal orders of read/write events at different processors be a trace of serial memory. Therefore, it suffices to construct a non-interfering serializer that watches and reorders read/write events so that a trace of serial memory is obtained. While in general such a serializer must be unbounded even for fixed values of the parameters --checking sequential consistency is undecidable!-- we show that the paradigmatic class of snoopy cache coherence protocols has finite-state serializers. In order to reduce the arbitrary-parameter problem to the fixed-parameter problem, we develop a novel framework for induction over the number of processors and use the notion of a serializer to reduce the problem of verifying sequential consistency to that of checking language inclusion between finite state machines.},
author = {Qadeer, Shaz},
pages = {1 -- 150},
publisher = {University of California, Berkeley},
title = {{Algorithms and Methodology for Scalable Model Checking}},
year = {1999},
}
@phdthesis{4419,
author = {Kopke, Peter W},
publisher = {Cornell University},
title = {{The Theory of Rectangular Hybrid Automata}},
year = {1996},
}
@phdthesis{4428,
abstract = {Hybrid systems are real-time systems that react to both discrete and continuous activities (such as analog signals, time, temperature, and speed). Typical examples of hybrid systems are embedded systems, timing-based communication protocols, and digital circuits at the transistor level. Due to the rapid development of microprocessor technology, hybrid systems directly control much of what we depend on in our daily lives. Consequently, the formal specification and verification of hybrid systems has become an active area of research. This dissertation presents the first general framework for the formal specification and verification of hybrid systems, as well as the first hybrid-system analysis tool--HyTech. The framework consists of a graphical finite-state-machine-like language for modeling hybrid systems, a temporal logic for modeling the requirements of hybrid systems, and a computer procedure that verifies modeled hybrid systems against modeled requirements. The tool HyTech is the implementation of the framework using C++ and Mathematica.
More specifically, our hybrid-system modeling language, Hybrid Automata, is an extension of timed automata with discrete and continuous variables whose dynamics are governed by differential equations. Our requirement modeling language, ICTL, is a branching-time temporal logic, and is an extension of TCTL with stop-watch variables. Our verification procedure is a symbolic model-checking procedure that verifies linear hybrid automata against ICTL formulas. To make HyTech more efficient and effective, we use model-checking strategies and abstract operators that can expedite the verification process. To enable HyTech to verify nonlinear hybrid automata, we introduce two translations from nonlinear hybrid automata to linear hybrid automata. We have applied HyTech to analyze more than 30 hybrid-system benchmarks. In this dissertation, we present the application of HyTech to three nontrivial hybrid systems taken from the literature.},
author = {Ho, Pei-Hsin},
pages = {1--188},
publisher = {Cornell University},
title = {{Automatic Analysis of Hybrid Systems}},
note = {Technical Report CSD-TR95-1536},
year = {1995},
}
@phdthesis{4516,
author = {Henzinger, Thomas},
publisher = {Stanford University},
title = {{The Temporal Specification and Verification of Real-time Systems}},
year = {1991},
}
@phdthesis{4337,
author = {Barton, Nicholas},
publisher = {University of East Anglia},
title = {{A hybrid zone in the alpine grasshopper Podisma pedestris}},
year = {1979},
}