@phdthesis{938,
abstract = {The thesis encompasses several topics of plant cell biology which were studied in the model plant Arabidopsis thaliana. Chapter 1 concerns the plant hormone auxin and its polar transport through cells and tissues. The highly controlled, directional transport of auxin is facilitated by plasma membrane-localized transporters. Transporters from the PIN family direct auxin transport due to their polarized localizations at cell membranes. Substantial effort has been put into research on cellular trafficking of PIN proteins, which is thought to underlie their polar distribution. I participated in a forward genetic screen aimed at identifying novel regulators of PIN polarity. The screen yielded several genes which may be involved in PIN polarity regulation or participate in polar auxin transport by other means. Chapter 2 focuses on the endomembrane system, with particular attention to clathrin-mediated endocytosis. The project started with identification of several proteins that interact with clathrin light chains. Among them, I focused on two putative homologues of auxilin, which in non-plant systems is an endocytotic factor known for uncoating clathrin-coated vesicles in the final step of endocytosis. The body of my work consisted of an in-depth characterization of transgenic A. thaliana lines overexpressing these putative auxilins in an inducible manner. Overexpression of these proteins leads to an inhibition of endocytosis, as documented by imaging of cargoes and clathrin-related endocytic machinery. An extension of this work is an investigation into a concept of homeostatic regulation acting between distinct transport processes in the endomembrane system. With auxilin overexpressing lines, where endocytosis is blocked specifically, I made observations on the mutual relationship between two opposite trafficking processes of secretion and endocytosis. 
In Chapter 3, I analyze cortical microtubule arrays and their relationship to auxin signaling and polarized growth in elongating cells. In plants, microtubules are organized into arrays just below the plasma membrane, and it is thought that their function is to guide membrane-docked cellulose synthase complexes. These, in turn, influence cell wall structure and cell shape by directed deposition of cellulose fibres. In elongating cells, cortical microtubule arrays are able to reorient in relation to long cell axis, and these reorientations have been linked to cell growth and to signaling of growth-regulating factors such as auxin or light. In this chapter, I am addressing the causal relationship between microtubule array reorientation, growth, and auxin signaling. I arrive at a model where array reorientation is not guided by auxin directly, but instead is only controlled by growth, which, in turn, is regulated by auxin.},
author = {Adamowski, Maciek},
pages = {117},
publisher = {IST Austria},
title = {{Investigations into cell polarity and trafficking in the plant model Arabidopsis thaliana}},
doi = {10.15479/AT:ISTA:th_842},
year = {2017},
}
@phdthesis{961,
abstract = {Cell-cell contact formation constitutes the first step in the emergence of multicellularity in evolution, thereby allowing the differentiation of specialized cell types. In metazoan development, cell-cell contact formation is thought to influence cell fate specification, and cell fate specification has been implicated in cell-cell contact formation. However, remarkably little is yet known about whether and how the interaction and feedback between cell-cell contact formation and cell fate specification affect development. Here we identify a positive feedback loop between cell-cell contact duration, morphogen signaling and mesendoderm cell fate specification during zebrafish gastrulation. We show that long lasting cell-cell contacts enhance the competence of prechordal plate (ppl) progenitor cells to respond to Nodal signaling, required for proper ppl cell fate specification. We further show that Nodal signaling promotes ppl cell-cell contact duration, thereby generating an effective positive feedback loop between ppl cell-cell contact duration and cell fate specification. Finally, by using a combination of theoretical modeling and experimentation, we show that this feedback loop determines whether anterior axial mesendoderm cells become ppl progenitors or, instead, turn into endoderm progenitors. Our findings reveal that the gene regulatory networks leading to cell fate diversification within the developing embryo are controlled by the interdependent activities of cell-cell signaling and contact formation.},
author = {Barone, Vanessa},
pages = {109},
publisher = {IST Austria},
title = {{Cell adhesion and cell fate: An effective feedback loop during zebrafish gastrulation}},
doi = {10.15479/AT:ISTA:th_825},
year = {2017},
}
@phdthesis{992,
abstract = {An instance of the Constraint Satisfaction Problem (CSP) is given by a finite set of
variables, a finite domain of labels, and a set of constraints, each constraint acting on
a subset of the variables. The goal is to find an assignment of labels to its variables
that satisfies all constraints (or decide whether one exists). If we allow more general
“soft” constraints, which come with (possibly infinite) costs of particular assignments,
we obtain instances from a richer class called Valued Constraint Satisfaction Problem
(VCSP). There the goal is to find an assignment with minimum total cost.
In this thesis, we focus (assuming that P ≠ NP) on classifying computational
complexity of CSPs and VCSPs under certain restricting conditions. Two results are the core
content of the work. In one of them, we consider VCSPs parametrized by a constraint
language, that is the set of “soft” constraints allowed to form the instances, and finish
the complexity classification modulo (missing pieces of) complexity classification for
analogously parametrized CSP. The other result is a generalization of Edmonds’ perfect
matching algorithm. This generalization contributes to complexity classifications in two
ways. First, it gives a new (largest known) polynomial-time solvable class of Boolean
CSPs in which every variable may appear in at most two constraints and second, it
settles full classification of Boolean CSPs with planar drawing (again parametrized by a
constraint language).},
author = {Rolinek, Michal},
pages = {97},
publisher = {IST Austria},
title = {{Complexity of constraint satisfaction}},
doi = {10.15479/AT:ISTA:th_815},
year = {2017},
}
@phdthesis{1127,
abstract = {Plant hormone auxin and its transport between cells belong to the most important
mechanisms controlling plant development. Auxin itself could change localization of PINs and
thereby control direction of its own flow. We performed an expression profiling experiment
in Arabidopsis roots to identify potential regulators of PIN polarity which are transcriptionally
regulated by auxin signalling. We identified several novel regulators and performed a detailed
characterization of the transcription factor WRKY23 (At2g47260) and its role in auxin
feedback on PIN polarity. Gain-of-function and dominant-negative mutants revealed that
WRKY23 plays a crucial role in mediating the auxin effect on PIN polarity. In concordance,
typical polar auxin transport processes such as gravitropism and leaf vascular pattern
formation were disturbed by interfering with WRKY23 function.
In order to identify direct targets of WRKY23, we performed consequential expression
profiling experiments using a WRKY23 inducible gain-of-function line and dominant-negative
WRKY23 line that is defunct in PIN re-arrangement. Among several genes mostly related to
the groups of cell wall and defense process regulators, we identified LYSINE-HISTIDINE
TRANSPORTER 1 (LHT1; At5g40780), a small amino acid permease gene from the amino
acid/auxin permease family (AAAP). We present its detailed characterisation in auxin feedback
on PIN repolarization, identify its transcriptional regulation, and propose a potential
mechanism of its action. Moreover, we also identified a member of the receptor-like protein
kinase LRR-RLK (LEUCINE-RICH REPEAT TRANSMEMBRANE PROTEIN KINASE PROTEIN 1;
LRRK1; At1g05700), which also affects auxin-dependent PIN re-arrangement. We described
its transcriptional behaviour and subcellular localization. Based on global expression data, we
tried to identify the ligand responsible for the mechanism of signalling and suggest signalling partners
and interactors. Additionally, we described the role of a novel phytohormone group, strigolactones,
in auxin-dependent PIN re-arrangement, which could be a foundation for future studies in this
field.
Our results provide first insights into an auxin transcriptional network targeting PIN
localization and thus regulating plant development. We highlighted WRKY23 transcriptional
network and characterised its mediatory role in plant development. We identified direct
effectors of this network, LHT1 and LRRK1, and describe their roles in PIN re-arrangement and
PIN-dependent auxin transport processes.},
author = {Prat, Tomas},
pages = {131},
publisher = {IST Austria},
title = {{Identification of novel regulators of PIN polarity and development of novel auxin sensor}},
year = {2017},
}
@phdthesis{1155,
abstract = {This dissertation concerns the automatic verification of probabilistic systems and programs with arrays by statistical and logical methods. Although statistical and logical methods are different in nature, we show that they can be successfully combined for system analysis. In the first part of the dissertation we present a new statistical algorithm for the verification of probabilistic systems with respect to unbounded properties, including linear temporal logic. Our algorithm often performs faster than the previous approaches, and at the same time requires less information about the system. In addition, our method can be generalized to unbounded quantitative properties such as mean-payoff bounds. In the second part, we introduce two techniques for comparing probabilistic systems. Probabilistic systems are typically compared using the notion of equivalence, which requires the systems to have the equal probability of all behaviors. However, this notion is often too strict, since probabilities are typically only empirically estimated, and any imprecision may break the relation between processes. On the one hand, we propose to replace the Boolean notion of equivalence by a quantitative distance of similarity. For this purpose, we introduce a statistical framework for estimating distances between Markov chains based on their simulation runs, and we investigate which distances can be approximated in our framework. On the other hand, we propose to compare systems with respect to a new qualitative logic, which expresses that behaviors occur with probability one or a positive probability. This qualitative analysis is robust with respect to modeling errors and applicable to many domains. In the last part, we present a new quantifier-free logic for integer arrays, which allows us to express counting. Counting properties are prevalent in array-manipulating programs, however they cannot be expressed in the quantified fragments of the theory of arrays. 
We present a decision procedure for our logic, and provide several complexity results.},
author = {Daca, Przemyslaw},
pages = {163},
publisher = {IST Austria},
title = {{Statistical and logical methods for property checking}},
doi = {10.15479/AT:ISTA:TH_730},
year = {2017},
}
@phdthesis{1189,
abstract = {Within the scope of this thesis, we show that a driven-dissipative system with
few ultracold atoms can exhibit dissipatively bound states, even if the atom-atom
interaction is purely repulsive. This bond arises due to the dipole-dipole inter-
action, which is restricted to one of the lower electronic energy states, resulting
in the distance-dependent coherent population trapping. The quality of this al-
ready established method of dissipative binding is improved and the application
is extended to higher dimensions and a larger number of atoms. Here, we simu-
late two- and three-atom systems using an adapted approach to the Monte Carlo
wave-function method and analyse the results. Finally, we examine the possi-
bility of finding a setting allowing trimer states but prohibiting dimer states.
In the context of open quantum systems, such a three-body bound states corre-
sponds to the driven-dissipative analogue of a Borromean state. These states can
be detected in modern experiments with dipolar and Rydberg-dressed ultracold
atomic gases.
},
author = {Jochum, Clemens},
pages = {1--77},
publisher = {Technical University Vienna},
title = {{Dissipative Few-Body Quantum Systems}},
year = {2016},
}
@phdthesis{1396,
abstract = {CA3 pyramidal neurons are thought to play a key role in memory storage and pattern completion by activity-dependent synaptic plasticity between CA3-CA3 recurrent excitatory synapses. To examine the induction rules of synaptic plasticity at CA3-CA3 synapses, we performed whole-cell patch-clamp recordings in acute hippocampal slices from rats (postnatal 21-24 days) at room temperature. Compound excitatory postsynaptic potentials (EPSPs) were recorded by tract stimulation in stratum oriens in the presence of 10 µM gabazine. High-frequency stimulation (HFS) induced N-methyl-D-aspartate (NMDA) receptor-dependent long-term potentiation (LTP). Although LTP by HFS did not require postsynaptic spikes, it was blocked by Na+-channel blockers suggesting that local active processes (e.g., dendritic spikes) may contribute to LTP induction without requirement of a somatic action potential (AP). We next examined the properties of spike timing-dependent plasticity (STDP) at CA3-CA3 synapses. Unexpectedly, low-frequency pairing of EPSPs and backpropagated action potentials (bAPs) induced LTP, independent of temporal order. The STDP curve was symmetric and broad, with a half-width of ~150 ms. Consistent with these specific STDP induction properties, post-presynaptic sequences led to a supralinear summation of spine [Ca2+] transients. Furthermore, in autoassociative network models, storage and recall was substantially more robust with symmetric than with asymmetric STDP rules. In conclusion, we found associative forms of LTP at CA3-CA3 recurrent collateral synapses with distinct induction rules. LTP induced by HFS may be associated with dendritic spikes. In contrast, low frequency pairing of pre- and postsynaptic activity induced LTP only if EPSP-AP were temporally very close. Together, these induction mechanisms of synaptic plasticity may contribute to memory storage in the CA3-CA3 microcircuit at different ranges of activity.},
author = {Mishra, Rajiv Kumar},
pages = {83},
publisher = {IST Austria},
title = {{Synaptic plasticity rules at CA3-CA3 recurrent synapses in hippocampus}},
year = {2016},
}
@phdthesis{1397,
abstract = {We study partially observable Markov decision processes (POMDPs) with objectives used in verification and artificial intelligence. The qualitative analysis problem given a POMDP and an objective asks whether there is a strategy (policy) to ensure that the objective is satisfied almost surely (with probability 1), resp. with positive probability (with probability greater than 0). For POMDPs with limit-average payoff, where a reward value in the interval [0,1] is associated to every transition, and the payoff of an infinite path is the long-run average of the rewards, we consider two types of path constraints: (i) a quantitative limit-average constraint defines the set of paths where the payoff is at least a given threshold λ1 ∈ (0, 1], and (ii) a qualitative limit-average constraint is the special case with λ1 = 1. Our main results for qualitative limit-average constraint under almost-sure winning are as follows: (i) the problem of deciding the existence of a finite-memory controller is EXPTIME-complete; and (ii) the problem of deciding the existence of an infinite-memory controller is undecidable. For quantitative limit-average constraints we show that the problem of deciding the existence of a finite-memory controller is undecidable. We present a prototype implementation of our EXPTIME algorithm. For POMDPs with ω-regular conditions specified as parity objectives, while the qualitative analysis problems are known to be undecidable even for the very special case of parity objectives, we establish decidability (with optimal complexity) of the qualitative analysis problems for POMDPs with parity objectives under finite-memory strategies. We establish optimal (exponential) memory bounds and EXPTIME-completeness of the qualitative analysis problems under finite-memory strategies for POMDPs with parity objectives.
Based on our theoretical algorithms we also present a practical approach, where we design heuristics to deal with the exponential complexity, and have applied our implementation on a number of well-known POMDP examples for robotics applications. For POMDPs with a set of target states and an integer cost associated with every transition, we study the optimization objective that asks to minimize the expected total cost of reaching a state in the target set, while ensuring that the target set is reached almost surely. We show that for general integer costs approximating the optimal cost is undecidable. For positive costs, our results are as follows: (i) we establish matching lower and upper bounds for the optimal cost, both double and exponential in the POMDP state space size; (ii) we show that the problem of approximating the optimal cost is decidable and present approximation algorithms that extend existing algorithms for POMDPs with finite-horizon objectives. We show experimentally that it performs well in many examples of interest. We study more deeply the problem of almost-sure reachability, where given a set of target states, the question is to decide whether there is a strategy to ensure that the target set is reached almost surely. While in general the problem EXPTIME-complete, in many practical cases strategies with a small amount of memory suffice. Moreover, the existing solution to the problem is explicit, which first requires to construct explicitly an exponential reduction to a belief-support MDP. We first study the existence of observation-stationary strategies, which is NP-complete, and then small-memory strategies. We present a symbolic algorithm by an efficient encoding to SAT and using a SAT solver for the problem. We report experimental results demonstrating the scalability of our symbolic (SAT-based) approach. 
Decentralized POMDPs (DEC-POMDPs) extend POMDPs to a multi-agent setting, where several agents operate in an uncertain environment independently to achieve a joint objective. In this work we consider Goal DEC-POMDPs, where given a set of target states, the objective is to ensure that the target set is reached with minimal cost. We consider the indefinite-horizon (infinite-horizon with either discounted-sum, or undiscounted-sum, where absorbing goal states have zero-cost) problem. We present a new and novel method to solve the problem that extends methods for finite-horizon DEC-POMDPs and the real-time dynamic programming approach for POMDPs. We present experimental results on several examples, and show that our approach presents promising results. In the end we present a short summary of a few other results related to verification of MDPs and POMDPs.},
author = {Chmelik, Martin},
pages = {232},
publisher = {IST Austria},
title = {{Algorithms for partially observable Markov decision processes}},
year = {2016},
}
@phdthesis{1398,
abstract = {Hybrid zones represent evolutionary laboratories, where recombination brings together alleles in combinations which have not previously been tested by selection. This provides an excellent opportunity to test the effect of molecular variation on fitness, and how this variation is able to spread through populations in a natural context. The snapdragon Antirrhinum majus is polymorphic in the wild for two loci controlling the distribution of yellow and magenta floral pigments. Where the yellow A. m. striatum and the magenta A. m. pseudomajus meet along a valley in the Spanish Pyrenees they form a stable hybrid zone. Alleles at these loci recombine to give striking transgressive variation for flower colour. The sharp transition in phenotype over ~1km implies strong selection maintaining the hybrid zone. An indirect assay of pollinator visitation in the field found that pollinators forage in a positive-frequency dependent manner on Antirrhinum, matching previous data on fruit set. Experimental arrays and paternity analysis of wild-pollinated seeds demonstrated assortative mating for pigmentation alleles, and that pollinator behaviour alone is sufficient to explain this pattern. Selection by pollinators should be sufficiently strong to maintain the hybrid zone, although other mechanisms may be at work. At a broader scale I examined evolutionary transitions between yellow and anthocyanin pigmentation in the tribe Antirrhinae, and found that selection has acted on these transitions. Together, these results demonstrate that pollinators are a major determinant of reproductive success and mating patterns in wild Antirrhinum.},
author = {Ellis, Thomas},
pages = {130},
publisher = {IST Austria},
title = {{The role of pollinator-mediated selection in the maintenance of a flower color polymorphism in an Antirrhinum majus hybrid zone}},
doi = {10.15479/AT:ISTA:TH_526},
year = {2016},
}
@phdthesis{1121,
abstract = {Horizontal gene transfer (HGT), the lateral acquisition of genes across existing species
boundaries, is a major evolutionary force shaping microbial genomes that facilitates
adaptation to new environments as well as resistance to antimicrobial drugs. As such,
understanding the mechanisms and constraints that determine the outcomes of HGT
events is crucial to understand the dynamics of HGT and to design better strategies to
overcome the challenges that originate from it.
Following the insertion and expression of a newly transferred gene, the success of an
HGT event will depend on the fitness effect it has on the recipient (host) cell. Therefore,
predicting the impact of HGT on the genetic composition of a population critically
depends on the distribution of fitness effects (DFE) of horizontally transferred genes.
However, to date, we have little knowledge of the DFE of newly transferred genes, and
hence little is known about the shape and scale of this distribution.
It is particularly important to better understand the selective barriers that determine
the fitness effects of newly transferred genes. In spite of substantial bioinformatics
efforts to identify horizontally transferred genes and selective barriers, a systematic
experimental approach to elucidate the roles of different selective barriers in defining
the fate of a transfer event has largely been absent. Similarly, although the fact that
environment might alter the fitness effect of a horizontally transferred gene may seem
obvious, little attention has been given to it in a systematic experimental manner.
In this study, we developed a systematic experimental approach that consists of
transferring 44 arbitrarily selected Salmonella typhimurium orthologous genes into an
Escherichia coli host, and estimating the fitness effects of these transferred genes at a
constant expression level by performing competition assays against the wild type.
In chapter 2, we performed one-to-one competition assays between a mutant strain
carrying a transferred gene and the wild type strain. By using flow cytometry we
estimated selection coefficients for the transferred genes with a precision level of 10-3, and obtained the DFE of horizontally transferred genes. We then investigated if these
fitness effects could be predicted by any of the intrinsic properties of the genes, namely,
functional category, degree of complexity (protein-protein interactions), GC content,
codon usage and length. Our analyses revealed that the functional category and length
of the genes act as potential selective barriers. Finally, using the same procedure with
the endogenous E. coli orthologs of these 44 genes, we demonstrated that gene dosage is
the most prominent selective barrier to HGT.
In chapter 3, using the same set of genes we investigated the role of environment on the
success of HGT events. Under six different environments with different levels of stress
we performed more complex competition assays, where we mixed all 44 mutant strains
carrying transferred genes with the wild type strain. To estimate the fitness effects of
genes relative to wild type we used next generation sequencing. We found that the DFEs
of horizontally transferred genes are highly dependent on the environment, with
abundant gene-by-environment interactions. Furthermore, we demonstrated a
relationship between average fitness effect of a gene across all environments and its
environmental variance, and thus its predictability. Finally, in spite of the fitness effects
of genes being highly environment-dependent, we still observed a common shape of
DFEs across all tested environments.},
author = {Acar, Hande},
pages = {75},
publisher = {IST Austria},
title = {{Selective barriers to horizontal gene transfer}},
year = {2016},
}
@phdthesis{1122,
abstract = {Computer graphics is an extremely exciting field for two reasons. On the one hand,
there is a healthy injection of pragmatism coming from the visual effects industry
that want robust algorithms that work so they can produce results at an increasingly
frantic pace. On the other hand, they must always try to push the envelope and
achieve the impossible to wow their audiences in the next blockbuster, which means
that the industry has not succumbed to conservatism, and there is plenty of room to
try out new and crazy ideas if there is a chance that it will pan into something
useful.
Water simulation has been in visual effects for decades, however it still remains
extremely challenging because of its high computational cost and difficult art-directability.
The work in this thesis tries to address some of these difficulties.
Specifically, we make the following three novel contributions to the state-of-the-art
in water simulation for visual effects.
First, we develop the first algorithm that can convert any sequence of closed
surfaces in time into a moving triangle mesh. State-of-the-art methods at the time
could only handle surfaces with fixed connectivity, but we are the first to be able to
handle surfaces that merge and split apart. This is important for water simulation
practitioners, because it allows them to convert splashy water surfaces extracted
from particles or simulated using grid-based level sets into triangle meshes that can
be either textured and enhanced with extra surface dynamics as a post-process.
We also apply our algorithm to other phenomena that merge and split apart, such
as morphs and noisy reconstructions of human performances.
Second, we formulate a surface-based energy that measures the deviation of a
water surface from a physically valid state. Such discrepancies arise when there is a
mismatch in the degrees of freedom between the water surface and the underlying
physics solver. This commonly happens when practitioners use a moving triangle
mesh with a grid-based physics solver, or when high-resolution grid-based surfaces
are combined with low-resolution physics. Following the direction of steepest
descent on our surface-based energy, we can either smooth these artifacts or turn
them into high-resolution waves by interpreting the energy as a physical potential.
Third, we extend state-of-the-art techniques in non-reflecting boundaries to handle spatially and time-varying background flows. This allows a novel
workflow where practitioners can re-simulate part of an existing simulation, such
as removing a solid obstacle, adding a new splash or locally changing the resolution.
Such changes can easily lead to new waves in the re-simulated region that would
reflect off of the new simulation boundary, effectively ruining the illusion of a
seamless simulation boundary between the existing and new simulations. Our
non-reflecting boundaries make sure that such waves are absorbed.},
author = {Bojsen-Hansen, Morten},
pages = {114},
publisher = {IST Austria},
title = {{Tracking, correcting and absorbing water surface waves}},
doi = {10.15479/AT:ISTA:th_640},
year = {2016},
}
@phdthesis{1123,
abstract = {Motivated by topological Tverberg-type problems in topological combinatorics and by classical
results about embeddings (maps without double points), we study the question whether a finite
simplicial complex K can be mapped into Rd without triple, quadruple, or, more generally, r-fold points (image points with at least r distinct preimages), for a given multiplicity r ≥ 2. In particular, we are interested in maps f : K → Rd that have no global r-fold intersection points, i.e., no r-fold points with preimages in r pairwise disjoint simplices of K, and we seek necessary and sufficient conditions for the existence of such maps.
We present higher-multiplicity analogues of several classical results for embeddings, in particular of the completeness of the Van Kampen obstruction for embeddability of k-dimensional
complexes into R2k, k ≥ 3. Specifically, we show that under suitable restrictions on the dimensions (viz., if dim K = (r – 1)k and d = rk for some k ≥ 3), a well-known deleted product criterion (DPC) is not only necessary but also sufficient for the existence of maps without global r-fold points. Our main technical tool is a higher-multiplicity version of the classical Whitney trick, by which pairs of isolated r-fold points of opposite sign can be eliminated by local modifications of the map, assuming codimension d – dim K ≥ 3.
An important guiding idea for our work was that sufficiency of the DPC, together with an old
result of Özaydin's on the existence of equivariant maps, might yield an approach to disproving the remaining open cases of the long-standing topological Tverberg conjecture, i.e., to construct maps from the N-simplex σN to Rd without r-Tverberg points when r is not a prime power and
N = (d + 1)(r – 1). Unfortunately, our proof of the sufficiency of the DPC requires codimension d – dim K ≥ 3, which is not satisfied for K = σN.
In 2015, Frick [16] found a very elegant way to overcome this "codimension 3 obstacle" and
to construct the first counterexamples to the topological Tverberg conjecture for all parameters (d, r) with d ≥ 3r + 1 and r not a prime power, by a reduction to a suitable lower-dimensional skeleton, for which the codimension 3 restriction is satisfied and maps without r-Tverberg points exist by Özaydin's result and sufficiency of the DPC.
In this thesis, we present a different construction (which does not use the constraint method) that yields counterexamples for d ≥ 3r, r not a prime power.},
author = {Mabillard, Isaac},
pages = {55},
publisher = {IST Austria},
title = {{Eliminating higher-multiplicity intersections: an r-fold Whitney trick for the topological Tverberg conjecture}},
year = {2016},
}
@phdthesis{1124,
author = {Morri, Maurizio},
pages = {129},
publisher = {IST Austria},
title = {{Optical functionalization of human class A orphan G-protein coupled receptors}},
year = {2016},
}
@phdthesis{1125,
abstract = {Natural environments are never constant but subject to spatial and temporal change on
all scales, increasingly so due to human activity. Hence, it is crucial to understand the
impact of environmental variation on evolutionary processes. In this thesis, I present
three topics that share the common theme of environmental variation, yet illustrate its
effect from different perspectives.
First, I show how a temporally fluctuating environment gives rise to second-order
selection on a modifier for stress-induced mutagenesis. Without fluctuations, when
populations are adapted to their environment, mutation rates are minimized. I argue
that a stress-induced mutator mechanism may only be maintained if the population is
repeatedly subjected to diverse environmental challenges, and I outline implications of
the presented results to antibiotic treatment strategies.
Second, I discuss my work on the evolution of dispersal. Besides reproducing
known results about the effect of heterogeneous habitats on dispersal, it identifies
spatial changes in dispersal type frequencies as a source for selection for increased
propensities to disperse. This concept contains effects of relatedness that are known
to promote dispersal, and I explain how it identifies other forces selecting for dispersal
and puts them on a common scale.
Third, I analyse genetic variances of phenotypic traits under multivariate stabilizing
selection. For the case of constant environments, I generalize known formulae of
equilibrium variances to multiple traits and discuss how the genetic variance of a focal
trait is influenced by selection on background traits. I conclude by presenting ideas and
preliminary work aiming at including environmental fluctuations in the form of moving
trait optima into the model.},
author = {Novak, Sebastian},
pages = {124},
publisher = {IST Austria},
title = {{Evolutionary processes in variable environments}},
year = {2016},
}
@phdthesis{1126,
abstract = {Traditionally machine learning has been focusing on the problem of solving a single
task in isolation. While being quite well understood, this approach disregards an
important aspect of human learning: when facing a new problem, humans are able to
exploit knowledge acquired from previously learned tasks. Intuitively, access to several
problems simultaneously or sequentially could also be advantageous for a machine
learning system, especially if these tasks are closely related. Indeed, results of many
empirical studies have provided justification for this intuition. However, theoretical
justifications of this idea are rather limited.
The focus of this thesis is to expand the understanding of potential benefits of information
transfer between several related learning problems. We provide theoretical
analysis for three scenarios of multi-task learning - multiple kernel learning, sequential
learning and active task selection. We also provide a PAC-Bayesian perspective on
lifelong learning and investigate how the task generation process influences the generalization
guarantees in this scenario. In addition, we show how some of the obtained
theoretical results can be used to derive principled multi-task and lifelong learning
algorithms and illustrate their performance on various synthetic and real-world datasets.},
author = {Pentina, Anastasia},
pages = {127},
publisher = {IST Austria},
title = {{Theoretical foundations of multi-task lifelong learning}},
doi = {10.15479/AT:ISTA:TH_776},
year = {2016},
}
@phdthesis{1129,
abstract = {Directed cell migration is a hallmark feature, present in almost all multi-cellular
organisms. Despite its importance, basic questions regarding force transduction
or directional sensing are still heavily investigated. Directed migration of cells
guided by immobilized guidance cues - haptotaxis - occurs in key-processes,
such as embryonic development and immunity (Middleton et al., 1997; Nguyen
et al., 2000; Thiery, 1984; Weber et al., 2013). Immobilized guidance cues
comprise adhesive ligands, such as collagen and fibronectin (Barczyk et al.,
2009), or chemokines - the main guidance cues for migratory leukocytes
(Middleton et al., 1997; Weber et al., 2013). While adhesive ligands serve as
attachment sites guiding cell migration (Carter, 1965), chemokines instruct
haptotactic migration by inducing adhesion to adhesive ligands and directional
guidance (Rot and Andrian, 2004; Schumann et al., 2010). Quantitative analysis
of the cellular response to immobilized guidance cues requires in vitro assays
that foster cell migration, offer accurate control of the immobilized cues on a
subcellular scale and in the ideal case closely reproduce in vivo conditions. The
exploration of haptotactic cell migration through design and employment of such
assays represents the main focus of this work.
Dendritic cells (DCs) are leukocytes, which after encountering danger
signals such as pathogens in peripheral organs instruct naïve T-cells and
consequently the adaptive immune response in the lymph node (Mellman and
Steinman, 2001). To reach the lymph node from the periphery, DCs follow
haptotactic gradients of the chemokine CCL21 towards lymphatic vessels
(Weber et al., 2013). Questions about how DCs interpret haptotactic CCL21
gradients have not yet been addressed. The main reason for this is the lack of
an assay that offers diverse haptotactic environments, hence allowing the study
of DC migration as a response to different signals of immobilized guidance cue.
In this work, we developed an in vitro assay that enables us to
quantitatively assess DC haptotaxis, by combining precisely controllable
chemokine photo-patterning with physically confining migration conditions. With this tool at hand, we studied the influence of CCL21 gradient properties and
concentration on DC haptotaxis. We found that haptotactic gradient sensing
depends on the absolute CCL21 concentration in combination with the local
steepness of the gradient. Our analysis suggests that the directionality of
migrating DCs is governed by the signal-to-noise ratio of CCL21 binding to its
receptor CCR7. Moreover, the haptotactic CCL21 gradient formed in vivo
provides an optimal shape for DCs to recognize haptotactic guidance cue.
By reconstitution of the CCL21 gradient in vitro we were also able to
study the influence of CCR7 signal termination on DC haptotaxis. To this end,
we used DCs lacking the G-protein coupled receptor kinase GRK6, which is
responsible for CCL21 induced CCR7 receptor phosphorylation and
desensitization (Zidar et al., 2009). We found that CCR7 desensitization by
GRK6 is crucial for maintenance of haptotactic CCL21 gradient sensing in vitro
and confirm those observations in vivo.
In the context of the organism, immobilized haptotactic guidance cues
often coincide and compete with soluble chemotactic guidance cues. During
wound healing, fibroblasts are exposed and influenced by adhesive cues and
soluble factors at the same time (Wu et al., 2012; Wynn, 2008). Similarly,
migrating DCs are exposed to both, soluble chemokines (CCL19 and truncated
CCL21) inducing chemotactic behavior as well as the immobilized CCL21. To
quantitatively assess these complex coinciding immobilized and soluble
guidance cues, we implemented our chemokine photo-patterning technique in a
microfluidic system allowing for chemotactic gradient generation. To validate
the assay, we observed DC migration in competing CCL19/CCL21
environments.
Adhesiveness guided haptotaxis has been studied intensively over the
last century. However, quantitative studies leading to conceptual models are
largely missing, again due to the lack of a precisely controllable in vitro assay. A
requirement for such an in vitro assay is that it must prevent any uncontrolled
cell adhesion. This can be accomplished by stable passivation of the surface. In
addition, controlled adhesion must be sustainable, quantifiable and dose
dependent in order to create homogenous gradients. Therefore, we developed a novel covalent photo-patterning technique satisfying all these needs. In
combination with a sustainable poly-vinyl alcohol (PVA) surface coating we
were able to generate gradients of adhesive cue to direct cell migration. This
approach allowed us to characterize the haptotactic migratory behavior of
zebrafish keratocytes in vitro. Furthermore, defined patterns of adhesive cue
allowed us to control for cell shape and growth on a subcellular scale.},
author = {Schwarz, Jan},
pages = {178},
publisher = {IST Austria},
title = {{Quantitative analysis of haptotactic cell migration}},
year = {2016},
}
@phdthesis{1130,
abstract = {In this thesis we present a computer-aided programming approach to concurrency. Our approach
helps the programmer by automatically fixing concurrency-related bugs, i.e. bugs that occur
when the program is executed using an aggressive preemptive scheduler, but not when using a
non-preemptive (cooperative) scheduler. Bugs are program behaviours that are incorrect w.r.t.
a specification. We consider both user-provided explicit specifications in the form of assertion
statements in the code as well as an implicit specification. The implicit specification is inferred
from the non-preemptive behaviour. Let us consider sequences of calls that the program makes
to an external interface. The implicit specification requires that any such sequence produced
under a preemptive scheduler should be included in the set of sequences produced under a
non-preemptive scheduler.
We consider several semantics-preserving fixes that go beyond atomic sections typically
explored in the synchronisation synthesis literature. Our synthesis is able to place locks, barriers
and wait-signal statements and last, but not least reorder independent statements. The latter
may be useful if a thread is released too early, e.g., before some initialisation is completed. We
guarantee that our synthesis does not introduce deadlocks and that the synchronisation inserted
is optimal w.r.t. a given objective function.
We dub our solution trace-based synchronisation synthesis and it is loosely based on
counterexample-guided inductive synthesis (CEGIS). The synthesis works by discovering a
trace that is incorrect w.r.t. the specification and identifying ordering constraints crucial to trigger
the specification violation. Synchronisation may be placed immediately (greedy approach) or
delayed until all incorrect traces are found (non-greedy approach). For the non-greedy approach
we construct a set of global constraints over synchronisation placements. Each model of the
global constraints set corresponds to a correctness-ensuring synchronisation placement. The
placement that is optimal w.r.t. the given objective function is chosen as the synchronisation
solution.
We evaluate our approach on a number of realistic (albeit simplified) Linux device-driver
benchmarks. The benchmarks are versions of the drivers with known concurrency-related bugs.
For the experiments with an explicit specification we added assertions that would detect the bugs
in the experiments. Device drivers lend themselves to implicit specification, where the device and
the operating system are the external interfaces. Our experiments demonstrate that our synthesis
method is precise and efficient. We implemented objective functions for coarse-grained and
fine-grained locking and observed that different synchronisation placements are produced for
our experiments, favouring e.g. a minimal number of synchronisation operations or maximum
concurrency.},
author = {Tarrach, Thorsten},
pages = {151},
publisher = {IST Austria},
title = {{Automatic synthesis of synchronisation primitives for concurrent programs}},
year = {2016},
}
@phdthesis{1131,
abstract = {Evolution of gene regulation is important for phenotypic evolution and diversity. Sequence-specific binding of regulatory proteins is one of the key regulatory mechanisms determining gene expression. Although there has been intense interest in evolution of regulatory binding sites in the last decades, a theoretical understanding is far from being complete. In this thesis, I aim at a better understanding of the evolution of transcriptional regulatory binding sequences by using biophysical and population genetic models.
In the first part of the thesis, I discuss how to formulate the evolutionary dynamics of binding sequences in a single isolated binding site and in promoter/enhancer regions. I develop a theoretical framework bridging between a thermodynamical model for transcription and a mutation-selection-drift model for monomorphic populations. I mainly address the typical evolutionary rates, and how they depend on biophysical parameters (e.g. binding length and specificity) and population genetic parameters (e.g. population size and selection strength).
In the second part of the thesis, I analyse empirical data for a better evolutionary and biophysical understanding of sequence-specific binding of bacterial RNA polymerase. First, I infer selection on regulatory and non-regulatory binding sites of RNA polymerase in the E. coli K12 genome. Second, I infer the chemical potential of RNA polymerase, an important but unknown physical parameter defining the threshold energy for strong binding. Furthermore, I try to understand the relation between the lac promoter sequence diversity and the LacZ activity variation among 20 bacterial isolates by constructing a simple but biophysically motivated gene expression model. Lastly, I lay out a statistical framework to predict adaptive point mutations in de novo promoter evolution in a selection experiment.},
author = {Tugrul, Murat},
pages = {89},
publisher = {IST Austria},
title = {{Evolution of transcriptional regulatory sequences}},
year = {2016},
}
@phdthesis{1128,
abstract = {The process of gene expression is central to the modern understanding of how cellular systems
function. In this process, a special kind of regulatory proteins, called transcription factors,
are important to determine how much protein is produced from a given gene. As biological
information is transmitted from transcription factor concentration to mRNA levels to amounts of
protein, various sources of noise arise and pose limits to the fidelity of intracellular signaling.
This thesis concerns itself with several aspects of stochastic gene expression: (i) the mathematical
description of complex promoters responsible for the stochastic production of biomolecules,
(ii) fundamental limits to information processing the cell faces due to the interference from multiple
fluctuating signals, (iii) how the presence of gene expression noise influences the evolution
of regulatory sequences, (iv) and tools for the experimental study of origins and consequences
of cell-cell heterogeneity, including an application to bacterial stress response systems.},
author = {Rieckh, Georg},
pages = {114},
publisher = {IST Austria},
title = {{Studying the complexities of transcriptional regulation}},
year = {2016},
}
@phdthesis{1399,
abstract = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.},
author = {Pausinger, Florian},
pages = {144},
publisher = {IST Austria},
title = {{On the approximation of intrinsic volumes}},
year = {2015},
}
@phdthesis{1400,
abstract = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.},
author = {Reiter, Johannes},
pages = {183},
publisher = {IST Austria},
title = {{The subclonal evolution of cancer}},
year = {2015},
}
@phdthesis{1401,
abstract = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over a couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of object recognition task: learning augmented attributes as mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.},
author = {Sharmanska, Viktoriia},
pages = {144},
publisher = {IST Austria},
title = {{Learning with attributes for object recognition: Parametric and non-parametric views}},
year = {2015},
}
@phdthesis{1395,
abstract = {In this thesis I studied various individual and social immune defences employed by the invasive garden ant Lasius neglectus mostly against entomopathogenic fungi. The first two chapters of this thesis address the phenomenon of 'social immunisation'. Social immunisation, that is the immunological protection of group members due to social contact to a pathogen-exposed nestmate, has been described in various social insect species against different types of pathogens. However, in the case of entomopathogenic fungi it has, so far, only been demonstrated that social immunisation exists at all. Its underlying mechanisms or any other properties were, however, unknown. In the first chapter of this thesis I identified the mechanistic basis of social immunisation in L. neglectus against the entomopathogenic fungus Metarhizium. I could show that nestmates of a pathogen-exposed individual contract low-level infections due to social interactions. These low-level infections are, however, non-lethal and cause an active stimulation of the immune system, which protects the nestmates upon subsequent pathogen encounters. In the second chapter of this thesis I investigated the specificity and colony level effects of social immunisation. I demonstrated that the protection conferred by social immunisation is highly specific, protecting ants only against the same pathogen strain. In addition, depending on the respective context, social immunisation may even cause fitness costs. I further showed that social immunisation crucially affects sanitary behaviour and disease dynamics within ant groups. In the third chapter of this thesis I studied the effects of the ectosymbiotic fungus Laboulbenia formicarum on its host L. neglectus. Although Laboulbeniales are the largest order of insect-parasitic fungi, research concerning host fitness consequence is sparse. 
I showed that highly Laboulbenia-infected ants sustain fitness costs under resource limitation, however, gain fitness benefits when exposed to an entomopathogenic fungus. These effects are probably caused by a prophylactic upregulation of behavioural as well as physiological immune defences in highly infected ants.},
author = {Konrad, Matthias},
pages = {131},
publisher = {IST Austria},
title = {{Immune defences in ants: Effects of social immunisation and a fungal ectosymbiont in the ant Lasius neglectus}},
year = {2014},
}
@phdthesis{1402,
abstract = {Phosphatidylinositol (PtdIns) is a structural phospholipid that can be phosphorylated into various lipid signaling molecules, designated polyphosphoinositides (PPIs). The reversible phosphorylation of PPIs on the 3, 4, or 5 position of inositol is performed by a set of organelle-specific kinases and phosphatases, and the characteristic head groups make these molecules ideal for regulating biological processes in time and space. In yeast and mammals, PtdIns3P and PtdIns(3,5)P2 play crucial roles in trafficking toward the lytic compartments, whereas the role in plants is not yet fully understood. Here we identified the role of a land plant-specific subgroup of PPI phosphatases, the suppressor of actin 2 (SAC2) to SAC5, during vacuolar trafficking and morphogenesis in Arabidopsis thaliana. SAC2-SAC5 localize to the tonoplast along with PtdIns3P, the presumable product of their activity. In SAC gain- and loss-of-function mutants, the levels of PtdIns monophosphates and bisphosphates were changed, with opposite effects on the morphology of storage and lytic vacuoles, and the trafficking toward the vacuoles was defective. Moreover, multiple sac knockout mutants had an increased number of smaller storage and lytic vacuoles, whereas extralarge vacuoles were observed in the overexpression lines, correlating with various growth and developmental defects. The fragmented vacuolar phenotype of sac mutants could be mimicked by treating wild-type seedlings with PtdIns(3,5)P2, corroborating that this PPI is important for vacuole morphology. Taken together, these results provide evidence that PPIs, together with their metabolic enzymes SAC2-SAC5, are crucial for vacuolar trafficking and for vacuolar morphology and function in plants.},
author = {Marhavá, Petra},
pages = {90},
publisher = {IST Austria},
title = {{Molecular mechanisms of patterning and subcellular trafficking in Arabidopsis thaliana}},
year = {2014},
}
@phdthesis{1403,
abstract = {A variety of developmental and disease related processes depend on epithelial cell sheet spreading. In order to gain insight into the biophysical mechanism(s) underlying the tissue morphogenesis we studied the spreading of an epithelium during the early development of the zebrafish embryo. In zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the yolk cell to completely engulf it at the end of gastrulation. Previous studies have proposed that an actomyosin ring forming within the yolk syncytial layer (YSL) acts as purse string that through constriction along its circumference pulls on the margin of the EVL. Direct biophysical evidence for this hypothesis has however been missing. The aim of the thesis was to understand how the actomyosin ring may generate pulling forces onto the EVL and what cellular mechanism(s) may facilitate the spreading of the epithelium. Using laser ablation to measure cortical tension within the actomyosin ring we found an anisotropic tension distribution, which was highest along the circumference of the ring. However the low degree of anisotropy was incompatible with the actomyosin ring functioning as a purse string only. Additionally, we observed retrograde cortical flow from vegetal parts of the ring into the EVL margin. Interpreting the experimental data using a theoretical description that models the tissues as active viscous gels led us to propose that the actomyosin ring has a twofold contribution to EVL epiboly. It not only acts as a purse string through constriction along its circumference, but in addition constriction along the width of the ring generates pulling forces through friction-resisted cortical flow. Moreover, when rendering the purse string mechanism unproductive EVL epiboly proceeded normally indicating that the flow-friction mechanism is sufficient to drive the process. 
Aiming to understand what cellular mechanism(s) may facilitate the spreading of the epithelium we found that tension-oriented EVL cell divisions limit tissue anisotropy by releasing tension along the division axis and promote epithelial spreading. Notably, EVL cells undergo ectopic cell fusion in conditions in which oriented-cell division is impaired or the epithelium is mechanically challenged. Taken together our study of EVL epiboly suggests a novel mechanism of force generation for actomyosin rings through friction-resisted cortical flow and highlights the importance of tension-oriented cell divisions in epithelial morphogenesis.},
author = {Behrndt, Martin},
pages = {91},
publisher = {IST Austria},
title = {{Forces driving epithelial spreading in zebrafish epiboly}},
year = {2014},
}
@phdthesis{1404,
abstract = {The co-evolution of hosts and pathogens is characterized by continuous adaptations of both parties. Pathogens of social insects need to adapt towards disease defences at two levels: 1) individual immunity of each colony member consisting of behavioural defence strategies as well as humoral and cellular immune responses and 2) social immunity that is collectively performed by all group members comprising behavioural, physiological and organisational defence strategies.
To disentangle the selection pressure on pathogens by the collective versus individual level of disease defence in social insects, we performed an evolution experiment using the Argentine Ant, Linepithema humile, as a host and a mixture of the general insect pathogenic fungus Metarhizium spp. (6 strains) as a pathogen. We allowed pathogen evolution over 10 serial host passages to two different evolution host treatments: (1) only individual host immunity in a single host treatment, and (2) simultaneously acting individual and social immunity in a social host treatment, in which an exposed ant was accompanied by two untreated nestmates.
Before starting the pathogen evolution experiment, the 6 Metarhizium spp. strains were characterised concerning conidiospore size, killing rates in singly and socially reared ants, their competitiveness under coinfecting conditions and their influence on ant behaviour. We analysed how the ancestral strain mixture changed in conidiospore size, killing rate and strain composition dependent on host treatment (single or social hosts) during 10 passages and found that killing rate and conidiospore size of the pathogen increased under both evolution regimes, but differently depending on host treatment.
Testing the evolved strain mixtures that evolved under either the single or social host treatment under both single and social current rearing conditions in a full factorial design experiment revealed that the additional collective defences in insect societies add new selection pressure for their coevolving pathogens that compromise their ability to adapt to its host at the group level. To our knowledge, this is the first study directly measuring the influence of social immunity on pathogen evolution.},
author = {Stock, Miriam},
pages = {101},
publisher = {IST Austria},
title = {{Evolution of a fungal pathogen towards individual versus social immunity in ants}},
year = {2014},
}
@phdthesis{1405,
abstract = {Motivated by the analysis of highly dynamic message-passing systems, i.e. unbounded thread creation, mobility, etc. we present a framework for the analysis of depth-bounded systems. Depth-bounded systems are one of the most expressive known fragments of the π-calculus for which interesting verification problems are still decidable. Even though they are infinite state systems depth-bounded systems are well-structured, thus can be analyzed algorithmically. We give an interpretation of depth-bounded systems as graph-rewriting systems. This gives more flexibility and ease of use to apply depth-bounded systems to other types of systems like shared memory concurrency.
First, we develop an adequate domain of limits for depth-bounded systems, a prerequisite for the effective representation of downward-closed sets. Downward-closed sets are needed by forward saturation-based algorithms to represent potentially infinite sets of states. Then, we present an abstract interpretation framework to compute the covering set of well-structured transition systems. Because, in general, the covering set is not computable, our abstraction over-approximates the actual covering set. Our abstraction captures the essence of acceleration-based algorithms while giving up enough precision to ensure convergence. We have implemented the analysis in the PICASSO tool and show that it is accurate in practice. Finally, we build some further analyses like termination using the covering set as starting point.},
author = {Zufferey, Damien},
pages = {134},
publisher = {IST Austria},
title = {{Analysis of dynamic message passing programs}},
year = {2013},
}
@phdthesis{1406,
abstract = {Epithelial spreading is a critical part of various developmental and wound repair processes. Here we use zebrafish epiboly as a model system to study the cellular and molecular mechanisms underlying the spreading of epithelial sheets. During zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the embryo to eventually cover the entire yolk cell by the end of gastrulation. The EVL leading edge is anchored through tight junctions to the yolk syncytial layer (YSL), where directly adjacent to the EVL margin a contractile actomyosin ring is formed that is thought to drive EVL epiboly. The prevalent view in the field was that the contractile ring exerts a pulling force on the EVL margin, which pulls the EVL towards the vegetal pole. However, how this force is generated and how it affects EVL morphology still remains elusive. Moreover, the cellular mechanisms mediating the increase in EVL surface area, while maintaining tissue integrity and function are still unclear. Here we show that the YSL actomyosin ring pulls on the EVL margin by two distinct force-generating mechanisms. One mechanism is based on contraction of the ring around its circumference, as previously proposed. The second mechanism is based on actomyosin retrograde flows, generating force through resistance against the substrate. The latter can function at any epiboly stage even in situations where the contraction-based mechanism is unproductive. Additionally, we demonstrate that during epiboly the EVL is subjected to anisotropic tension, which guides the orientation of EVL cell division along the main axis (animal-vegetal) of tension. The influence of tension on cell division orientation involves cell elongation and requires myosin-2 activity for proper spindle alignment. Strikingly, we reveal that tension-oriented cell divisions release anisotropic tension within the EVL and that in the absence of such divisions, EVL cells undergo ectopic fusions. 
We conclude that forces applied to the EVL by the action of the YSL actomyosin ring generate a tension anisotropy in the EVL that orients cell divisions, which in turn limit tissue tension increase thereby facilitating tissue spreading.},
author = {Campinho, Pedro},
pages = {123},
publisher = {IST Austria},
title = {{Mechanics of zebrafish epiboly: Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading}},
year = {2013},
}
@phdthesis{2964,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. These neurons receive multiple excitatory inputs from numerous sources. Therefore, the rules of spatiotemporal integration of multiple synaptic inputs and propagation of action potentials are important to understand how CA3 neurons contribute to higher brain functions at cellular level. By using confocally targeted patch-clamp recording techniques, we investigated the biophysical properties of rat CA3 pyramidal neuron dendrites. We found two distinct dendritic domains critical for action potential initiation and propagation: In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+-channel mediated dendritic spikes are efficiently evoked by local dendritic depolarization or waveforms mimicking synaptic events. These findings can be explained by a high Na+-to-K+ conductance density ratio of CA3 pyramidal neuron dendrites. The results challenge the prevailing view that proximal mossy fiber inputs activate CA3 pyramidal neurons more efficiently than distal perforant inputs by showing that the distal synapses trigger a different form of activity represented by dendritic spikes. The high probability of dendritic spike initiation in the distal area may enhance the computational power of CA3 pyramidal neurons in the hippocampal network. },
author = {Kim, Sooyun},
pages = {65},
publisher = {IST Austria},
title = {{Active properties of hippocampal CA3 pyramidal neuron dendrites}},
year = {2012},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de‐adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by inside-out activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19. 
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in three-dimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined three-dimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP. 
The result was that chemotactic DCs can switch between integrin-dependent and integrin- independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblasts cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrin-mediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue. },
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@phdthesis{2075,
abstract = {This thesis investigates the combination of data-driven and physically based techniques for acquiring, modeling, and animating deformable materials, with a special focus on human faces. Furthermore, based on these techniques, we introduce a data-driven process for designing and fabricating materials with desired deformation behavior.
Realistic simulation behavior, surface details, and appearance are still demanding tasks. Neither pure data-driven, pure procedural, nor pure physical methods are best suited for accurate synthesis of facial motion and details (both for appearance and geometry), due to the difficulties in model design, parameter estimation, and desired controllability for animators. Capturing of a small but representative amount of real data, and then synthesizing diverse on-demand examples with physically-based models and real data as input benefits from both sides: Highly realistic model behavior due to real-world data and controllability due to physically-based models.
To model the face and its behavior, hybrid physically-based and data-driven approaches are elaborated. We investigate surface-based representations as well as a solid representation based on FEM. To achieve realistic behavior, we propose to build light-weighted data capture devices to acquire real-world data to estimate model parameters and to employ concepts from data-driven modeling techniques and machine learning. The resulting models support simple acquisition systems, offer techniques to process and extract model parameters from real-world data, provide a compact representation of the facial geometry and its motion, and allow intuitive editing. We demonstrate applications such as capture of facial geometry and motion and real-time animation and transfer of facial details, and show that our soft tissue model can react to external forces and produce realistic deformations beyond facial expressions.
Based on this model, we furthermore introduce a data-driven process for designing and fabricating materials with desired deformation behavior. The process starts with measuring deformation properties of base materials. Each material is represented as a non-linear stress-strain relationship in a finite-element model. For material design and fabrication, we introduce an optimization process that finds the best combination of base materials that meets a user’s criteria specified by example deformations. Our algorithm employs a number of strategies to prune poor solutions from the combinatorial search space. We finally demonstrate the complete process by designing and fabricating objects with complex heterogeneous materials using modern multi-material 3D printers.
},
author = {Bickel, Bernd},
number = {7458},
publisher = {ETH Zurich},
title = {{Measurement-based modeling and fabrication of deformable materials for human faces}},
doi = {10.3929/ethz-a-006354908},
volume = {499},
year = {2010},
}
@phdthesis{3962,
author = {Pflicke, Holger},
publisher = {IST Austria},
title = {{Dendritic cell migration across basement membranes in the skin}},
year = {2010},
}
@phdthesis{3296,
abstract = {Accurate computational representations of highly deformable surfaces are indispensable in the fields of computer animation, medical simulation, computer vision, digital modeling, and computational physics. The focus of this dissertation is on the animation of physics-based phenomena with highly detailed deformable surfaces represented by triangle meshes.
We first present results from an algorithm that generates continuum mechanics animations with intricate surface features. This method combines a finite element method with a tetrahedral mesh generator and a high resolution surface mesh, and it is orders of magnitude more efficient than previous approaches. Next, we present an efficient solution for the challenging problem of computing topological changes in detailed dynamic surface meshes. We then introduce a new physics-inspired surface tracking algorithm that is capable of preserving arbitrarily thin features and reproducing realistic fine-scale topological changes like Rayleigh-Plateau instabilities. This physics-inspired surface tracking technique also opens the door for a unique coupling between surficial finite element methods and volumetric finite difference methods, in order to simulate liquid surface tension phenomena more efficiently than any previous method. Due to its dramatic increase in computational resolution and efficiency, this method yielded the first computer simulations of a fully developed crown splash with droplet pinch off.},
author = {Wojtan, Chris},
pages = {1 -- 175},
publisher = {Georgia Institute of Technology},
title = {{Animating physical phenomena with embedded surface meshes}},
year = {2010},
}
@phdthesis{4232,
author = {Vladar, Harold},
publisher = {Faculty of Mathematical and Natural Sciences, University of Groningen},
title = {{Stochasticity and Variability in the dynamics and genetics of populations}},
legacy-id = {3811},
year = {2009},
}
@phdthesis{4363,
author = {Singh, Vasu},
publisher = {EPFL Lausanne},
title = {{Formalizing and Verifying Transactional Memories}},
year = {2009},
}
@phdthesis{3400,
abstract = {Invasive fungal infections pose a serious threat to immunocompromised people. Most of these infections are caused by either Candida or Aspergillus species, with A. fumigatus being the predominant causative agent of Invasive Aspergillosis. Affected people comprise mainly haematopoietic stem cell or solid organ transplant patients who receive either high-dose corticosteroids or immunosuppressants. These risk factors predispose to the development of Invasive
Aspergillosis which is lethal in 20 to 80 % of the cases, largely due to insufficient efficacy of current antifungal therapy. Thus one major aim in current mycological research is the identification of new drug targets.
The polysaccharide-based fungal cell wall is both essential to fungi and absent from human cells which makes it appear an attractive new target. Notably, many components of the A. fumigatus cell wall, including the polysaccharide galactomannan, glycoproteins, and glycolipids, contain the unusual sugar galactofuranose (Galf). In contrast to the other cell wall monosaccharides, Galf does not occur on human cells but is known as component of cell surface molecules of many pathogenic bacteria and protozoa, such as Mycobacterium tuberculosis or Leishmania major. These molecules are often essential for virulence or viability of these organisms which suggested a possible role of Galf in the pathogenicity of A. fumigatus.
To address the importance of Galf in A. fumigatus, the key biosynthesis gene glfA, encoding UDP-galactopyranose mutase (UGM), was deleted. In different experimental approaches it was demonstrated that the absence of the glfA gene led to a complete loss of Galf-containing glycans.
Analysis of the DeltaglfA phenotype revealed growth and sporulation defects, reduced thermotolerance and an increased susceptibility to antifungal drugs. Electron Microscopy indicated a cell wall defect as a likely cause for the observed impairments. Furthermore, the virulence of the DeltaglfA mutant was found to be severely attenuated in a murine model of Invasive Aspergillosis.
The second focus of this study was laid on further elucidation of the galactofuranosylation pathway in A. fumigatus. In eukaryotes, a UDP-Galf transporter is likely required to transport UDP-Galf from the
cytosol into the organelles of the secretory pathway, but no such activity had been described. Sixteen candidate genes were identified in the A. fumigatus genome of which one, glfB, was found in close proximity to the glfA gene. In vitro transport assays revealed specificity of GlfB for UDP-Galf suggesting that glfB encoded indeed a UDP-Galf transporter. The influence of glfB on
galactofuranosylation was determined by a DeltaglfB deletion mutant, which closely recapitulated the DeltaglfA phenotype and was likewise found to be completely devoid of Galf. It could be concluded that all galactofuranosylation processes in A. fumigatus occur in the secretory pathway, including the biosynthesis of the cell wall polysaccharide galactomannan whose subcellular origin was previously disputed.
Thus in the course of this study the first UDP-Galf specific nucleotide sugar transporter was identified and its requirement for galactofuranosylation in A. fumigatus demonstrated. Moreover, it was shown that blocking the galactofuranosylation pathway impaired virulence of A. fumigatus which suggests the UDP-Galf biosynthesis enzyme UGM as a target for new antifungal drugs.},
author = {Schmalhorst, Philipp},
pages = {1 -- 72},
publisher = {Gottfried Wilhelm Leibniz Universität Hannover},
title = {{Biosynthesis of Galactofuranose Containing Glycans and Their Relevance for the Pathogenic Fungus Aspergillus fumigatus}},
year = {2009},
}
@phdthesis{4409,
abstract = {Models of timed systems must incorporate not only the sequence of system events, but the timings of these events as well to capture the real-time aspects of physical systems. Timed automata are models of real-time systems in which states consist of discrete locations and values for real-time clocks. The presence of real-time clocks leads to an uncountable state space. This thesis studies verification problems on timed automata in a game theoretic framework.
For untimed systems, two systems are close if every sequence of events of one system is also observable in the second system. For timed systems, the difference in timings of the two corresponding sequences is also of importance. We propose the notion of bisimulation distance which quantifies timing differences; if the bisimulation distance between two systems is epsilon, then (a) every sequence of events of one system has a corresponding matching sequence in the other, and (b) the timings of matching events in between the two corresponding traces do not differ by more than epsilon. We show that we can compute the bisimulation distance between two timed automata to within any desired degree of accuracy. We also show that the timed verification logic TCTL is robust with respect to our notion of quantitative bisimilarity, in particular, if a system satisfies a formula, then every close system satisfies a close formula.
Timed games are used for distinguishing between the actions of several agents, typically a controller and an environment. The controller must achieve its objective against all possible choices of the environment. The modeling of the passage of time leads to the presence of zeno executions, and corresponding unrealizable strategies of the controller which may achieve objectives by blocking time. We disallow such unreasonable strategies by restricting all agents to use only receptive strategies --strategies which while not being required to ensure time divergence by any agent, are such that no agent is responsible for blocking time. Time divergence is guaranteed when all players use receptive strategies. We show that timed automaton games with receptive strategies can be solved by a reduction to finite state turn based game graphs. We define the logic timed alternating-time temporal logic for verification of timed automaton games and show that the logic can be model checked in EXPTIME. We also show that the minimum time required by an agent to reach a desired location, and the maximum time an agent can stay safe within a set of locations, against all possible actions of its adversaries are both computable.
We next study the memory requirements of winning strategies for timed automaton games. We prove that finite memory strategies suffice for safety objectives, and that winning strategies for reachability objectives may require infinite memory in general. We introduce randomized strategies in which an agent can propose a probabilistic distribution of moves and show that finite memory randomized strategies suffice for all omega-regular objectives. We also show that while randomization helps in simplifying winning strategies, and thus allows the construction of simpler controllers, it does not help a player in winning at more states, and thus does not allow the construction of more powerful controllers.
Finally we study robust winning strategies in timed games. In a physical system, a controller may propose an action together with a time delay, but the action cannot be assumed to be executed at the exact proposed time delay. We present robust strategies which incorporate such jitters and show that the set of states from which an agent can win robustly is computable.},
author = {Prabhu, Vinayak S},
pages = {1 -- 137},
publisher = {University of California, Berkeley},
title = {{Games for the verification of timed systems}},
year = {2008},
}
@phdthesis{4415,
abstract = {Many computing applications, especially those in safety critical embedded systems, require highly predictable timing properties. However, time is often not present in the prevailing computing and networking abstractions. In fact, most advances in computer architecture, software, and networking favor average-case performance over timing predictability. This thesis studies several methods for the design of concurrent and/or distributed embedded systems with precise timing guarantees. The focus is on flexible and compositional methods for programming and verification of the timing properties. The presented methods together with related formalisms cover two levels of design: (1) Programming language/model level. We propose the distributed variant of Giotto, a coordination programming language with an explicit temporal semantics—the logical execution time (LET) semantics. The LET of a task is an interval of time that specifies the time instants at which task inputs and outputs become available (task release and termination instants). The LET of a task is always non-zero. This allows us to communicate values across the network without changing the timing information of the task, and without introducing nondeterminism. We show how this methodology supports distributed code generation for distributed real-time systems. The method gives up some performance in favor of composability and predictability. We characterize the tradeoff by comparing the LET semantics with the semantics used in Simulink. (2) Abstract task graph level. We study interface-based design and verification of applications represented with task graphs. We consider task sequence graphs with general event models, and cyclic graphs with periodic event models with jitter and phase. Here an interface of a component exposes time and resource constraints of the component. Together with interfaces we formally define interface composition operations and the refinement relation. 
For efficient and flexible composability checking two properties are important: incremental design and independent refinement. According to the incremental design property the composition of interfaces can be performed in any order, even if interfaces for some components are not known. The refinement relation is defined such that in a design we can always substitute a refined interface for an abstract one. We show that the framework supports independent refinement, i.e., the refinement relation is preserved under composition operations.},
author = {Matic, Slobodan},
pages = {1 -- 148},
publisher = {University of California, Berkeley},
title = {{Compositionality in deterministic real-time embedded systems}},
year = {2008},
}
@phdthesis{4524,
abstract = {Complex requirements, time-to-market pressure and regulatory constraints have made the designing of embedded systems extremely challenging. This is evident by the increase in effort and expenditure for design of safety-driven real-time control-dominated applications like automotive and avionic controllers. Design processes are often challenged by lack of proper programming tools for specifying and verifying critical requirements (e.g. timing and reliability) of such applications. Platform based design, an approach for designing embedded systems, addresses the above concerns by separating requirement from architecture. The requirement specifies the intended behavior of an application while the architecture specifies the guarantees (e.g. execution speed, failure rate etc). An implementation, a mapping of the requirement on the architecture, is then analyzed for correctness. The orthogonalization of concerns makes the specification and analyses simpler. An effective use of such design methodology has been proposed in Logical Execution Time (LET) model of real-time tasks. The model separates the timing requirements (specified by release and termination instances of a task) from the architecture guarantees (specified by worst-case execution time of the task).
This dissertation proposes a coordination language, Hierarchical Timing Language (HTL), that captures the timing and reliability requirements of real-time applications. An implementation of the program on an architecture is then analyzed to check whether desired timing and reliability requirements are met or not. The core framework extends the LET model by accounting for reliability and refinement. The reliability model separates the reliability requirements of tasks from the reliability guarantees of the architecture. The requirement expresses the desired long-term reliability while the architecture provides a short-term reliability guarantee (e.g. failure rate for each iteration). The analysis checks if the short-term guarantee ensures the desired long-term reliability. The refinement model allows replacing a task by another task during program execution. Refinement preserves schedulability and reliability, i.e., if a refined task is schedulable and reliable for an implementation, then the refining task is also schedulable and reliable for the implementation. Refinement helps in concise specification without overloading analysis.
The work presents the formal model, the analyses (both with and without refinement), and a compiler for HTL programs. The compiler checks composition and refinement constraints, performs schedulability and reliability analyses, and generates code for implementation of an HTL program on a virtual machine. Three real-time controllers, one each from automatic control, automotive control and avionic control, are used to illustrate the steps in modeling and analyzing HTL programs.},
author = {Ghosal, Arkadeb},
pages = {1 -- 210},
publisher = {University of California, Berkeley},
title = {{A hierarchical coordination language for reliable real-time tasks}},
year = {2008},
}
@phdthesis{4559,
abstract = {We study games played on graphs with omega-regular conditions specified as parity, Rabin, Streett or Muller conditions. These games have applications in the verification, synthesis, modeling, testing, and compatibility checking of reactive systems. Important distinctions between graph games are as follows: (a) turn-based vs. concurrent games, depending on whether at a state of the game only a single player makes a move, or players make moves simultaneously; (b) deterministic vs. stochastic, depending on whether the transition function is a deterministic or a probabilistic function over successor states; and (c) zero-sum vs. non-zero-sum, depending on whether the objectives of the players are strictly conflicting or not.
We establish that the decision problems for turn-based stochastic zero-sum games with Rabin, Streett, and Muller objectives are NP-complete, coNP-complete, and PSPACE-complete, respectively, substantially improving the previously known 3EXPTIME bound. We also present strategy improvement style algorithms for turn-based stochastic Rabin and Streett games. In the case of concurrent stochastic zero-sum games with parity objectives we obtain a PSPACE bound, again improving the previously known 3EXPTIME bound. As a consequence, concurrent stochastic zero-sum games with Rabin, Streett, and Muller objectives can be solved in EXPSPACE, improving the previously known 4EXPTIME bound. We also present an elementary and combinatorial proof of the existence of memoryless {$\epsilon$}-optimal strategies in concurrent stochastic games with reachability objectives, for all real {$\epsilon > 0$}, where an {$\epsilon$}-optimal strategy achieves the value of the game to within {$\epsilon$} against all strategies of the opponent. We also use the proof techniques to present a strategy improvement style algorithm for concurrent stochastic reachability games.
We then go beyond {$\omega$}-regular objectives and study the complexity of an important class of quantitative objectives, namely, limit-average objectives. In the case of limit-average games, the states of the graph are labeled with rewards and the goal is to maximize the long-run average of the rewards. We show that concurrent stochastic zero-sum games with limit-average objectives can be solved in EXPTIME.
Finally, we introduce a new notion of equilibrium, called secure equilibrium, in non-zero-sum games which captures the notion of conditional competitiveness. We prove the existence of unique maximal secure equilibrium payoff profiles in turn-based deterministic games, and present algorithms to compute such payoff profiles. We also show how the notion of secure equilibrium extends the assume-guarantee style of reasoning in the game theoretic framework.},
author = {Chatterjee, Krishnendu},
pages = {1 -- 247},
publisher = {University of California, Berkeley},
title = {{Stochastic $\omega$-Regular Games}},
year = {2007},
}
@phdthesis{4566,
abstract = {Complex system design today calls for compositional design and implementation. However, each component is designed with certain assumptions about the environment it is meant to operate in, and delivering certain guarantees if those assumptions are satisfied; numerous inter-component interaction errors are introduced in the manual and error-prone integration process as there is little support in design environments for machine-readably representing these assumptions and guarantees and automatically checking consistency during integration.
Based on Interface Automata we propose a framework for compositional design and analysis of systems: a set of domain-specific automata-theoretic type systems for compositional system specification and analysis by behavioral specification of open systems. We focus on three different domains: component-based hardware systems communicating on bidirectional wires, concurrent distributed recursive message-passing software systems, and embedded software system components operating in resource-constrained environments. For these domains we present approaches to formally represent the assumptions and conditional guarantees between interacting open system components. Composition of such components produces new components with the appropriate assumptions and guarantees. We check satisfaction of temporal logic specifications by such components, and the substitutability of one component with another in an arbitrary context. Using this framework one can analyze large systems incrementally without needing extensive summary information to close the system at each stage. Furthermore, we focus only on the inter-component interaction behavior without dealing with the full implementation details of each component. Many of the merits of automata-theoretic model-checking are combined with the compositionality afforded by type-system based techniques. We also present an integer-based extension of the conventional boolean verification framework motivated by our interface formalism for embedded software components.
Our algorithms for checking the behavioral compatibility of component interfaces are available in our tool Chic, which can be used as a plug-in for the Java IDE JBuilder and the heterogeneous modeling and design environment Ptolemy II.
Finally, we address the complementary problem of partitioning a large system into meaningful coherent components by analyzing the interaction patterns between its basic elements. We demonstrate the usefulness of our partitioning approach by evaluating its efficacy in improving unit-test branch coverage for a large software system implemented in C.},
author = {Chakrabarti, Arindam},
pages = {1 -- 244},
publisher = {University of California, Berkeley},
title = {{A framework for compositional design and analysis of systems}},
year = {2007},
}
@phdthesis{4236,
author = {de Vladar, Harold Paul},
publisher = {Centro de Estudios Avanzados, IVIC},
title = {{Métodos no lineales y sus aplicaciones en dinámicas aleatorias de poblaciones celulares}},
doi = {3810},
year = {2004},
}
@phdthesis{4424,
abstract = {The enormous cost and ubiquity of software errors necessitates techniques and tools that can precisely analyze large systems and prove that they meet given specifications, or if they don't, return counterexample behaviors showing how the system fails. Recent advances in model checking, decision procedures, program analysis and type systems, and a shift of focus to partial specifications common to several systems (e.g., memory safety and race freedom) have resulted in several practical verification methods. However, these methods are either precise or they are scalable, depending on whether they track the values of variables or only a fixed small set of dataflow facts (e.g., types), and are usually insufficient for precisely verifying large programs.
We describe a new technique called Lazy Abstraction (LA) which achieves both precision and scalability by localizing the use of precise information. LA automatically builds, explores and refines a single abstract model of the program in a way that different parts of the model exhibit different degrees of precision, namely just enough to verify the desired property. The algorithm automatically mines the information required by partitioning mechanical proofs of unsatisfiability of spurious counterexamples into Craig Interpolants. For multithreaded systems, we give a new technique based on analyzing the behavior of a single thread executing in a context which is an abstraction of the other (arbitrarily many) threads. We define novel context models and show how to automatically infer them and analyze the full system (thread + context) using LA.
LA is implemented in BLAST. We have run BLAST on Windows and Linux Device Drivers to verify API conformance properties, and have used it to find (or guarantee the absence of) data races in multithreaded Networked Embedded Systems (NESC) applications. BLAST is able to prove the absence of races in several cases where earlier methods, which depend on lock-based synchronization, fail.},
author = {Jhala, Ranjit},
pages = {1 -- 165},
publisher = {University of California, Berkeley},
title = {{Program verification by lazy abstraction}},
year = {2004},
}
@phdthesis{2414,
author = {Wagner, Uli},
publisher = {ETH Zurich},
title = {{On k-Sets and Their Applications}},
doi = {10.3929/ethz-a-004708408},
year = {2003},
}
@phdthesis{4416,
abstract = {Methods for the formal specification and verification of systems are indispensable for the development of complex yet correct systems. In formal verification, the designer describes the system in a modeling language with a well-defined semantics, and this system description is analyzed against a set of correctness requirements. Model checking is an algorithmic technique to check that a system description indeed satisfies correctness requirements given as logical specifications. While successful in hardware verification, the potential for model checking for software and embedded systems has not yet been realized. This is because traditional model checking focuses on systems modeled as finite state-transition graphs. While a natural model for hardware (especially synchronous hardware), state-transition graphs often do not capture software and embedded systems at an appropriate level of granularity. This dissertation considers two orthogonal extensions to finite state-transition graphs making model checking techniques applicable to both a wider class of systems and a wider class of properties.
The first direction is an extension to infinite-state structures finitely represented using constraints and operations on constraints. Infinite state arises when we wish to model variables with unbounded range (e.g., integers), or data structures, or real time. We provide a uniform framework of symbolic region algebras to study model checking of infinite-state systems. We also provide sufficient language-independent termination conditions for symbolic model checking algorithms on infinite state systems.
The second direction supplements verification with game theoretic reasoning. Games are natural models for interactions between components. We study game theoretic behavior with winning conditions given by temporal logic objectives both in the deterministic and in the probabilistic context. For deterministic games, we provide an extremal model characterization of fixpoint algorithms that link solutions of verification problems to solutions for games. For probabilistic games we study fixpoint characterization of winning probabilities for games with omega-regular winning objectives, and construct (epsilon-)optimal winning strategies.},
author = {Majumdar, Ritankar S.},
pages = {1 -- 201},
publisher = {University of California, Berkeley},
title = {{Symbolic algorithms for verification and control}},
year = {2003},
}
@phdthesis{4425,
abstract = {Giotto provides a time-triggered programmer's model for the implementation of embedded control systems with hard real-time constraints. Giotto's precise semantics and predictability make it suitable for safety-critical applications.
Giotto is based around the idea that time-triggered task invocation together with time-triggered mode switching can form a useful programming model for real-time systems. To substantiate this claim, we describe the use of Giotto to refactor the software of a small, autonomous helicopter. The ease with which Giotto expresses the existing software provides evidence that Giotto is an appropriate programming language for control systems.
Since Giotto is a real-time programming language, ensuring that Giotto programs meet their deadlines is crucial. To study precedence-constrained Giotto scheduling, we first examine single-mode, single-processor scheduling. We extend to an infinite, periodic setting the classical problem of meeting deadlines for a set of tasks with release times, deadlines, precedence constraints, and preemption. We then develop an algorithm for scheduling Giotto programs on a single processor by representing Giotto programs as instances of the extended scheduling problem.
Next, we study multi-mode, single-processor Giotto scheduling. This problem is different from classical scheduling problems, since in our precedence-constrained approach, the deadlines of tasks may vary depending on the mode switching behavior of the program. We present conditional scheduling models which capture this varying-deadline behavior. We develop polynomial-time algorithms for some conditional scheduling models, and prove others to be computationally hard. We show how to represent multi-mode Giotto programs as instances of the model, resulting in an algorithm for scheduling multi-mode Giotto programs on a single processor.
Finally, we show that the problem of scheduling Giotto programs for multiple networked processors is strongly NP-hard.},
author = {Horowitz, Benjamin},
pages = {1 -- 237},
publisher = {University of California, Berkeley},
title = {{Giotto: A time-triggered language for embedded programming}},
year = {2003},
}
@phdthesis{3678,
author = {Lampert, Christoph},
pages = {1 -- 165},
publisher = {Universität Bonn, Fachbibliothek Mathematik},
series = {Bonner Mathematische Schriften},
title = {{The Neumann operator in strictly pseudoconvex domains with weighted Bergman metric}},
volume = {356},
year = {2003},
}
@phdthesis{4414,
abstract = {This dissertation investigates game-theoretic approaches to the algorithmic analysis of concurrent, reactive systems. A concurrent system comprises a number of components working concurrently; a reactive system maintains an ongoing interaction with its environment. Traditional approaches to the formal analysis of concurrent reactive systems usually view the system as unstructured state-transition graphs; instead, we view them as collections of interacting components, where each one is an open system which accepts inputs from the other components. The interactions among the components are naturally modeled as games.
Adopting this game-theoretic view, we study three related problems pertaining to the verification and synthesis of systems. Firstly, we propose two novel game-theoretic techniques for the model-checking of concurrent reactive systems, and improve the performance of model-checking. The first technique discovers an error as soon as it cannot be prevented, which can be long before it actually occurs. This technique is based on the key observation that "unpreventability" is a local property to a module: an error is unpreventable in a module state if no environment can prevent it. The second technique attempts to decompose a model-checking proof into smaller proof obligations by constructing abstract modules automatically, using reachability and "unpreventability" information about the concrete modules. Three increasingly powerful proof decomposition rules are proposed and we show that in practice, the resulting abstract modules are often significantly smaller than the concrete modules and can drastically reduce the space and time requirements for verification. Both techniques fall into the category of compositional reasoning.
Secondly, we investigate the composition and control of synchronous systems. An essential property of synchronous systems for compositional reasoning is non-blocking. In the composition of synchronous systems, however, due to circular causal dependency of input and output signals, non-blocking is not always guaranteed. Blocking compositions of systems can be ruled out semantically, by insisting on the existence of certain fixed points, or syntactically, by equipping systems with types, which make the dependencies between input and output signals transparent. We characterize various typing mechanisms in game-theoretic terms, and study their effects on the controller synthesis problem. We show that our typing systems are general enough to capture interesting real-life synchronous systems such as all delay-insensitive digital circuits. We then study their corresponding single-step control problems --a restricted form of controller synthesis problem whose solutions can be iterated in appropriate manners to solve all LTL controller synthesis problems. We also consider versions of the controller synthesis problem in which the type of the controller is given. We show that the solution of these fixed-type control problems requires the evaluation of partially ordered (Henkin) quantifiers on boolean formulas, and is therefore harder (nondeterministic exponential time) than more traditional control questions.
Thirdly, we study the synthesis of a class of open systems, namely, uninitialized state machines. The sequential synthesis problem, which is closely related to Church's solvability problem, asks, given a specification in the form of a binary relation between input and output streams, for the construction of a finite-state stream transducer that converts inputs to appropriate outputs. For efficiency reasons, practical sequential hardware is often designed to operate without prior initialization. Such hardware designs can be modeled by uninitialized state machines, which are required to satisfy their specification if started from any state. We solve the sequential synthesis problem for uninitialized systems, that is, we construct uninitialized finite-state stream transducers. We consider specifications given by LTL formulas, deterministic, nondeterministic, universal, and alternating Buechi automata. We solve this uninitialized synthesis problem by reducing it to the well-understood initialized synthesis problem. While our solution is straightforward, it leads, for some specification formalisms, to upper bounds that are exponentially worse than the complexity of the corresponding initialized problems. However, we prove lower bounds to show that our simple solutions are optimal for all considered specification formalisms. The lower bound proofs require nontrivial generic reductions.},
author = {Mang, Freddy Y},
pages = {1 -- 116},
publisher = {University of California, Berkeley},
title = {{Games in open systems verification and synthesis}},
year = {2002},
}
@phdthesis{4411,
abstract = {Model checking algorithms for the verification of reactive systems proceed by a systematic and exhaustive exploration of the system state space. They do not scale to large designs because of the state explosion problem --the number of states grows exponentially with the number of components in the design. Consequently, the model checking problem is PSPACE-hard in the size of the design description. This dissertation proposes three novel techniques to combat the state explosion problem.
One of the most important advances in model checking in recent years has been the discovery of symbolic methods, which use a calculus of expressions, such as binary decision diagrams, to represent the state sets encountered during state space exploration. Symbolic model checking has proved to be effective for verifying hardware designs. Traditionally, symbolic checking of temporal logic specifications is performed by backward fixpoint reasoning with the operator Pre. Backward reasoning can be wasteful since unreachable states are explored. We suggest the use of forward fixpoint reasoning based on the operator Post. We show how all linear temporal logic specifications can be model checked symbolically by forward reasoning. In contrast to backward reasoning, forward reasoning performs computations only on the reachable states.
Heuristics that improve algorithms for application domains, such as symbolic methods for hardware designs, are useful but not enough to make model checking feasible on industrial designs. Currently, exhaustive state exploration is possible only on designs with about 50-100 boolean state variables. Assume-guarantee verification attempts to combat the state explosion problem by using the principle of "divide and conquer," where the components of the implementation are analyzed one at a time. Typically, an implementation component refines its specification only when its inputs are suitably constrained by other components in the implementation. The assume-guarantee principle states that instead of constraining the inputs by implementation components, it is sound to constrain them by the corresponding specification components, which can be significantly smaller. We extend the assume-guarantee proof rule to deal with the case where the specification operates at a coarser time scale than the implementation. Using our model checker Mocha, which implements this methodology, we verify VGI, a parallel DSP processor chip with 64 compute processors each containing approximately 800 state variables and 30K gates.
Our third contribution is a systematic model checking methodology for verifying the abstract shared-memory interface of sequential consistency on multiprocessor systems with three parameters --number of processors, number of memory locations, and number of data values. Sequential consistency requires that some interleaving of the local temporal orders of read/write events at different processors be a trace of serial memory. Therefore, it suffices to construct a non-interfering serializer that watches and reorders read/write events so that a trace of serial memory is obtained. While in general such a serializer must be unbounded even for fixed values of the parameters --checking sequential consistency is undecidable!-- we show that the paradigmatic class of snoopy cache coherence protocols has finite-state serializers. In order to reduce the arbitrary-parameter problem to the fixed-parameter problem, we develop a novel framework for induction over the number of processors and use the notion of a serializer to reduce the problem of verifying sequential consistency to that of checking language inclusion between finite state machines.},
author = {Qadeer, Shaz},
pages = {1 -- 150},
publisher = {University of California, Berkeley},
title = {{Algorithms and Methodology for Scalable Model Checking}},
year = {1999},
}