@phdthesis{1130,
abstract = {In this thesis we present a computer-aided programming approach to concurrency. Our approach
helps the programmer by automatically fixing concurrency-related bugs, i.e. bugs that occur
when the program is executed using an aggressive preemptive scheduler, but not when using a
non-preemptive (cooperative) scheduler. Bugs are program behaviours that are incorrect w.r.t.
a specification. We consider both user-provided explicit specifications in the form of assertion
statements in the code as well as an implicit specification. The implicit specification is inferred
from the non-preemptive behaviour. Let us consider sequences of calls that the program makes
to an external interface. The implicit specification requires that any such sequence produced
under a preemptive scheduler should be included in the set of sequences produced under a
non-preemptive scheduler.
We consider several semantics-preserving fixes that go beyond atomic sections typically
explored in the synchronisation synthesis literature. Our synthesis is able to place locks, barriers
and wait-signal statements and last, but not least reorder independent statements. The latter
may be useful if a thread is released too early, e.g., before some initialisation is completed. We
guarantee that our synthesis does not introduce deadlocks and that the synchronisation inserted
is optimal w.r.t. a given objective function.
We dub our solution trace-based synchronisation synthesis and it is loosely based on
counterexample-guided inductive synthesis (CEGIS). The synthesis works by discovering a
trace that is incorrect w.r.t. the specification and identifying ordering constraints crucial to trigger
the specification violation. Synchronisation may be placed immediately (greedy approach) or
delayed until all incorrect traces are found (non-greedy approach). For the non-greedy approach
we construct a set of global constraints over synchronisation placements. Each model of the
global constraints set corresponds to a correctness-ensuring synchronisation placement. The
placement that is optimal w.r.t. the given objective function is chosen as the synchronisation
solution.
We evaluate our approach on a number of realistic (albeit simplified) Linux device-driver
benchmarks. The benchmarks are versions of the drivers with known concurrency-related bugs.
For the experiments with an explicit specification we added assertions that would detect the bugs
in the experiments. Device drivers lend themselves to implicit specification, where the device and
the operating system are the external interfaces. Our experiments demonstrate that our synthesis
method is precise and efficient. We implemented objective functions for coarse-grained and
fine-grained locking and observed that different synchronisation placements are produced for
our experiments, favouring e.g. a minimal number of synchronisation operations or maximum
concurrency.},
author = {Tarrach, Thorsten},
pages = {151},
publisher = {IST Austria},
title = {{Automatic synthesis of synchronisation primitives for concurrent programs}},
year = {2016},
}
@phdthesis{1131,
abstract = {Evolution of gene regulation is important for phenotypic evolution and diversity. Sequence-specific binding of regulatory proteins is one of the key regulatory mechanisms determining gene expression. Although there has been intense interest in evolution of regulatory binding sites in the last decades, a theoretical understanding is far from being complete. In this thesis, I aim at a better understanding of the evolution of transcriptional regulatory binding sequences by using biophysical and population genetic models.
In the first part of the thesis, I discuss how to formulate the evolutionary dynamics of binding sequences in a single isolated binding site and in promoter/enhancer regions. I develop a theoretical framework bridging between a thermodynamical model for transcription and a mutation-selection-drift model for monomorphic populations. I mainly address the typical evolutionary rates, and how they depend on biophysical parameters (e.g. binding length and specificity) and population genetic parameters (e.g. population size and selection strength).
In the second part of the thesis, I analyse empirical data for a better evolutionary and biophysical understanding of sequence-specific binding of bacterial RNA polymerase. First, I infer selection on regulatory and non-regulatory binding sites of RNA polymerase in the E. coli K12 genome. Second, I infer the chemical potential of RNA polymerase, an important but unknown physical parameter defining the threshold energy for strong binding. Furthermore, I try to understand the relation between the lac promoter sequence diversity and the LacZ activity variation among 20 bacterial isolates by constructing a simple but biophysically motivated gene expression model. Lastly, I lay out a statistical framework to predict adaptive point mutations in de novo promoter evolution in a selection experiment.},
author = {Tugrul, Murat},
pages = {89},
publisher = {IST Austria},
title = {{Evolution of transcriptional regulatory sequences}},
year = {2016},
}
@phdthesis{1128,
abstract = {The process of gene expression is central to the modern understanding of how cellular systems
function. In this process, a special kind of regulatory proteins, called transcription factors,
are important to determine how much protein is produced from a given gene. As biological
information is transmitted from transcription factor concentration to mRNA levels to amounts of
protein, various sources of noise arise and pose limits to the fidelity of intracellular signaling.
This thesis concerns itself with several aspects of stochastic gene expression: (i) the mathematical
description of complex promoters responsible for the stochastic production of biomolecules,
(ii) fundamental limits to information processing the cell faces due to the interference from multiple
fluctuating signals, (iii) how the presence of gene expression noise influences the evolution
of regulatory sequences, (iv) and tools for the experimental study of origins and consequences
of cell-cell heterogeneity, including an application to bacterial stress response systems.},
author = {Rieckh, Georg},
pages = {114},
publisher = {IST Austria},
title = {{Studying the complexities of transcriptional regulation}},
year = {2016},
}
@phdthesis{1399,
abstract = {This thesis is concerned with the computation and approximation of intrinsic volumes. Given a smooth body M and a certain digital approximation of it, we develop algorithms to approximate various intrinsic volumes of M using only measurements taken from its digital approximations. The crucial idea behind our novel algorithms is to link the recent theory of persistent homology to the theory of intrinsic volumes via the Crofton formula from integral geometry and, in particular, via Euler characteristic computations. Our main contributions are a multigrid convergent digital algorithm to compute the first intrinsic volume of a solid body in R^n as well as an appropriate integration pipeline to approximate integral-geometric integrals defined over the Grassmannian manifold.},
author = {Pausinger, Florian},
pages = {144},
publisher = {IST Austria},
title = {{On the approximation of intrinsic volumes}},
year = {2015},
}
@phdthesis{1400,
abstract = {Cancer results from an uncontrolled growth of abnormal cells. Sequentially accumulated genetic and epigenetic alterations decrease cell death and increase cell replication. We used mathematical models to quantify the effect of driver gene mutations. The recently developed targeted therapies can lead to dramatic regressions. However, in solid cancers, clinical responses are often short-lived because resistant cancer cells evolve. We estimated that approximately 50 different mutations can confer resistance to a typical targeted therapeutic agent. We find that resistant cells are likely to be present in expanded subclones before the start of the treatment. The dominant strategy to prevent the evolution of resistance is combination therapy. Our analytical results suggest that in most patients, dual therapy, but not monotherapy, can result in long-term disease control. However, long-term control can only occur if there are no possible mutations in the genome that can cause cross-resistance to both drugs. Furthermore, we showed that simultaneous therapy with two drugs is much more likely to result in long-term disease control than sequential therapy with the same drugs. To improve our understanding of the underlying subclonal evolution we reconstruct the evolutionary history of a patient's cancer from next-generation sequencing data of spatially-distinct DNA samples. Using a quantitative measure of genetic relatedness, we found that pancreatic cancers and their metastases demonstrated a higher level of relatedness than that expected for any two cells randomly taken from a normal tissue. This minimal amount of genetic divergence among advanced lesions indicates that genetic heterogeneity, when quantitatively defined, is not a fundamental feature of the natural history of untreated pancreatic cancers. 
Our newly developed, phylogenomic tool Treeomics finds evidence for seeding patterns of metastases and can directly be used to discover rules governing the evolution of solid malignancies to transform cancer into a more predictable disease.},
author = {Reiter, Johannes},
pages = {183},
publisher = {IST Austria},
title = {{The subclonal evolution of cancer}},
year = {2015},
}
@phdthesis{1401,
abstract = {The human ability to recognize objects in complex scenes has driven research in the computer vision field over a couple of decades. This thesis focuses on the object recognition task in images. That is, given the image, we want the computer system to be able to predict the class of the object that appears in the image. A recent successful attempt to bridge semantic understanding of the image perceived by humans and by computers uses attribute-based models. Attributes are semantic properties of the objects shared across different categories, which humans and computers can decide on. To explore the attribute-based models we take a statistical machine learning approach, and address two key learning challenges in view of object recognition task: learning augmented attributes as mid-level discriminative feature representation, and learning with attributes as privileged information. Our main contributions are parametric and non-parametric models and algorithms to solve these frameworks. In the parametric approach, we explore an autoencoder model combined with the large margin nearest neighbor principle for mid-level feature learning, and linear support vector machines for learning with privileged information. In the non-parametric approach, we propose a supervised Indian Buffet Process for automatic augmentation of semantic attributes, and explore the Gaussian Processes classification framework for learning with privileged information. A thorough experimental analysis shows the effectiveness of the proposed models in both parametric and non-parametric views.},
author = {Sharmanska, Viktoriia},
pages = {144},
publisher = {IST Austria},
title = {{Learning with attributes for object recognition: Parametric and non-parametric views}},
year = {2015},
}
@phdthesis{1395,
abstract = {In this thesis I studied various individual and social immune defences employed by the invasive garden ant Lasius neglectus mostly against entomopathogenic fungi. The first two chapters of this thesis address the phenomenon of 'social immunisation'. Social immunisation, that is the immunological protection of group members due to social contact to a pathogen-exposed nestmate, has been described in various social insect species against different types of pathogens. However, in the case of entomopathogenic fungi it has, so far, only been demonstrated that social immunisation exists at all. Its underlying mechanisms or any other properties were, however, unknown. In the first chapter of this thesis I identified the mechanistic basis of social immunisation in L. neglectus against the entomopathogenic fungus Metarhizium. I could show that nestmates of a pathogen-exposed individual contract low-level infections due to social interactions. These low-level infections are, however, non-lethal and cause an active stimulation of the immune system, which protects the nestmates upon subsequent pathogen encounters. In the second chapter of this thesis I investigated the specificity and colony level effects of social immunisation. I demonstrated that the protection conferred by social immunisation is highly specific, protecting ants only against the same pathogen strain. In addition, depending on the respective context, social immunisation may even cause fitness costs. I further showed that social immunisation crucially affects sanitary behaviour and disease dynamics within ant groups. In the third chapter of this thesis I studied the effects of the ectosymbiotic fungus Laboulbenia formicarum on its host L. neglectus. Although Laboulbeniales are the largest order of insect-parasitic fungi, research concerning host fitness consequence is sparse.
I showed that highly Laboulbenia-infected ants sustain fitness costs under resource limitation, however, gain fitness benefits when exposed to an entomopathogenic fungus. These effects are probably caused by a prophylactic upregulation of behavioural as well as physiological immune defences in highly infected ants.},
author = {Konrad, Matthias},
pages = {131},
publisher = {IST Austria},
title = {{Immune defences in ants: Effects of social immunisation and a fungal ectosymbiont in the ant Lasius neglectus}},
year = {2014},
}
@phdthesis{1402,
abstract = {Phosphatidylinositol (PtdIns) is a structural phospholipid that can be phosphorylated into various lipid signaling molecules, designated polyphosphoinositides (PPIs). The reversible phosphorylation of PPIs on the 3, 4, or 5 position of inositol is performed by a set of organelle-specific kinases and phosphatases, and the characteristic head groups make these molecules ideal for regulating biological processes in time and space. In yeast and mammals, PtdIns3P and PtdIns(3,5)P2 play crucial roles in trafficking toward the lytic compartments, whereas the role in plants is not yet fully understood. Here we identified the role of a land plant-specific subgroup of PPI phosphatases, the suppressor of actin 2 (SAC2) to SAC5, during vacuolar trafficking and morphogenesis in Arabidopsis thaliana. SAC2-SAC5 localize to the tonoplast along with PtdIns3P, the presumable product of their activity. In SAC gain- and loss-of-function mutants, the levels of PtdIns monophosphates and bisphosphates were changed, with opposite effects on the morphology of storage and lytic vacuoles, and the trafficking toward the vacuoles was defective. Moreover, multiple sac knockout mutants had an increased number of smaller storage and lytic vacuoles, whereas extralarge vacuoles were observed in the overexpression lines, correlating with various growth and developmental defects. The fragmented vacuolar phenotype of sac mutants could be mimicked by treating wild-type seedlings with PtdIns(3,5)P2, corroborating that this PPI is important for vacuole morphology. Taken together, these results provide evidence that PPIs, together with their metabolic enzymes SAC2-SAC5, are crucial for vacuolar trafficking and for vacuolar morphology and function in plants.},
author = {Marhavá, Petra},
pages = {90},
publisher = {IST Austria},
title = {{Molecular mechanisms of patterning and subcellular trafficking in Arabidopsis thaliana}},
year = {2014},
}
@phdthesis{1403,
abstract = {A variety of developmental and disease related processes depend on epithelial cell sheet spreading. In order to gain insight into the biophysical mechanism(s) underlying the tissue morphogenesis we studied the spreading of an epithelium during the early development of the zebrafish embryo. In zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the yolk cell to completely engulf it at the end of gastrulation. Previous studies have proposed that an actomyosin ring forming within the yolk syncytial layer (YSL) acts as purse string that through constriction along its circumference pulls on the margin of the EVL. Direct biophysical evidence for this hypothesis has however been missing. The aim of the thesis was to understand how the actomyosin ring may generate pulling forces onto the EVL and what cellular mechanism(s) may facilitate the spreading of the epithelium. Using laser ablation to measure cortical tension within the actomyosin ring we found an anisotropic tension distribution, which was highest along the circumference of the ring. However the low degree of anisotropy was incompatible with the actomyosin ring functioning as a purse string only. Additionally, we observed retrograde cortical flow from vegetal parts of the ring into the EVL margin. Interpreting the experimental data using a theoretical distribution that models the tissues as active viscous gels led us to propose that the actomyosin ring has a twofold contribution to EVL epiboly. It not only acts as a purse string through constriction along its circumference, but in addition constriction along the width of the ring generates pulling forces through friction-resisted cortical flow. Moreover, when rendering the purse string mechanism unproductive EVL epiboly proceeded normally indicating that the flow-friction mechanism is sufficient to drive the process.
Aiming to understand what cellular mechanism(s) may facilitate the spreading of the epithelium we found that tension-oriented EVL cell divisions limit tissue anisotropy by releasing tension along the division axis and promote epithelial spreading. Notably, EVL cells undergo ectopic cell fusion in conditions in which oriented-cell division is impaired or the epithelium is mechanically challenged. Taken together our study of EVL epiboly suggests a novel mechanism of force generation for actomyosin rings through friction-resisted cortical flow and highlights the importance of tension-oriented cell divisions in epithelial morphogenesis.},
author = {Behrndt, Martin},
pages = {91},
publisher = {IST Austria},
title = {{Forces driving epithelial spreading in zebrafish epiboly}},
year = {2014},
}
@phdthesis{1404,
abstract = {The co-evolution of hosts and pathogens is characterized by continuous adaptations of both parties. Pathogens of social insects need to adapt towards disease defences at two levels: 1) individual immunity of each colony member consisting of behavioural defence strategies as well as humoral and cellular immune responses and 2) social immunity that is collectively performed by all group members comprising behavioural, physiological and organisational defence strategies.
To disentangle the selection pressure on pathogens by the collective versus individual level of disease defence in social insects, we performed an evolution experiment using the Argentine Ant, Linepithema humile, as a host and a mixture of the general insect pathogenic fungus Metarhizium spp. (6 strains) as a pathogen. We allowed pathogen evolution over 10 serial host passages to two different evolution host treatments: (1) only individual host immunity in a single host treatment, and (2) simultaneously acting individual and social immunity in a social host treatment, in which an exposed ant was accompanied by two untreated nestmates.
Before starting the pathogen evolution experiment, the 6 Metarhizium spp. strains were characterised concerning conidiospore size, killing rates in singly and socially reared ants, their competitiveness under coinfecting conditions and their influence on ant behaviour. We analysed how the ancestral strain mixture changed in conidiospore size, killing rate and strain composition dependent on host treatment (single or social hosts) during 10 passages and found that killing rate and conidiospore size of the pathogen increased under both evolution regimes, but differently depending on host treatment.
Testing the evolved strain mixtures that evolved under either the single or social host treatment under both single and social current rearing conditions in a full factorial design experiment revealed that the additional collective defences in insect societies add new selection pressure for their coevolving pathogens that compromise their ability to adapt to its host at the group level. To our knowledge, this is the first study directly measuring the influence of social immunity on pathogen evolution.},
author = {Stock, Miriam},
pages = {101},
publisher = {IST Austria},
title = {{Evolution of a fungal pathogen towards individual versus social immunity in ants}},
year = {2014},
}
@phdthesis{1405,
abstract = {Motivated by the analysis of highly dynamic message-passing systems, i.e. unbounded thread creation, mobility, etc. we present a framework for the analysis of depth-bounded systems. Depth-bounded systems are one of the most expressive known fragments of the π-calculus for which interesting verification problems are still decidable. Even though they are infinite state systems depth-bounded systems are well-structured, thus can be analyzed algorithmically. We give an interpretation of depth-bounded systems as graph-rewriting systems. This gives more flexibility and ease of use to apply depth-bounded systems to other types of systems like shared memory concurrency.
First, we develop an adequate domain of limits for depth-bounded systems, a prerequisite for the effective representation of downward-closed sets. Downward-closed sets are needed by forward saturation-based algorithms to represent potentially infinite sets of states. Then, we present an abstract interpretation framework to compute the covering set of well-structured transition systems. Because, in general, the covering set is not computable, our abstraction over-approximates the actual covering set. Our abstraction captures the essence of acceleration based-algorithms while giving up enough precision to ensure convergence. We have implemented the analysis in the PICASSO tool and show that it is accurate in practice. Finally, we build some further analyses like termination using the covering set as starting point.},
author = {Zufferey, Damien},
pages = {134},
publisher = {IST Austria},
title = {{Analysis of dynamic message passing programs}},
year = {2013},
}
@phdthesis{1406,
abstract = {Epithelial spreading is a critical part of various developmental and wound repair processes. Here we use zebrafish epiboly as a model system to study the cellular and molecular mechanisms underlying the spreading of epithelial sheets. During zebrafish epiboly the enveloping cell layer (EVL), a simple squamous epithelium, spreads over the embryo to eventually cover the entire yolk cell by the end of gastrulation. The EVL leading edge is anchored through tight junctions to the yolk syncytial layer (YSL), where directly adjacent to the EVL margin a contractile actomyosin ring is formed that is thought to drive EVL epiboly. The prevalent view in the field was that the contractile ring exerts a pulling force on the EVL margin, which pulls the EVL towards the vegetal pole. However, how this force is generated and how it affects EVL morphology still remains elusive. Moreover, the cellular mechanisms mediating the increase in EVL surface area, while maintaining tissue integrity and function are still unclear. Here we show that the YSL actomyosin ring pulls on the EVL margin by two distinct force-generating mechanisms. One mechanism is based on contraction of the ring around its circumference, as previously proposed. The second mechanism is based on actomyosin retrograde flows, generating force through resistance against the substrate. The latter can function at any epiboly stage even in situations where the contraction-based mechanism is unproductive. Additionally, we demonstrate that during epiboly the EVL is subjected to anisotropic tension, which guides the orientation of EVL cell division along the main axis (animal-vegetal) of tension. The influence of tension in cell division orientation involves cell elongation and requires myosin-2 activity for proper spindle alignment. Strikingly, we reveal that tension-oriented cell divisions release anisotropic tension within the EVL and that in the absence of such divisions, EVL cells undergo ectopic fusions.
We conclude that forces applied to the EVL by the action of the YSL actomyosin ring generate a tension anisotropy in the EVL that orients cell divisions, which in turn limit tissue tension increase thereby facilitating tissue spreading.},
author = {Campinho, Pedro},
pages = {123},
publisher = {IST Austria},
title = {{Mechanics of zebrafish epiboly: Tension-oriented cell divisions limit anisotropic tissue tension in epithelial spreading}},
year = {2013},
}
@phdthesis{2964,
abstract = {CA3 pyramidal neurons are important for memory formation and pattern completion in the hippocampal network. These neurons receive multiple excitatory inputs from numerous sources. Therefore, the rules of spatiotemporal integration of multiple synaptic inputs and propagation of action potentials are important to understand how CA3 neurons contribute to higher brain functions at cellular level. By using confocally targeted patch-clamp recording techniques, we investigated the biophysical properties of rat CA3 pyramidal neuron dendrites. We found two distinct dendritic domains critical for action potential initiation and propagation: In the proximal domain, action potentials initiated in the axon backpropagate actively with large amplitude and fast time course. In the distal domain, Na+-channel mediated dendritic spikes are efficiently evoked by local dendritic depolarization or waveforms mimicking synaptic events. These findings can be explained by a high Na+-to-K+ conductance density ratio of CA3 pyramidal neuron dendrites. The results challenge the prevailing view that proximal mossy fiber inputs activate CA3 pyramidal neurons more efficiently than distal perforant inputs by showing that the distal synapses trigger a different form of activity represented by dendritic spikes. The high probability of dendritic spike initiation in the distal area may enhance the computational power of CA3 pyramidal neurons in the hippocampal network. },
author = {Kim, Sooyun},
pages = {65},
publisher = {IST Austria},
title = {{Active properties of hippocampal CA3 pyramidal neuron dendrites}},
year = {2012},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de‐adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by inside-out activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19.
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in three-dimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined three-dimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP.
The result was that chemotactic DCs can switch between integrin-dependent and integrin-independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblasts cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrin-mediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue. },
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@phdthesis{2075,
abstract = {This thesis investigates the combination of data-driven and physically based techniques for acquiring, modeling, and animating deformable materials, with a special focus on human faces. Furthermore, based on these techniques, we introduce a data-driven process for designing and fabricating materials with desired deformation behavior.
Realistic simulation behavior, surface details, and appearance are still demanding tasks. Neither pure data-driven, pure procedural, nor pure physical methods are best suited for accurate synthesis of facial motion and details (both for appearance and geometry), due to the difficulties in model design, parameter estimation, and desired controllability for animators. Capturing of a small but representative amount of real data, and then synthesizing diverse on-demand examples with physically-based models and real data as input benefits from both sides: Highly realistic model behavior due to real-world data and controllability due to physically-based models.
To model the face and its behavior, hybrid physically-based and data-driven approaches are elaborated. We investigate surface-based representations as well as a solid representation based on FEM. To achieve realistic behavior, we propose to build light-weighted data capture devices to acquire real-world data to estimate model parameters and to employ concepts from data-driven modeling techniques and machine learning. The resulting models support simple acquisition systems, offer techniques to process and extract model parameters from real-world data, provide a compact representation of the facial geometry and its motion, and allow intuitive editing. We demonstrate applications such as capture of facial geometry and motion and real-time animation and transfer of facial details, and show that our soft tissue model can react to external forces and produce realistic deformations beyond facial expressions.
Based on this model, we furthermore introduce a data-driven process for designing and fabricating materials with desired deformation behavior. The process starts with measuring deformation properties of base materials. Each material is represented as a non-linear stress-strain relationship in a finite-element model. For material design and fabrication, we introduce an optimization process that finds the best combination of base materials that meets a user’s criteria specified by example deformations. Our algorithm employs a number of strategies to prune poor solutions from the combinatorial search space. We finally demonstrate the complete process by designing and fabricating objects with complex heterogeneous materials using modern multi-material 3D printers.
},
author = {Bickel, Bernd},
booktitle = {Unknown},
number = {7458},
publisher = {ETH Zurich},
title = {{Measurement-based modeling and fabrication of deformable materials for human faces}},
doi = {10.3929/ethz-a-006354908},
volume = {499},
year = {2010},
}
@phdthesis{3962,
author = {Pflicke, Holger},
publisher = {IST Austria},
title = {{Dendritic cell migration across basement membranes in the skin}},
year = {2010},
}
@phdthesis{3296,
abstract = {Accurate computational representations of highly deformable surfaces are indispensable in the fields of computer animation, medical simulation, computer vision, digital modeling, and computational physics. The focus of this dissertation is on the animation of physics-based phenomena with highly detailed deformable surfaces represented by triangle meshes.
We first present results from an algorithm that generates continuum mechanics animations with intricate surface features. This method combines a finite element method with a tetrahedral mesh generator and a high resolution surface mesh, and it is orders of magnitude more efficient than previous approaches. Next, we present an efficient solution for the challenging problem of computing topological changes in detailed dynamic surface meshes. We then introduce a new physics-inspired surface tracking algorithm that is capable of preserving arbitrarily thin features and reproducing realistic fine-scale topological changes like Rayleigh-Plateau instabilities. This physics-inspired surface tracking technique also opens the door for a unique coupling between surficial finite element methods and volumetric finite difference methods, in order to simulate liquid surface tension phenomena more efficiently than any previous method. Due to its dramatic increase in computational resolution and efficiency, this method yielded the first computer simulations of a fully developed crown splash with droplet pinch off.},
author = {Wojtan, Chris},
pages = {1 -- 175},
publisher = {Georgia Institute of Technology},
title = {{Animating physical phenomena with embedded surface meshes}},
year = {2010},
}
@phdthesis{4232,
author = {de Vladar, Harold},
publisher = {Faculty of mathematical and natural sciences, University of Groningen},
title = {{Stochasticity and Variability in the dynamics and genetics of populations}},
doi = {3811},
year = {2009},
}
@phdthesis{4363,
author = {Singh, Vasu},
booktitle = {Formalizing and Verifying Transactional Memories},
publisher = {EPFL Lausanne},
title = {{Formalizing and Verifying Transactional Memories}},
year = {2009},
}
@phdthesis{3400,
abstract = {Invasive fungal infections pose a serious threat to immunocompromised people. Most of these infections are caused by either Candida or Aspergillus species, with A. fumigatus being the predominant causative agent of Invasive Aspergillosis. Affected people comprise mainly haematopoietic stem cell or solid organ transplant patients who receive either high-dose corticosteroids or immunosuppressants. These risk factors predispose to the development of Invasive
Aspergillosis which is lethal in 20 to 80 % of the cases, largely due to insufficient efficacy of current antifungal therapy. Thus one major aim in current mycological research is the identification of new drug targets.
The polysaccharide-based fungal cell wall is both essential to fungi and absent from human cells which makes it appear an attractive new target. Notably, many components of the A. fumigatus cell wall, including the polysaccharide galactomannan, glycoproteins, and glycolipids, contain the unusual sugar galactofuranose (Galf). In contrast to the other cell wall monosaccharides, Galf does not occur on human cells but is known as component of cell surface molecules of many pathogenic bacteria and protozoa, such as Mycobacterium tuberculosis or Leishmania major. These molecules are often essential for virulence or viability of these organisms which suggested a possible role of Galf in the pathogenicity of A. fumigatus.
To address the importance of Galf in A. fumigatus, the key biosynthesis gene glfA, encoding UDPgalactopyranose mutase (UGM), was deleted. In different experimental approaches it was demonstrated that the absence of the glfA gene led to a complete loss of Galf-containing glycans.
Analysis of the DeltaglfA phenotype revealed growth and sporulation defects, reduced thermotolerance and an increased susceptibility to antifungal drugs. Electron Microscopy indicated a cell wall defect as a likely cause for the observed impairments. Furthermore, the virulence of the DeltaglfA mutant was found to be severely attenuated in a murine model of Invasive Aspergillosis.
The second focus of this study was laid on further elucidation of the galactofuranosylation pathway in A. fumigatus. In eukaryotes, a UDP-Galf transporter is likely required to transport UDP-Galf from the
cytosol into the organelles of the secretory pathway, but no such activity had been described. Sixteen candidate genes were identified in the A. fumigatus genome of which one, glfB, was found in close proximity to the glfA gene. In vitro transport assays revealed specificity of GlfB for UDP-Galf suggesting that glfB encoded indeed a UDP-Galf transporter. The influence of glfB on
galactofuranosylation was determined by a DeltaglfB deletion mutant, which closely recapitulated the DeltaglfA phenotype and was likewise found to be completely devoid of Galf. It could be concluded that all galactofuranosylation processes in A. fumigatus occur in the secretory pathway, including the biosynthesis of the cell wall polysaccharide galactomannan whose subcellular origin was previously disputed.
Thus in the course of this study the first UDP-Galf specific nucleotide sugar transporter was identified and its requirement for galactofuranosylation in A. fumigatus demonstrated. Moreover, it was shown that blocking the galactofuranosylation pathway impaired virulence of A. fumigatus which suggests the UDP-Galf biosynthesis enzyme UGM as a target for new antifungal drugs.},
author = {Schmalhorst, Philipp},
pages = {1 -- 72},
publisher = {Gottfried Wilhelm Leibniz Universität Hannover},
title = {{Biosynthesis of Galactofuranose Containing Glycans and Their Relevance for the Pathogenic Fungus Aspergillus fumigatus}},
year = {2009},
}
@phdthesis{4409,
abstract = {Models of timed systems must incorporate not only the sequence of system events, but the timings of these events as well to capture the real-time aspects of physical systems. Timed automata are models of real-time systems in which states consist of discrete locations and values for real-time clocks. The presence of real-time clocks leads to an uncountable state space. This thesis studies verification problems on timed automata in a game theoretic framework.
For untimed systems, two systems are close if every sequence of events of one system is also observable in the second system. For timed systems, the difference in timings of the two corresponding sequences is also of importance. We propose the notion of bisimulation distance which quantifies timing differences; if the bisimulation distance between two systems is epsilon, then (a) every sequence of events of one system has a corresponding matching sequence in the other, and (b) the timings of matching events in between the two corresponding traces do not differ by more than epsilon. We show that we can compute the bisimulation distance between two timed automata to within any desired degree of accuracy. We also show that the timed verification logic TCTL is robust with respect to our notion of quantitative bisimilarity, in particular, if a system satisfies a formula, then every close system satisfies a close formula.
Timed games are used for distinguishing between the actions of several agents, typically a controller and an environment. The controller must achieve its objective against all possible choices of the environment. The modeling of the passage of time leads to the presence of zeno executions, and corresponding unrealizable strategies of the controller which may achieve objectives by blocking time. We disallow such unreasonable strategies by restricting all agents to use only receptive strategies --strategies which while not being required to ensure time divergence by any agent, are such that no agent is responsible for blocking time. Time divergence is guaranteed when all players use receptive strategies. We show that timed automaton games with receptive strategies can be solved by a reduction to finite state turn based game graphs. We define the logic timed alternating-time temporal logic for verification of timed automaton games and show that the logic can be model checked in EXPTIME. We also show that the minimum time required by an agent to reach a desired location, and the maximum time an agent can stay safe within a set of locations, against all possible actions of its adversaries are both computable.
We next study the memory requirements of winning strategies for timed automaton games. We prove that finite memory strategies suffice for safety objectives, and that winning strategies for reachability objectives may require infinite memory in general. We introduce randomized strategies in which an agent can propose a probabilistic distribution of moves and show that finite memory randomized strategies suffice for all omega-regular objectives. We also show that while randomization helps in simplifying winning strategies, and thus allows the construction of simpler controllers, it does not help a player in winning at more states, and thus does not allow the construction of more powerful controllers.
Finally we study robust winning strategies in timed games. In a physical system, a controller may propose an action together with a time delay, but the action cannot be assumed to be executed at the exact proposed time delay. We present robust strategies which incorporate such jitters and show that the set of states from which an agent can win robustly is computable.},
author = {Prabhu, Vinayak S},
pages = {1 -- 137},
publisher = {University of California, Berkeley},
title = {{Games for the verification of timed systems}},
year = {2008},
}
@phdthesis{4415,
abstract = {Many computing applications, especially those in safety critical embedded systems, require highly predictable timing properties. However, time is often not present in the prevailing computing and networking abstractions. In fact, most advances in computer architecture, software, and networking favor average-case performance over timing predictability. This thesis studies several methods for the design of concurrent and/or distributed embedded systems with precise timing guarantees. The focus is on flexible and compositional methods for programming and verification of the timing properties. The presented methods together with related formalisms cover two levels of design: (1) Programming language/model level. We propose the distributed variant of Giotto, a coordination programming language with an explicit temporal semantics—the logical execution time (LET) semantics. The LET of a task is an interval of time that specifies the time instants at which task inputs and outputs become available (task release and termination instants). The LET of a task is always non-zero. This allows us to communicate values across the network without changing the timing information of the task, and without introducing nondeterminism. We show how this methodology supports distributed code generation for distributed real-time systems. The method gives up some performance in favor of composability and predictability. We characterize the tradeoff by comparing the LET semantics with the semantics used in Simulink. (2) Abstract task graph level. We study interface-based design and verification of applications represented with task graphs. We consider task sequence graphs with general event models, and cyclic graphs with periodic event models with jitter and phase. Here an interface of a component exposes time and resource constraints of the component. Together with interfaces we formally define interface composition operations and the refinement relation. 
For efficient and flexible composability checking two properties are important: incremental design and independent refinement. According to the incremental design property the composition of interfaces can be performed in any order, even if interfaces for some components are not known. The refinement relation is defined such that in a design we can always substitute a refined interface for an abstract one. We show that the framework supports independent refinement, i.e., the refinement relation is preserved under composition operations.},
author = {Matic, Slobodan},
pages = {1 -- 148},
publisher = {University of California, Berkeley},
title = {{Compositionality in deterministic real-time embedded systems}},
year = {2008},
}
@phdthesis{4524,
abstract = {Complex requirements, time-to-market pressure and regulatory constraints have made the designing of embedded systems extremely challenging. This is evident by the increase in effort and expenditure for design of safety-driven real-time control-dominated applications like automotive and avionic controllers. Design processes are often challenged by lack of proper programming tools for specifying and verifying critical requirements (e.g. timing and reliability) of such applications. Platform based design, an approach for designing embedded systems, addresses the above concerns by separating requirement from architecture. The requirement specifies the intended behavior of an application while the architecture specifies the guarantees (e.g. execution speed, failure rate etc). An implementation, a mapping of the requirement on the architecture, is then analyzed for correctness. The orthogonalization of concerns makes the specification and analyses simpler. An effective use of such design methodology has been proposed in Logical Execution Time (LET) model of real-time tasks. The model separates the timing requirements (specified by release and termination instances of a task) from the architecture guarantees (specified by worst-case execution time of the task).
This dissertation proposes a coordination language, Hierarchical Timing Language (HTL), that captures the timing and reliability requirements of real-time applications. An implementation of the program on an architecture is then analyzed to check whether desired timing and reliability requirements are met or not. The core framework extends the LET model by accounting for reliability and refinement. The reliability model separates the reliability requirements of tasks from the reliability guarantees of the architecture. The requirement expresses the desired long-term reliability while the architecture provides a short-term reliability guarantee (e.g. failure rate for each iteration). The analysis checks if the short-term guarantee ensures the desired long-term reliability. The refinement model allows replacing a task by another task during program execution. Refinement preserves schedulability and reliability, i.e., if a refined task is schedulable and reliable for an implementation, then the refining task is also schedulable and reliable for the implementation. Refinement helps in concise specification without overloading analysis.
The work presents the formal model, the analyses (both with and without refinement), and a compiler for HTL programs. The compiler checks composition and refinement constraints, performs schedulability and reliability analyses, and generates code for implementation of an HTL program on a virtual machine. Three real-time controllers, one each from automatic control, automotive control and avionic control, are used to illustrate the steps in modeling and analyzing HTL programs.},
author = {Ghosal, Arkadeb},
pages = {1 -- 210},
publisher = {University of California, Berkeley},
title = {{A hierarchical coordination language for reliable real-time tasks}},
year = {2008},
}
@phdthesis{4559,
abstract = {We study games played on graphs with omega-regular conditions specified as parity, Rabin, Streett or Muller conditions. These games have applications in the verification, synthesis, modeling, testing, and compatibility checking of reactive systems. Important distinctions between graph games are as follows: (a) turn-based vs. concurrent games, depending on whether at a state of the game only a single player makes a move, or players make moves simultaneously; (b) deterministic vs. stochastic, depending on whether the transition function is a deterministic or a probabilistic function over successor states; and (c) zero-sum vs. non-zero-sum, depending on whether the objectives of the players are strictly conflicting or not.
We establish that the decision problems for turn-based stochastic zero-sum games with Rabin, Streett, and Muller objectives are NP-complete, coNP-complete, and PSPACE-complete, respectively, substantially improving the previously known 3EXPTIME bound. We also present strategy improvement style algorithms for turn-based stochastic Rabin and Streett games. In the case of concurrent stochastic zero-sum games with parity objectives we obtain a PSPACE bound, again improving the previously known 3EXPTIME bound. As a consequence, concurrent stochastic zero-sum games with Rabin, Streett, and Muller objectives can be solved in EXPSPACE, improving the previously known 4EXPTIME bound. We also present an elementary and combinatorial proof of the existence of memoryless \epsilon-optimal strategies in concurrent stochastic games with reachability objectives, for all real \epsilon>0, where an \epsilon-optimal strategy achieves the value of the game within \epsilon against all strategies of the opponent. We also use the proof techniques to present a strategy improvement style algorithm for concurrent stochastic reachability games.
We then go beyond \omega-regular objectives and study the complexity of an important class of quantitative objectives, namely, limit-average objectives. In the case of limit-average games, the states of the graph are labeled with rewards and the goal is to maximize the long-run average of the rewards. We show that concurrent stochastic zero-sum games with limit-average objectives can be solved in EXPTIME.
Finally, we introduce a new notion of equilibrium, called secure equilibrium, in non-zero-sum games which captures the notion of conditional competitiveness. We prove the existence of unique maximal secure equilibrium payoff profiles in turn-based deterministic games, and present algorithms to compute such payoff profiles. We also show how the notion of secure equilibrium extends the assume-guarantee style of reasoning in the game theoretic framework.},
author = {Chatterjee, Krishnendu},
pages = {1 -- 247},
publisher = {University of California, Berkeley},
title = {{Stochastic ω-Regular Games}},
year = {2007},
}
@phdthesis{4566,
abstract = {Complex system design today calls for compositional design and implementation. However each component is designed with certain assumptions about the environment it is meant to operate in, and delivering certain guarantees if those assumptions are satisfied; numerous inter-component interaction errors are introduced in the manual and error-prone integration process as there is little support in design environments for machine-readably representing these assumptions and guarantees and automatically checking consistency during integration.
Based on Interface Automata we propose a framework for compositional design and analysis of systems: a set of domain-specific automata-theoretic type systems for compositional system specification and analysis by behavioral specification of open systems. We focus on three different domains: component-based hardware systems communicating on bidirectional wires. concurrent distributed recursive message-passing software systems, and embedded software system components operating in resource-constrained environments. For these domains we present approaches to formally represent the assumptions and conditional guarantees between interacting open system components. Composition of such components produces new components with the appropriate assumptions and guarantees. We check satisfaction of temporal logic specifications by such components, and the substitutability of one component with another in an arbitrary context. Using this framework one can analyze large systems incrementally without needing extensive summary information to close the system at each stage. Furthermore, we focus only on the inter-component interaction behavior without dealing with the full implementation details of each component. Many of the merits of automata-theoretic model-checking are combined with the compositionality afforded by type-system based techniques. We also present an integer-based extension of the conventional boolean verification framework motivated by our interface formalism for embedded software components.
Our algorithms for checking the behavioral compatibility of component interfaces are available in our tool Chic, which can be used as a plug-in for the Java IDE JBuilder and the heterogenous modeling and design environment Ptolemy II.
Finally, we address the complementary problem of partitioning a large system into meaningful coherent components by analyzing the interaction patterns between its basic elements. We demonstrate the usefulness of our partitioning approach by evaluating its efficacy in improving unit-test branch coverage for a large software system implemented in C.},
author = {Chakrabarti, Arindam},
pages = {1 -- 244},
publisher = {University of California, Berkeley},
title = {{A framework for compositional design and analysis of systems}},
year = {2007},
}
@phdthesis{4236,
author = {de Vladar, Harold Paul},
publisher = {Centro de estudios avanzados, IVIC},
title = {{Métodos no lineales y sus aplicaciones en dinámicas aleatorias de poblaciones celulares}},
doi = {3810},
year = {2004},
}
@phdthesis{4424,
abstract = {The enormous cost and ubiquity of software errors necessitates the need for techniques and tools that can precisely analyze large systems and prove that they meet given specifications, or if they don't, return counterexample behaviors showing how the system fails. Recent advances in model checking, decision procedures, program analysis and type systems, and a shift of focus to partial specifications common to several systems (e.g., memory safety and race freedom) have resulted in several practical verification methods. However, these methods are either precise or they are scalable, depending on whether they track the values of variables or only a fixed small set of dataflow facts (e.g., types), and are usually insufficient for precisely verifying large programs.
We describe a new technique called Lazy Abstraction (LA) which achieves both precision and scalability by localizing the use of precise information. LA automatically builds, explores and refines a single abstract model of the program in a way that different parts of the model exhibit different degrees of precision, namely just enough to verify the desired property. The algorithm automatically mines the information required by partitioning mechanical proofs of unsatisfiability of spurious counterexamples into Craig Interpolants. For multithreaded systems, we give a new technique based on analyzing the behavior of a single thread executing in a context which is an abstraction of the other (arbitrarily many) threads. We define novel context models and show how to automatically infer them and analyze the full system (thread + context) using LA.
LA is implemented in BLAST. We have run BLAST on Windows and Linux Device Drivers to verify API conformance properties, and have used it to find (or guarantee the absence of) data races in multithreaded Networked Embedded Systems (NESC) applications. BLAST is able to prove the absence of races in several cases where earlier methods, which depend on lock-based synchronization, fail.},
author = {Jhala, Ranjit},
pages = {1 -- 165},
publisher = {University of California, Berkeley},
title = {{Program verification by lazy abstraction}},
year = {2004},
}
@phdthesis{2414,
author = {Wagner, Uli},
publisher = {ETH Zurich},
title = {{On k-Sets and Their Applications}},
doi = {10.3929/ethz-a-004708408},
year = {2003},
}
@phdthesis{4416,
abstract = {Methods for the formal specification and verification of systems are indispensible for the development of complex yet correct systems. In formal verification, the designer describes the system in a modeling language with a well-defined semantics, and this system description is analyzed against a set of correctness requirements. Model checking is an algorithmic technique to check that a system description indeed satisfies correctness requirements given as logical specifications. While successful in hardware verification, the potential for model checking for software and embedded systems has not yet been realized. This is because traditional model checking focuses on systems modeled as finite state-transition graphs. While a natural model for hardware (especially synchronous hardware), state-transition graphs often do not capture software and embedded systems at an appropriate level of granularity. This dissertation considers two orthogonal extensions to finite state-transition graphs making model checking techniques applicable to both a wider class of systems and a wider class of properties.
The first direction is an extension to infinite-state structures finitely represented using constraints and operations on constraints. Infinite state arises when we wish to model variables with unbounded range (e.g., integers), or data structures, or real time. We provide a uniform framework of symbolic region algebras to study model checking of infinite-state systems. We also provide sufficient language-independent termination conditions for symbolic model checking algorithms on infinite state systems.
The second direction supplements verification with game theoretic reasoning. Games are natural models for interactions between components. We study game theoretic behavior with winning conditions given by temporal logic objectives both in the deterministic and in the probabilistic context. For deterministic games, we provide an extremal model characterization of fixpoint algorithms that link solutions of verification problems to solutions for games. For probabilistic games we study fixpoint characterization of winning probabilities for games with omega-regular winning objectives, and construct (epsilon-)optimal winning strategies.},
author = {Majumdar, Ritankar S},
pages = {1 -- 201},
publisher = {University of California, Berkeley},
title = {{Symbolic algorithms for verification and control}},
year = {2003},
}
@phdthesis{4425,
abstract = {Giotto provides a time-triggered programmer’s model for the implementation of embedded control systems with hard real-time constraints. Giotto’s precise semantics and predictability make it suitable for safety-critical applications.
Giotto is based around the idea that time-triggered task invocation together with time-triggered mode switching can form a useful programming model for real-time systems. To substantiate this claim, we describe the use of Giotto to refactor the software of a small, autonomous helicopter. The ease with which Giotto expresses the existing software provides evidence that Giotto is an appropriate programming language for control systems.
Since Giotto is a real-time programming language, ensuring that Giotto programs meet their deadlines is crucial. To study precedence-constrained Giotto scheduling, we first examine single-mode, single-processor scheduling. We extend to an infinite, periodic setting the classical problem of meeting deadlines for a set of tasks with release times, deadlines, precedence constraints, and preemption. We then develop an algorithm for scheduling Giotto programs on a single processor by representing Giotto programs as instances of the extended scheduling problem.
Next, we study multi-mode, single-processor Giotto scheduling. This problem is different from classical scheduling problems, since in our precedence-constrained approach, the deadlines of tasks may vary depending on the mode switching behavior of the program. We present conditional scheduling models which capture this varying-deadline behavior. We develop polynomial-time algorithms for some conditional scheduling models, and prove others to be computationally hard. We show how to represent multi-mode Giotto programs as instances of the model, resulting in an algorithm for scheduling multi-mode Giotto programs on a single processor.
Finally, we show that the problem of scheduling Giotto programs for multiple networked processors is strongly NP-hard.
author = {Horowitz, Benjamin},
pages = {1 -- 237},
publisher = {University of California, Berkeley},
title = {{Giotto: A time-triggered language for embedded programming}},
year = {2003},
}
@phdthesis{3678,
author = {Lampert, Christoph},
booktitle = {Bonner Mathematische Schriften},
pages = {1 -- 165},
publisher = {Universität Bonn, Fachbibliothek Mathematik},
title = {{The Neumann operator in strictly pseudoconvex domains with weighted Bergman metric}},
volume = {356},
year = {2003},
}
@phdthesis{4414,
abstract = {This dissertation investigates game-theoretic approaches to the algorithmic analysis of concurrent, reactive systems. A concurrent system comprises a number of components working concurrently; a reactive system maintains an ongoing interaction with its environment. Traditional approaches to the formal analysis of concurrent reactive systems usually view the system as an unstructured state-transition graph; instead, we view them as collections of interacting components, where each one is an open system which accepts inputs from the other components. The interactions among the components are naturally modeled as games.
Adopting this game-theoretic view, we study three related problems pertaining to the verification and synthesis of systems. Firstly, we propose two novel game-theoretic techniques for the model-checking of concurrent reactive systems, and improve the performance of model-checking. The first technique discovers an error as soon as it cannot be prevented, which can be long before it actually occurs. This technique is based on the key observation that "unpreventability" is a local property to a module: an error is unpreventable in a module state if no environment can prevent it. The second technique attempts to decompose a model-checking proof into smaller proof obligations by constructing abstract modules automatically, using reachability and "unpreventability" information about the concrete modules. Three increasingly powerful proof decomposition rules are proposed and we show that in practice, the resulting abstract modules are often significantly smaller than the concrete modules and can drastically reduce the space and time requirements for verification. Both techniques fall into the category of compositional reasoning.
Secondly, we investigate the composition and control of synchronous systems. An essential property of synchronous systems for compositional reasoning is non-blocking. In the composition of synchronous systems, however, due to circular causal dependency of input and output signals, non-blocking is not always guaranteed. Blocking compositions of systems can be ruled out semantically, by insisting on the existence of certain fixed points, or syntactically, by equipping systems with types, which make the dependencies between input and output signals transparent. We characterize various typing mechanisms in game-theoretic terms, and study their effects on the controller synthesis problem. We show that our typing systems are general enough to capture interesting real-life synchronous systems such as all delay-insensitive digital circuits. We then study their corresponding single-step control problems --a restricted form of controller synthesis problem whose solutions can be iterated in appropriate manners to solve all LTL controller synthesis problems. We also consider versions of the controller synthesis problem in which the type of the controller is given. We show that the solution of these fixed-type control problems requires the evaluation of partially ordered (Henkin) quantifiers on boolean formulas, and is therefore harder (nondeterministic exponential time) than more traditional control questions.
Thirdly, we study the synthesis of a class of open systems, namely, uninitialized state machines. The sequential synthesis problem, which is closely related to Church's solvability problem, asks, given a specification in the form of a binary relation between input and output streams, for the construction of a finite-state stream transducer that converts inputs to appropriate outputs. For efficiency reasons, practical sequential hardware is often designed to operate without prior initialization. Such hardware designs can be modeled by uninitialized state machines, which are required to satisfy their specification if started from any state. We solve the sequential synthesis problem for uninitialized systems, that is, we construct uninitialized finite-state stream transducers. We consider specifications given by LTL formulas, deterministic, nondeterministic, universal, and alternating Buechi automata. We solve this uninitialized synthesis problem by reducing it to the well-understood initialized synthesis problem. While our solution is straightforward, it leads, for some specification formalisms, to upper bounds that are exponentially worse than the complexity of the corresponding initialized problems. However, we prove lower bounds to show that our simple solutions are optimal for all considered specification formalisms. The lower bound proofs require nontrivial generic reductions.},
author = {Mang, Freddy Y},
pages = {1 -- 116},
publisher = {University of California, Berkeley},
title = {{Games in open systems verification and synthesis}},
year = {2002},
}
@phdthesis{4411,
abstract = {Model checking algorithms for the verification of reactive systems proceed by a systematic and exhaustive exploration of the system state space. They do not scale to large designs because of the state explosion problem --the number of states grows exponentially with the number of components in the design. Consequently, the model checking problem is PSPACE-hard in the size of the design description. This dissertation proposes three novel techniques to combat the state explosion problem.
One of the most important advances in model checking in recent years has been the discovery of symbolic methods, which use a calculus of expressions, such as binary decision diagrams, to represent the state sets encountered during state space exploration. Symbolic model checking has proved to be effective for verifying hardware designs. Traditionally, symbolic checking of temporal logic specifications is performed by backward fixpoint reasoning with the operator Pre. Backward reasoning can be wasteful since unreachable states are explored. We suggest the use of forward fixpoint reasoning based on the operator Post. We show how all linear temporal logic specifications can be model checked symbolically by forward reasoning. In contrast to backward reasoning, forward reasoning performs computations only on the reachable states.
Heuristics that improve algorithms for application domains, such as symbolic methods for hardware designs, are useful but not enough to make model checking feasible on industrial designs. Currently, exhaustive state exploration is possible only on designs with about 50-100 boolean state variables. Assume-guarantee verification attempts to combat the state explosion problem by using the principle of "divide and conquer," where the components of the implementation are analyzed one at a time. Typically, an implementation component refines its specification only when its inputs are suitably constrained by other components in the implementation. The assume-guarantee principle states that instead of constraining the inputs by implementation components, it is sound to constrain them by the corresponding specification components, which can be significantly smaller. We extend the assume-guarantee proof rule to deal with the case where the specification operates at a coarser time scale than the implementation. Using our model checker Mocha, which implements this methodology, we verify VGI, a parallel DSP processor chip with 64 compute processors each containing approximately 800 state variables and 30K gates.
Our third contribution is a systematic model checking methodology for verifying the abstract shared-memory interface of sequential consistency on multiprocessor systems with three parameters --number of processors, number of memory locations, and number of data values. Sequential consistency requires that some interleaving of the local temporal orders of read/write events at different processors be a trace of serial memory. Therefore, it suffices to construct a non-interfering serializer that watches and reorders read/write events so that a trace of serial memory is obtained. While in general such a serializer must be unbounded even for fixed values of the parameters --checking sequential consistency is undecidable!-- we show that the paradigmatic class of snoopy cache coherence protocols has finite-state serializers. In order to reduce the arbitrary-parameter problem to the fixed-parameter problem, we develop a novel framework for induction over the number of processors and use the notion of a serializer to reduce the problem of verifying sequential consistency to that of checking language inclusion between finite state machines.},
author = {Qadeer, Shaz},
pages = {1 -- 150},
publisher = {University of California, Berkeley},
title = {{Algorithms and Methodology for Scalable Model Checking}},
year = {1999},
}
@phdthesis{4419,
author = {Kopke, Peter W},
publisher = {Cornell University},
title = {{The Theory of Rectangular Hybrid Automata}},
year = {1996},
}
@phdthesis{4428,
abstract = {Hybrid systems are real-time systems that react to both discrete and continuous activities (such as analog signals, time, temperature, and speed). Typical examples of hybrid systems are embedded systems, timing-based communication protocols, and digital circuits at the transistor level. Due to the rapid development of microprocessor technology, hybrid systems directly control much of what we depend on in our daily lives. Consequently, the formal specification and verification of hybrid systems has become an active area of research. This dissertation presents the first general framework for the formal specification and verification of hybrid systems, as well as the first hybrid-system analysis tool--HyTech. The framework consists of a graphical finite-state-machine-like language for modeling hybrid systems, a temporal logic for modeling the requirements of hybrid systems, and a computer procedure that verifies modeled hybrid systems against modeled requirements. The tool HyTech is the implementation of the framework using C++ and Mathematica.
More specifically, our hybrid-system modeling language, Hybrid Automata, is an extension of timed automata with discrete and continuous variables whose dynamics are governed by differential equations. Our requirement modeling language, ICTL, is a branching-time temporal logic, and is an extension of TCTL with stop-watch variables. Our verification procedure is a symbolic model-checking procedure that verifies linear hybrid automata against ICTL formulas. To make HyTech more efficient and effective, we use model-checking strategies and abstract operators that can expedite the verification process. To enable HyTech to verify nonlinear hybrid automata, we introduce two translations from nonlinear hybrid automata to linear hybrid automata. We have applied HyTech to analyze more than 30 hybrid-system benchmarks. In this dissertation, we present the application of HyTech to three nontrivial hybrid systems taken from the literature.},
author = {Ho, Pei-Hsin},
pages = {1 -- 188},
publisher = {Cornell University},
title = {{Automatic Analysis of Hybrid Systems}},
number = {CSD-TR95-1536},
year = {1995},
}
@phdthesis{4516,
author = {Henzinger, Thomas},
publisher = {Stanford University},
title = {{The Temporal Specification and Verification of Real-time Systems}},
year = {1991},
}
@phdthesis{4337,
author = {Barton, Nicholas},
publisher = {University of East Anglia},
title = {{A hybrid zone in the alpine grasshopper Podisma pedestris}},
year = {1979},
}