@article{3085,
abstract = {Phototropism is an adaptation response, through which plants grow towards the light. It involves light perception and asymmetric distribution of the plant hormone auxin. Here we identify a crucial part of the mechanism for phototropism, revealing how light perception initiates auxin redistribution that leads to directional growth. We show that light polarizes the cellular localization of the auxin efflux carrier PIN3 in hypocotyl endodermis cells, resulting in changes in auxin distribution and differential growth. In the dark, high expression and activity of the PINOID (PID) kinase correlates with apolar targeting of PIN3 to all cell sides. Following illumination, light represses PINOID transcription and PIN3 is polarized specifically to the inner cell sides by GNOM ARF GTPase GEF (guanine nucleotide exchange factor)-dependent trafficking. Thus, differential trafficking at the shaded and illuminated hypocotyl side aligns PIN3 polarity with the light direction, and presumably redirects auxin flow towards the shaded side, where auxin promotes growth, causing hypocotyls to bend towards the light. Our results imply that PID phosphorylation-dependent recruitment of PIN proteins into distinct trafficking pathways is a mechanism to polarize auxin fluxes in response to different environmental and endogenous cues.},
author = {Ding, Zhaojun and Galván-Ampudia, Carlos S and Demarsy, Emilie and Łangowski, Łukasz and Kleine-Vehn, Jürgen and Fan, Yuanwei and Morita, Miyo T and Tasaka, Masao and Fankhauser, Christian and Offringa, Remko and Friml, Jiří},
journal = {Nature Cell Biology},
number = {4},
pages = {447--453},
publisher = {Nature Publishing Group},
title = {Light-mediated polarization of the {PIN3} auxin transporter for the phototropic response in {Arabidopsis}},
doi = {10.1038/ncb2208},
volume = {13},
year = {2011},
}
@article{3086,
abstract = {PIN-FORMED (PIN)-dependent auxin transport is essential for plant development and its modulation in response to the environment or endogenous signals. A NON-PHOTOTROPIC HYPOCOTYL 3 (NPH3)-like protein, MACCHI-BOU 4 (MAB4), has been shown to control PIN1 localization during organ formation, but its contribution is limited. The Arabidopsis genome contains four genes, MAB4/ENP/NPY1-LIKE1 (MEL1), MEL2, MEL3 and MEL4, highly homologous to MAB4. Genetic analysis disclosed functional redundancy between MAB4 and MEL genes in regulation of not only organ formation but also of root gravitropism, revealing that NPH3 family proteins have a wider range of functions than previously suspected. Multiple mutants showed severe reduction in PIN abundance and PIN polar localization, leading to defective expression of an auxin responsive marker DR5rev::GFP. Pharmacological analyses and fluorescence recovery after photo-bleaching experiments showed that mel mutations increase PIN2 internalization from the plasma membrane, but affect neither intracellular PIN2 trafficking nor PIN2 lateral diffusion at the plasma membrane. Notably, all MAB4 subfamily proteins show polar localization at the cell periphery in plants. The MAB4 polarity was almost identical to PIN polarity. Our results suggest that the MAB4 subfamily proteins specifically retain PIN proteins in a polarized manner at the plasma membrane, thus controlling directional auxin transport and plant development.},
author = {Furutani, Masahiko and Sakamoto, Norihito and Yoshida, Shuhei and Kajiwara, Takahito and Robert, Hélène S and Friml, Jiří and Tasaka, Masao},
journal = {Development},
number = {10},
pages = {2069--2078},
publisher = {Company of Biologists},
title = {Polar localized {NPH3}-like proteins regulate polarity and endocytosis of {PIN-FORMED} auxin efflux carriers},
doi = {10.1242/dev.057745},
volume = {138},
year = {2011},
}
@article{3087,
abstract = {Endocytosis is a crucial mechanism by which eukaryotic cells internalize extracellular and plasma membrane material, and it is required for a multitude of cellular and developmental processes in unicellular and multicellular organisms. In animals and yeast, the best characterized pathway for endocytosis depends on the function of the vesicle coat protein clathrin. Clathrin-mediated endocytosis has recently been demonstrated also in plant cells, but its physiological and developmental roles remain unclear. Here, we assessed the roles of the clathrin-mediated mechanism of endocytosis in plants by genetic means. We interfered with clathrin heavy chain (CHC) function through mutants and dominant-negative approaches in Arabidopsis thaliana and established tools to manipulate clathrin function in a cell type-specific manner. The chc2 single mutants and dominant-negative CHC1 (HUB) transgenic lines were defective in bulk endocytosis as well as in internalization of prominent plasma membrane proteins. Interference with clathrin-mediated endocytosis led to defects in constitutive endocytic recycling of PIN auxin transporters and their polar distribution in embryos and roots. Consistent with this, these lines had altered auxin distribution patterns and associated auxin transport-related phenotypes, such as aberrant embryo patterning, imperfect cotyledon specification, agravitropic growth, and impaired lateral root organogenesis. Together, these data demonstrate a fundamental role for clathrin function in cell polarity, growth, patterning, and organogenesis in plants.},
author = {Kitakura, Saeko and Vanneste, Steffen and Robert, Stéphanie and Löfke, Christian and Teichmann, Thomas and Tanaka, Hirokazu and Friml, Jiří},
journal = {Plant Cell},
number = {5},
pages = {1920--1931},
publisher = {American Society of Plant Biologists},
title = {Clathrin mediates endocytosis and polar distribution of {PIN} auxin transporters in {Arabidopsis}},
doi = {10.1105/tpc.111.083030},
volume = {23},
year = {2011},
}
@article{3088,
abstract = {Background: Whereas the majority of animals develop toward a predetermined body plan, plants show iterative growth and continually produce new organs and structures from actively dividing meristems. This raises an intriguing question: How are these newly developed organs patterned? In Arabidopsis embryos, radial symmetry is broken by the bisymmetric specification of the cotyledons in the apical domain. Subsequently, this bisymmetry is propagated to the root promeristem. Results: Here we present a mutually inhibitory feedback loop between auxin and cytokinin that sets distinct boundaries of hormonal output. Cytokinins promote the bisymmetric distribution of the PIN-FORMED (PIN) auxin efflux proteins, which channel auxin toward a central domain. High auxin promotes transcription of the cytokinin signaling inhibitor AHP6, which closes the interaction loop. This bisymmetric auxin response domain specifies the differentiation of protoxylem in a bisymmetric pattern. In embryonic roots, cytokinin is required to translate a bisymmetric auxin response in the cotyledons to a bisymmetric vascular pattern in the root promeristem. Conclusions: Our results present an interactive feedback loop between hormonal signaling and transport by which small biases in hormonal input are propagated into distinct signaling domains to specify the vascular pattern in the root meristem. It is an intriguing possibility that such a mechanism could transform radial patterns and allow continuous vascular connections between other newly emerging organs.},
author = {Bishopp, Anthony and Help, Hanna and El-Showk, Sedeer and Weijers, Dolf and Scheres, Ben and Friml, Jiří and Benková, Eva and Mähönen, Ari Pekka and Helariutta, Ykä},
journal = {Current Biology},
number = {11},
pages = {917--926},
publisher = {Cell Press},
title = {A mutually inhibitory interaction between auxin and cytokinin specifies vascular pattern in roots},
doi = {10.1016/j.cub.2011.04.017},
volume = {21},
year = {2011},
}
@article{3089,
abstract = {The phytohormone auxin is an important determinant of plant development. Directional auxin flow within tissues depends on polar localization of PIN auxin transporters. To explore regulation of PIN-mediated auxin transport, we screened for suppressors of PIN1 overexpression (supo) and identified an inositol polyphosphate 1-phosphatase mutant (supo1), with elevated inositol trisphosphate (InsP 3) and cytosolic Ca 2+ levels. Pharmacological and genetic increases in InsP 3 or Ca 2+ levels also suppressed the PIN1 gain-of-function phenotypes and caused defects in basal PIN localization, auxin transport and auxin-mediated development. In contrast, the reductions in InsP 3 levels and Ca 2+ signaling antagonized the effects of the supo1 mutation and disrupted preferentially apical PIN localization. InsP 3 and Ca 2+ are evolutionarily conserved second messengers involved in various cellular functions, particularly stress responses. Our findings implicate them as modifiers of cell polarity and polar auxin transport, and highlight a potential integration point through which Ca 2+ signaling-related stimuli could influence auxin-mediated development.},
author = {Zhang, Jing and Vanneste, Steffen and Brewer, Philip B and Michniewicz, Marta and Grones, Peter and Kleine-Vehn, Jürgen and Löfke, Christian and Teichmann, Thomas and Bielach, Agnieszka and Cannoot, Bernard and Hoyerová, Klára and Chen, Xu and Xue, Hong-Wei and Benková, Eva and Zažímalová, Eva and Friml, Jiří},
journal = {Developmental Cell},
number = {6},
pages = {855--866},
publisher = {Cell Press},
title = {Inositol trisphosphate-induced {Ca$^{2+}$} signaling modulates auxin transport and {PIN} polarity},
doi = {10.1016/j.devcel.2011.05.013},
volume = {20},
year = {2011},
}
@article{3090,
abstract = {The polarized transport of the phytohormone auxin [1], which is crucial for the regulation of different stages of plant development [2, 3], depends on the asymmetric plasma membrane distribution of the PIN-FORMED (PIN) auxin efflux carriers [4, 5]. The PIN polar localization results from clathrin-mediated endocytosis (CME) from the plasma membrane and subsequent polar recycling [6]. The Arabidopsis genome encodes two groups of dynamin-related proteins (DRPs) that show homology to mammalian dynamin - a protein required for fission of endocytic vesicles during CME [7, 8]. Here we show by coimmunoprecipitation (coIP), bimolecular fluorescence complementation (BiFC), and Förster resonance energy transfer (FRET) that members of the DRP1 group closely associate with PIN proteins at the cell plate. Localization and phenotypic analysis of novel drp1 mutants revealed a requirement for DRP1 function in correct PIN distribution and in auxin-mediated development. We propose that rapid and specific internalization of PIN proteins mediated by the DRP1 proteins and the associated CME machinery from the cell plate membranes during cytokinesis is an important mechanism for proper polar PIN positioning in interphase cells.},
author = {Mravec, Jozef and Petrášek, Jan and Li, Na and Boeren, Sjef and Karlova, Rumyana and Kitakura, Saeko and Pařezová, Markéta and Naramoto, Satoshi and Nodzyński, Thomasz and Dhonukshe, Pankaj and Bednarek, Sebastian Y and Zažímalová, Eva and De Vries, Sacco and Friml, Jiří},
journal = {Current Biology},
number = {12},
pages = {1055--1060},
publisher = {Cell Press},
title = {Cell plate restricted association of {DRP1A} and {PIN} proteins is required for cell polarity establishment in {Arabidopsis}},
doi = {10.1016/j.cub.2011.05.018},
volume = {21},
year = {2011},
}
@article{3091,
author = {Sauer, Michael and Friml, Jiří},
journal = {Molecular Systems Biology},
publisher = {Nature Publishing Group},
title = {Fleeting hormone cues get stabilized for plant organogenesis},
doi = {10.1038/msb.2011.45},
volume = {7},
year = {2011},
}
@article{3092,
abstract = {The phytohormone auxin is vital to plant growth and development. A unique property of auxin among all other plant hormones is its cell-to-cell polar transport that requires activity of polarly localized PIN-FORMED (PIN) auxin efflux transporters. Despite the substantial molecular insight into the cellular PIN polarization, the mechanistic understanding for developmentally and environmentally regulated PIN polarization is scarce. The long-standing belief that auxin modulates its own transport by means of a positive feedback mechanism has inspired both experimentalists and theoreticians for more than two decades. Recently, theoretical models for auxin-dependent patterning in plants include the feedback between auxin transport and the PIN protein localization. These computer models aid to assess the complexity of plant development by testing and predicting plausible scenarios for various developmental processes that occur in planta. Although the majority of these models rely on purely heuristic principles, the most recent mechanistic models tentatively integrate biologically testable components into known cellular processes that underlie the PIN polarity regulation. The existing and emerging computational approaches to describe PIN polarization are presented and discussed in the light of recent experimental data on the PIN polar targeting.},
author = {Wabnik, Krzysztof T and Govaerts, Willy and Friml, Jiří and Kleine-Vehn, Jürgen},
journal = {Molecular BioSystems},
number = {8},
pages = {2352--2359},
publisher = {Royal Society of Chemistry},
title = {Feedback models for polarized auxin transport: An emerging trend},
doi = {10.1039/c1mb05109a},
volume = {7},
year = {2011},
}
@article{3093,
abstract = {
Plants take up iron from the soil using the IRON-REGULATED TRANSPORTER 1 (IRT1) high-affinity iron transporter at the root surface. Sophisticated regulatory mechanisms allow plants to tightly control the levels of IRT1, ensuring optimal absorption of essential but toxic iron. Here, we demonstrate that overexpression of Arabidopsis thaliana IRT1 leads to constitutive IRT1 protein accumulation, metal overload, and oxidative stress. IRT1 is unexpectedly found in trans-Golgi network/early endosomes of root hair cells, and its levels and localization are unaffected by iron nutrition. Using pharmacological approaches, we show that IRT1 cycles to the plasma membrane to perform iron and metal uptake at the cell surface and is sent to the vacuole for proper turnover. We also prove that IRT1 is monoubiquitinated on several cytosol-exposed residues in vivo and that mutation of two putative monoubiquitination target residues in IRT1 triggers stabilization at the plasma membrane and leads to extreme lethality. Together, these data suggest a model in which monoubiquitin-dependent internalization/sorting and turnover keep the plasma membrane pool of IRT1 low to ensure proper iron uptake and to prevent metal toxicity. More generally, our work demonstrates the existence of monoubiquitin-dependent trafficking to lytic vacuoles in plants and points to proteasome-independent turnover of plasma membrane proteins.},
author = {Barberon, Marie and Zelazny, Enric and Robert, Stéphanie and Conéjéro, Geneviève and Curie, Cathy and Friml, Jiří and Vert, Grégory},
journal = {PNAS},
number = {32},
pages = {E450--E458},
publisher = {National Academy of Sciences},
title = {Monoubiquitin-dependent endocytosis of the {Iron Regulated Transporter 1} ({IRT1}) transporter controls iron uptake in plants},
doi = {10.1073/pnas.1100659108},
volume = {108},
year = {2011},
}
@article{3094,
abstract = {Gravitropism aligns plant growth with gravity. It involves gravity perception and the asymmetric distribution of the phytohormone auxin. Here we provide insights into the mechanism for hypocotyl gravitropic growth. We show that the Arabidopsis thaliana PIN3 auxin transporter is required for the asymmetric auxin distribution for the gravitropic response. Gravistimulation polarizes PIN3 to the bottom side of hypocotyl endodermal cells, which correlates with an increased auxin response at the lower hypocotyl side. Both PIN3 polarization and hypocotyl bending require the activity of the trafficking regulator GNOM and the protein kinase PINOID. Our data suggest that gravity-induced PIN3 polarization diverts the auxin flow to mediate the asymmetric distribution of auxin for gravitropic shoot bending.},
author = {Rakusová, Hana and Gallego-Bartolomé, Javier and Vanstraelen, Marleen and Robert, Hélène S and Alabadí, David and Blázquez, Miguel A and Benková, Eva and Friml, Jiří},
journal = {Plant Journal},
number = {5},
pages = {817--826},
publisher = {Wiley-Blackwell},
title = {Polarization of {PIN3}-dependent auxin transport for hypocotyl gravitropic response in {Arabidopsis thaliana}},
doi = {10.1111/j.1365-313X.2011.04636.x},
volume = {67},
year = {2011},
}
@article{3095,
abstract = {Root system architecture depends on lateral root (LR) initiation that takes place in a relatively narrow developmental window (DW). Here, we analyzed the role of auxin gradients established along the parent root in defining this DW for LR initiation. Correlations between auxin distribution and response, and spatiotemporal control of LR initiation were analyzed in Arabidopsis thaliana and tomato (Solanum lycopersicum). In both Arabidopsis and tomato roots, a well defined zone, where auxin content and response are minimal, demarcates the position of a DW for founder cell specification and LR initiation. We show that in the zone of auxin minimum pericycle cells have highest probability to become founder cells and that auxin perception via the TIR1/AFB pathway, and polar auxin transport, are essential for the establishment of this zone. Altogether, this study reveals that the same morphogen-like molecule, auxin, can act simultaneously as a morphogenetic trigger of LR founder cell identity and as a gradient-dependent signal defining positioning of the founder cell specification. This auxin minimum zone might represent an important control mechanism ensuring the LR initiation steadiness and the acropetal LR initiation pattern. © 2011 The Authors. New Phytologist © 2011 New Phytologist Trust.},
author = {Dubrovsky, Joseph G and Napsucialy-Mendivil, Selene and Duclercq, Jérôme and Cheng, Yan and Shishkova, Svetlana O and Ivanchenko, Maria G and Friml, Jiří and Murphy, Angus S and Benková, Eva},
journal = {New Phytologist},
number = {4},
pages = {970--983},
publisher = {Wiley-Blackwell},
title = {Auxin minimum defines a developmental window for lateral root initiation},
doi = {10.1111/j.1469-8137.2011.03757.x},
volume = {191},
year = {2011},
}
@article{3096,
abstract = {Carrier-dependent, intercellular auxin transport is central to the developmental patterning of higher plants (tracheophytes). The evolution of this polar auxin transport might be linked to the translocation of some PIN auxin efflux carriers from their presumably ancestral localization at the endoplasmic reticulum (ER) to the polar domains at the plasma membrane. Here we propose an eventually ancient mechanism of intercellular auxin distribution by ER-localized auxin transporters involving intracellular auxin retention and switch-like release from the ER. The proposed model integrates feedback circuits utilizing the conserved nuclear auxin signaling for the regulation of PIN transcription and a hypothetical ER-based signaling for the regulation of PIN-dependent transport activity at the ER. Computer simulations of the model revealed its plausibility for generating auxin channels and localized auxin maxima highlighting the possibility of this alternative mechanism for polar auxin transport.},
author = {Wabnik, Krzysztof T and Kleine-Vehn, Jürgen and Govaerts, Willy and Friml, Jiří},
journal = {Trends in Plant Science},
number = {9},
pages = {468--475},
publisher = {Cell Press},
title = {Prototype cell-to-cell auxin transport mechanism by intracellular auxin compartmentalization},
doi = {10.1016/j.tplants.2011.05.002},
volume = {16},
year = {2011},
}
@article{3097,
abstract = {Cytokinin is an important regulator of plant growth and development. In Arabidopsis thaliana, the two-component phosphorelay mediated through a family of histidine kinases and response regulators is recognized as the principal cytokinin signal transduction mechanism activating the complex transcriptional response to control various developmental processes. Here, we identified an alternative mode of cytokinin action that uses endocytic trafficking as a means to direct plant organogenesis. This activity occurs downstream of known cytokinin receptors but through a branch of the cytokinin signaling pathway that does not involve transcriptional regulation. We show that cytokinin regulates endocytic recycling of the auxin efflux carrier PINFORMED1 (PIN1) by redirecting it for lytic degradation in vacuoles. Stimulation of the lytic PIN1 degradation is not a default effect for general downregulation of proteins from plasma membranes, but a specific mechanism to rapidly modulate the auxin distribution in cytokinin-mediated developmental processes.},
author = {Marhavý, Peter and Bielach, Agnieszka and Abas, Lindy and Abuzeineh, Anas and Duclercq, Jérôme and Tanaka, Hirokazu and Pařezová, Markéta and Petrášek, Jan and Friml, Jiří and Kleine-Vehn, Jürgen and Benková, Eva},
journal = {Developmental Cell},
number = {4},
pages = {796--804},
publisher = {Cell Press},
title = {Cytokinin modulates endocytic trafficking of {PIN1} auxin efflux carrier to control plant organogenesis},
doi = {10.1016/j.devcel.2011.08.014},
volume = {21},
year = {2011},
}
@article{3098,
abstract = {Cell polarity reflected by asymmetric distribution of proteins at the plasma membrane is a fundamental feature of unicellular and multicellular organisms. It remains conceptually unclear how cell polarity is kept in cell wall-encapsulated plant cells. We have used super-resolution and semi-quantitative live-cell imaging in combination with pharmacological, genetic, and computational approaches to reveal insights into the mechanism of cell polarity maintenance in Arabidopsis thaliana. We show that polar-competent PIN transporters for the phytohormone auxin are delivered to the center of polar domains by super-polar recycling. Within the plasma membrane, PINs are recruited into non-mobile membrane clusters and their lateral diffusion is dramatically reduced, which ensures longer polar retention. At the circumventing edges of the polar domain, spatially defined internalization of escaped cargos occurs by clathrin-dependent endocytosis. Computer simulations confirm that the combination of these processes provides a robust mechanism for polarity maintenance in plant cells. Moreover, our study suggests that the regulation of lateral diffusion and spatially defined endocytosis, but not super-polar exocytosis have primary importance for PIN polarity maintenance.},
author = {Kleine-Vehn, Jürgen and Wabnik, Krzysztof and Martinière, Alexandre and Łangowski, Łukasz and Willig, Katrin and Naramoto, Satoshi and Leitner, Johannes and Tanaka, Hirokazu and Jakobs, Stefan and Robert, Stéphanie and Luschnig, Christian and Govaerts, Willy J and Hell, Stefan W and Runions, John and Friml, Jiří},
journal = {Molecular Systems Biology},
publisher = {Nature Publishing Group},
title = {Recycling, clustering and endocytosis jointly maintain {PIN} auxin carrier polarity at the plasma membrane},
doi = {10.1038/msb.2011.72},
volume = {7},
year = {2011},
}
@article{3099,
abstract = {Endomembrane trafficking relies on the coordination of a highly complex, dynamic network of intracellular vesicles. Understanding the network will require a dissection of cargo and vesicle dynamics at the cellular level in vivo. This is also a key to establishing a link between vesicular networks and their functional roles in development. We used a high-content intracellular screen to discover small molecules targeting endomembrane trafficking in vivo in a complex eukaryote, Arabidopsis thaliana. Tens of thousands of molecules were prescreened and a selected subset was interrogated against a panel of plasma membrane (PM) and other endomembrane compartment markers to identify molecules that altered vesicle trafficking. The extensive image dataset was transformed by a flexible algorithm into a marker-by-phenotype-by-treatment time matrix and revealed groups of molecules that induced similar subcellular fingerprints (clusters). This matrix provides a platform for a systems view of trafficking. Molecules from distinct clusters presented avenues and enabled an entry point to dissect recycling at the PM, vacuolar sorting, and cell-plate maturation. Bioactivity in human cells indicated the value of the approach to identifying small molecules that are active in diverse organisms for biology and drug discovery.},
author = {Drakakaki, Georgia and Robert, Stéphanie and Szatmári, Anna-Maria and Brown, Michelle Q and Nagawa, Shingo and Van Damme, Daniël and Leonard, Marylin and Yang, Zhenbiao and Girke, Thomas and Schmid, Sandra L and Russinova, Eugenia and Friml, Jiří and Raikhel, Natasha V and Hicks, Glen R},
journal = {PNAS},
number = {43},
pages = {17850--17855},
publisher = {National Academy of Sciences},
title = {Clusters of bioactive compounds target dynamic endomembrane networks in vivo},
doi = {10.1073/pnas.1108581108},
volume = {108},
year = {2011},
}
@article{3100,
abstract = {In multicellular organisms, morphogenesis relies on a strict coordination in time and space of cell proliferation and differentiation. In contrast to animals, plant development displays continuous organ formation and adaptive growth responses during their lifespan relying on a tight coordination of cell proliferation. How developmental signals interact with the plant cell-cycle machinery is largely unknown. Here, we characterize plant A2-type cyclins, a small gene family of mitotic cyclins, and show how they contribute to the fine-tuning of local proliferation during plant development. Moreover, the timely repression of CYCA2;3 expression in newly formed guard cells is shown to require the stomatal transcription factors FOUR LIPS/MYB124 and MYB88, providing a direct link between developmental programming and cell-cycle exit in plants. Thus, transcriptional downregulation of CYCA2s represents a critical mechanism to coordinate proliferation during plant development.},
author = {Vanneste, Steffen and Coppens, Frederik and Lee, EunKyoung and Donner, Tyler J and Xie, Zidian and Van Isterdael, Gert and Dhondt, Stijn and De Winter, Freya and De Rybel, Bert and Vuylsteke, Marnik and De Veylder, Lieven and Friml, Jiří and Inzé, Dirk and Grotewold, Erich and Scarpella, Enrico and Sack, Fred and Beemster, Gerrit T and Beeckman, Tom},
journal = {EMBO Journal},
number = {16},
pages = {3430--3441},
publisher = {Wiley-Blackwell},
title = {Developmental regulation of {CYCA2s} contributes to tissue-specific proliferation in {Arabidopsis}},
doi = {10.1038/emboj.2011.240},
volume = {30},
year = {2011},
}
@article{3101,
abstract = {Subcellular trafficking is required for a multitude of functions in eukaryotic cells. It involves regulation of cargo sorting, vesicle formation, trafficking and fusion processes at multiple levels. Adaptor protein (AP) complexes are key regulators of cargo sorting into vesicles in yeast and mammals but their existence and function in plants have not been demonstrated. Here we report the identification of the protein-affected trafficking 4 (pat4) mutant defective in the putative δ subunit of the AP-3 complex. pat4 and pat2, a mutant isolated from the same GFP imaging-based forward genetic screen that lacks a functional putative AP-3 β, as well as dominant negative AP-3 μ transgenic lines display undistinguishable phenotypes characterized by largely normal morphology and development, but strong intracellular accumulation of membrane proteins in aberrant vacuolar structures. All mutants are defective in morphology and function of lytic and protein storage vacuoles (PSVs) but show normal sorting of reserve proteins to PSVs. Immunoprecipitation experiments and genetic studies revealed tight functional and physical associations of putative AP-3 β and AP-3 δ subunits. Furthermore, both proteins are closely linked with putative AP-3 μ and σ subunits and several components of the clathrin and dynamin machineries. Taken together, these results demonstrate that AP complexes, similar to those in other eukaryotes, exist in plants, and that AP-3 plays a specific role in the regulation of biogenesis and function of vacuoles in plant cells. © 2011 IBCB, SIBS, CAS All rights reserved},
author = {Zwiewka, Marta and Feraru, Elena and Möller, Barbara and Hwang, Inhwan and Feraru, Mugurel I and Kleine-Vehn, Jürgen and Weijers, Dolf and Friml, Jiří},
journal = {Cell Research},
number = {12},
pages = {1711--1722},
publisher = {Nature Publishing Group},
title = {The {AP-3} adaptor complex is required for vacuolar function in {Arabidopsis}},
doi = {10.1038/cr.2011.99},
volume = {21},
year = {2011},
}
@article{3102,
abstract = {Multicellular organisms depend on cell production, cell fate specification, and correct patterning to shape their adult body. In plants, auxin plays a prominent role in the timely coordination of these different cellular processes. A well-studied example is lateral root initiation, in which auxin triggers founder cell specification and cell cycle activation of xylem pole–positioned pericycle cells. Here, we report that the E2Fa transcription factor of Arabidopsis thaliana is an essential component that regulates the asymmetric cell division marking lateral root initiation. Moreover, we demonstrate that E2Fa expression is regulated by the LATERAL ORGAN BOUNDARY DOMAIN18/LATERAL ORGAN BOUNDARY DOMAIN33 (LBD18/LBD33) dimer that is, in turn, regulated by the auxin signaling pathway. LBD18/LBD33 mediates lateral root organogenesis through E2Fa transcriptional activation, whereas E2Fa expression under control of the LBD18 promoter eliminates the need for LBD18. Besides lateral root initiation, vascular patterning is disrupted in E2Fa knockout plants, similarly as it is affected in auxin signaling and lbd mutants, indicating that the transcriptional induction of E2Fa through LBDs represents a general mechanism for auxin-dependent cell cycle activation. Our data illustrate how a conserved mechanism driving cell cycle entry has been adapted evolutionarily to connect auxin signaling with control of processes determining plant architecture. },
author = {Berckmans, Barbara and Vassileva, Valya and Schmid, Stephan P and Maes, Sara and Parizot, Boris and Naramoto, Satoshi and Magyar, Zoltan and Lessa Alvim Kamei, Claire and Koncz, Csaba and Bögre, Laszlo and Persiau, Geert and De Jaeger, Geert and Friml, Jiří and Simon, Rüdiger and Beeckman, Tom and De Veylder, Lieven},
journal = {Plant Cell},
number = {10},
pages = {3671--3683},
publisher = {American Society of Plant Biologists},
title = {Auxin-dependent cell cycle reactivation through transcriptional regulation of {Arabidopsis} {E2Fa} by lateral organ boundary proteins},
doi = {10.1105/tpc.111.088377},
volume = {23},
year = {2011},
}
@article{3103,
abstract = {Endocytosis in plants has an essential role not only for basic cellular functions but also for growth and development, hormonal signaling and communication with the environment including nutrient delivery, toxin avoidance, and pathogen defense. The major endocytic mechanism in plants depends on the coat protein clathrin. It starts by clathrin-coated vesicle formation at the plasma membrane, where specific cargoes are recognized and packaged for internalization. Recently, genetic, biochemical and advanced microscopy studies provided initial insights into mechanisms and roles of clathrin-mediated endocytosis in plants. Here we summarize the present state of knowledge and compare mechanisms of clathrin-mediated endocytosis in plants with animal and yeast paradigms as well as review plant-specific regulations and roles of this process.},
author = {Chen, Xu and Irani, Niloufer and Friml, Jirí},
journal = {Current Opinion in Plant Biology},
number = {6},
pages = {674 -- 682},
publisher = {Elsevier},
title = {{Clathrin-mediated endocytosis: The gateway into plant cells}},
doi = {10.1016/j.pbi.2011.08.006},
volume = {14},
year = {2011},
}
@article{3138,
abstract = {Hippocampal sharp waves (SPWs) and associated fast ("ripple") oscillations (SPW-Rs) in the CA1 region are among the most synchronous physiological patterns in the mammalian brain. Using two-dimensional arrays of electrodes for recording local field potentials and unit discharges in freely moving rats, we studied the emergence of ripple oscillations (140-220 Hz) and compared their origin and cellular-synaptic mechanisms with fast gamma oscillations (90-140 Hz). We show that (1) hippocampal SPW-Rs and fast gamma oscillations are quantitatively distinct patterns but involve the same networks and share similar mechanisms; (2) both the frequency and magnitude of fast oscillations are positively correlated with the magnitude of SPWs; (3) during both ripples and fast gamma oscillations the frequency of network oscillation is higher in CA1 than in CA3; and (4) the emergence of CA3 population bursts, a prerequisite for SPW-Rs, is biased by activity patterns in the dentate gyrus and entorhinal cortex, with the highest probability of ripples associated with an "optimum" level of dentate gamma power. We hypothesize that each hippocampal subnetwork possesses distinct resonant properties, tuned by the magnitude of the excitatory drive.},
author = {Sullivan, David W and Csicsvari, Jozsef and Mizuseki, Kenji and Montgomery, Sean M and Diba, Kamran and Buzsáki, György},
journal = {Journal of Neuroscience},
number = {23},
pages = {8605 -- 8616},
publisher = {Society for Neuroscience},
title = {{Relationships between hippocampal sharp waves, ripples, and fast gamma oscillation: Influence of dentate and entorhinal cortical activity}},
doi = {10.1523/JNEUROSCI.0294-11.2011},
volume = {31},
year = {2011},
}
@article{3145,
abstract = {Microinjection of recombinant DNA into zygotic pronuclei has been widely used for producing transgenic mice. However, with this method, the insertion site, integrity, and copy number of the transgene cannot be controlled. Here, we present an integrase-based approach to produce transgenic mice via pronuclear injection, whereby an intact single-copy transgene can be inserted into predetermined chromosomal loci with high efficiency (up to 40%), and faithfully transmitted through generations. We show that neighboring transgenic elements and bacterial DNA within the transgene cause profound silencing and expression variability of the transgenic marker. Removal of these undesirable elements leads to global high-level marker expression from transgenes driven by a ubiquitous promoter. We also obtained faithful marker expression from a tissue-specific promoter. The technique presented here will greatly facilitate murine transgenesis and precise structure/function dissection of mammalian gene function and regulation in vivo.},
author = {Tasic, Bosiljka and Hippenmeyer, Simon and Wang, Charlene and Gamboa, Matthew and Zong, Hui and Chen-Tsai, Yanru and Luo, Liqun},
journal = {PNAS},
number = {19},
pages = {7902 -- 7907},
publisher = {National Academy of Sciences},
title = {{Site-specific integrase-mediated transgenesis in mice via pronuclear injection}},
doi = {10.1073/pnas.1019507108},
volume = {108},
year = {2011},
}
@article{3147,
abstract = {Cancer cell of origin is difficult to identify by analyzing cells within terminal stage tumors, whose identity could be concealed by the acquired plasticity. Thus, an ideal approach to identify the cell of origin is to analyze proliferative abnormalities in distinct lineages prior to malignancy. Here, we use mosaic analysis with double markers (MADM) in mice to model gliomagenesis by initiating concurrent p53/Nf1 mutations sporadically in neural stem cells (NSCs). Surprisingly, MADM-based lineage tracing revealed significant aberrant growth prior to malignancy only in oligodendrocyte precursor cells (OPCs), but not in any other NSC-derived lineages or NSCs themselves. Upon tumor formation, phenotypic and transcriptome analyses of tumor cells revealed salient OPC features. Finally, introducing the same p53/Nf1 mutations directly into OPCs consistently led to gliomagenesis. Our findings suggest OPCs as the cell of origin in this model, even when initial mutations occur in NSCs, and highlight the importance of analyzing premalignant stages to identify the cancer cell of origin.},
author = {Liu, Chong and Sage, Jonathan C and Miller, Michael R and Verhaak, Roel G and Hippenmeyer, Simon and Vogel, Hannes and Foreman, Oded and Bronson, Roderick T and Nishiyama, Akiko and Luo, Liqun and Zong, Hui},
journal = {Cell},
number = {2},
pages = {209 -- 221},
publisher = {Cell Press},
title = {{Mosaic analysis with double markers reveals tumor cell of origin in glioma}},
doi = {10.1016/j.cell.2011.06.014},
volume = {146},
year = {2011},
}
@article{3154,
abstract = {Regulated adhesion between cells and their environment is critical for normal cell migration. We have identified mutations in a gene encoding the Drosophila hydrogen peroxide (H2O2)-degrading enzyme Jafrac1, which lead to germ cell adhesion defects. During gastrulation, primordial germ cells (PGCs) associate tightly with the invaginating midgut primordium as it enters the embryo; however, in embryos from jafrac1 mutant mothers this association is disrupted, leaving some PGCs trailing on the outside of the embryo. We observed similar phenotypes in embryos from DE-cadherin/shotgun (shg) mutant mothers and were able to rescue the jafrac1 phenotype by increasing DE-cadherin levels. This and our biochemical evidence strongly suggest that Jafrac1-mediated reduction of H2O2 is required to maintain DE-cadherin protein levels in the early embryo. Our results present in vivo evidence of a peroxiredoxin regulating DE-cadherin-mediated adhesion.},
author = {DeGennaro, Matthew and Hurd, Thomas R and Siekhaus, Daria and Biteau, Benoit and Jasper, Heinrich and Lehmann, Ruth},
journal = {Developmental Cell},
number = {2},
pages = {233 -- 243},
publisher = {Cell Press},
title = {{Peroxiredoxin stabilization of DE-cadherin promotes primordial germ cell adhesion}},
doi = {10.1016/j.devcel.2010.12.007},
volume = {20},
year = {2011},
}
@inproceedings{3163,
abstract = {We study multi-label prediction for structured output sets, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multilabel classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label set, which is infeasible in case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
location = {Granada, Spain},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3204,
abstract = {We introduce a new class of functions that can be minimized in polynomial time in the value oracle model. These are functions f satisfying f(x) + f(y) ≥ f(x ∏ y) + f(x ∐ y) where the domain of each variable x i corresponds to nodes of a rooted binary tree, and operations ∏,∐ are defined with respect to this tree. Special cases include previously studied L-convex and bisubmodular functions, which can be obtained with particular choices of trees. We present a polynomial-time algorithm for minimizing functions in the new class. It combines Murota's steepest descent algorithm for L-convex functions with bisubmodular minimization algorithms. },
author = {Kolmogorov, Vladimir},
pages = {400 -- 411},
publisher = {Springer},
title = {{Submodularity on a tree: Unifying L-convex and bisubmodular functions}},
doi = {10.1007/978-3-642-22993-0_37},
volume = {6907},
year = {2011},
}
@inproceedings{3205,
abstract = {This paper proposes a novel Linear Programming (LP) based algorithm, called Dynamic Tree-Block Coordinate Ascent (DT-BCA), for performing maximum a posteriori (MAP) inference in probabilistic graphical models. Unlike traditional message passing algorithms, which operate uniformly on the whole factor graph, our method dynamically chooses regions of the factor graph on which to focus message-passing efforts. We propose two criteria for selecting regions, including an efficiently computable upper-bound on the increase in the objective possible by passing messages in any particular region. This bound is derived from the theory of primal-dual methods from combinatorial optimization, and the forest that maximizes the bounds can be chosen efficiently using a maximum-spanning-tree-like algorithm. Experimental results show that our dynamic schedules significantly speed up state-of-the-art LP-based message-passing algorithms on a wide variety of real-world problems.},
author = {Tarlow, Daniel and Batra, Dhruv and Kohli, Pushmeet and Kolmogorov, Vladimir},
pages = {113 -- 120},
publisher = {Omnipress},
title = {{Dynamic tree block coordinate ascent}},
year = {2011},
}
@inproceedings{3206,
abstract = {In this paper we address the problem of finding the most probable state of discrete Markov random field (MRF) with associative pairwise terms. Although of practical importance, this problem is known to be NP-hard in general. We propose a new type of MRF decomposition, submod-ular decomposition (SMD). Unlike existing decomposition approaches SMD decomposes the initial problem into sub-problems corresponding to a specific class label while preserving the graph structure of each subproblem. Such decomposition enables us to take into account several types of global constraints in an efficient manner. We study theoretical properties of the proposed approach and demonstrate its applicability on a number of problems.},
author = {Osokin, Anton and Vetrov, Dmitry and Kolmogorov, Vladimir},
pages = {1889 -- 1896},
publisher = {IEEE},
title = {{Submodular decomposition framework for inference in associative Markov networks with global constraints}},
doi = {10.1109/CVPR.2011.5995361},
year = {2011},
}
@inproceedings{3207,
abstract = {Cosegmentation is typically defined as the task of jointly segmenting something similar in a given set of images. Existing methods are too generic and so far have not demonstrated competitive results for any specific task. In this paper we overcome this limitation by adding two new aspects to cosegmentation: (1) the "something" has to be an object, and (2) the "similarity" measure is learned. In this way, we are able to achieve excellent results on the recently introduced iCoseg dataset, which contains small sets of images of either the same object instance or similar objects of the same class. The challenge of this dataset lies in the extreme changes in viewpoint, lighting, and object deformations within each set. We are able to considerably outperform several competitors. To achieve this performance, we borrow recent ideas from object recognition: the use of powerful features extracted from a pool of candidate object-like segmentations. We believe that our work will be beneficial to several application areas, such as image retrieval.},
author = {Vicente, Sara and Rother, Carsten and Kolmogorov, Vladimir},
pages = {2217 -- 2224},
publisher = {IEEE},
title = {{Object cosegmentation}},
doi = {10.1109/CVPR.2011.5995530},
year = {2011},
}
@inproceedings{3236,
abstract = {If a cryptographic primitive remains secure even if ℓ bits about the secret key are leaked to the adversary, one would expect that at least one of n independent instantiations of the scheme remains secure given n·ℓ bits of leakage. This intuition has been proven true for schemes satisfying some special information-theoretic properties by Alwen et al. [Eurocrypt'10]. On the negative side, Lewko and Waters [FOCS'10] construct a CPA secure public-key encryption scheme for which this intuition fails. The counterexample of Lewko and Waters leaves open the interesting possibility that for any scheme there exists a constant c>0, such that n fold repetition remains secure against c·n·ℓ bits of leakage. Furthermore, their counterexample requires the n copies of the encryption scheme to share a common reference parameter, leaving open the possibility that the intuition is true for all schemes without common setup. In this work we give a stronger counterexample ruling out these possibilities. We construct a signature scheme such that: 1. a single instantiation remains secure given ℓ = log(k) bits of leakage where k is a security parameter. 2. any polynomial number of independent instantiations can be broken (in the strongest sense of key-recovery) given ℓ′ = poly(k) bits of leakage. Note that ℓ does not depend on the number of instances. The computational assumption underlying our counterexample is that non-interactive computationally sound proofs exist. Moreover, under a stronger (non-standard) assumption about such proofs, our counterexample does not require a common reference parameter. The underlying idea of our counterexample is rather generic and can be applied to other primitives like encryption schemes. © 2011 International Association for Cryptologic Research.},
author = {Jain, Abhishek and Pietrzak, Krzysztof},
pages = {58 -- 69},
publisher = {Springer},
title = {{Parallel repetition for leakage resilience amplification revisited}},
doi = {10.1007/978-3-642-19571-6_5},
volume = {6597},
year = {2011},
}
@inproceedings{3238,
abstract = {We construct efficient authentication protocols and message-authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work - starting with the HB protocol of Hopper and Blum in 2001 - until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle (MIM) attacks. A MAC implies such a (two-round) protocol. © 2011 International Association for Cryptologic Research},
author = {Kiltz, Eike and Pietrzak, Krzysztof Z and Cash, David and Jain, Abhishek and Venturi, Daniele},
location = {Tallinn, Estonia},
pages = {7 -- 26},
publisher = {Springer},
title = {{Efficient authentication from hard learning problems}},
doi = {10.1007/978-3-642-20465-4_3},
volume = {6632},
year = {2011},
}
@inproceedings{3239,
abstract = {Tampering attacks are cryptanalytic attacks on the implementation of cryptographic algorithms (e.g., smart cards), where an adversary introduces faults with the hope that the tampered device will reveal secret information. Inspired by the work of Ishai et al. [Eurocrypt'06], we propose a compiler that transforms any circuit into a new circuit with the same functionality, but which is resilient against a well-defined and powerful tampering adversary. More concretely, our transformed circuits remain secure even if the adversary can adaptively tamper with every wire in the circuit as long as the tampering fails with some probability δ>0. This additional requirement is motivated by practical tampering attacks, where it is often difficult to guarantee the success of a specific attack. Formally, we show that a q-query tampering attack against the transformed circuit can be "simulated" with only black-box access to the original circuit and log(q) bits of additional auxiliary information. Thus, if the implemented cryptographic scheme is secure against log(q) bits of leakage, then our implementation is tamper-proof in the above sense. Surprisingly, allowing for this small amount of information leakage allows for much more efficient compilers, which moreover do not require randomness during evaluation. Similar to earlier works our compiler requires small, stateless and computation-independent tamper-proof gadgets. Thus, our result can be interpreted as reducing the problem of shielding arbitrary complex computation to protecting simple components. © 2011 Springer-Verlag.},
author = {Faust, Sebastian and Pietrzak, Krzysztof and Venturi, Daniele},
number = {Part 1},
pages = {391 -- 402},
publisher = {Springer},
title = {{Tamper-proof circuits: How to trade leakage for tamper resilience}},
doi = {10.1007/978-3-642-22006-7_33},
volume = {6755},
year = {2011},
}
@inproceedings{3240,
abstract = {The famous Leftover Hash Lemma (LHL) states that (almost) universal hash functions are good randomness extractors. Despite its numerous applications, LHL-based extractors suffer from the following two limitations: - Large Entropy Loss: to extract v bits from distribution X of min-entropy m which are ε-close to uniform, one must set v ≤ m - 2log(1/ε), meaning that the entropy loss L = def m - v ≥ 2 log(1/ε). For many applications, such entropy loss is too large. - Large Seed Length: the seed length n of (almost) universal hash function required by the LHL must be at least n ≥ min (u - v, v + 2log(1/ε)) - O(1), where u is the length of the source, and must grow with the number of extracted bits. Quite surprisingly, we show that both limitations of the LHL - large entropy loss and large seed - can be overcome (or, at least, mitigated) in various important scenarios. First, we show that entropy loss could be reduced to L = log(1/ε) for the setting of deriving secret keys for a wide range of cryptographic applications. Specifically, the security of these schemes with an LHL-derived key gracefully degrades from ε to at most ε + √ε2-L. (Notice that, unlike standard LHL, this bound is meaningful even when one extracts more bits than the min-entropy we have!) Based on these results we build a general computational extractor that enjoys low entropy loss and can be used to instantiate a generic key derivation function for any cryptographic application. Second, we study the soundness of the natural expand-then-extract approach, where one uses a pseudorandom generator (PRG) to expand a short "input seed" S into a longer "output seed" S′, and then use the resulting S′ as the seed required by the LHL (or, more generally, by any randomness extractor). We show that, in general, the expand-then-extract approach is not sound if the Decisional Diffie-Hellman assumption is true. 
Despite that, we show that it is sound either: (1) when extracting a "small" (logarithmic in the security of the PRG) number of bits; or (2) in minicrypt. Implication (2) suggests that the expand-then-extract approach is likely secure when used with "practical" PRGs, despite lacking a reductionist proof of security! © 2011 International Association for Cryptologic Research.},
author = {Barak, Boaz and Dodis, Yevgeniy and Krawczyk, Hugo and Pereira, Olivier and Pietrzak, Krzysztof and Standaert, François-Xavier and Yu, Yu},
pages = {1 -- 20},
publisher = {Springer},
title = {{Leftover hash lemma revisited}},
doi = {10.1007/978-3-642-22792-9_1},
volume = {6841},
year = {2011},
}
@inproceedings{3264,
abstract = {Verification of programs with procedures, multi-threaded programs, and higher-order functional programs can be effectively au- tomated using abstraction and refinement schemes that rely on spurious counterexamples for abstraction discovery. The analysis of counterexam- ples can be automated by a series of interpolation queries, or, alterna- tively, as a constraint solving query expressed by a set of recursion free Horn clauses. (A set of interpolation queries can be formulated as a single constraint over Horn clauses with linear dependency structure between the unknown relations.) In this paper we present an algorithm for solving recursion free Horn clauses over a combined theory of linear real/rational arithmetic and uninterpreted functions. Our algorithm performs resolu- tion to deal with the clausal structure and relies on partial solutions to deal with (non-local) instances of functionality axioms.},
author = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
editor = {Yang, Hongseok},
location = {Kenting, Taiwan},
pages = {188 -- 203},
publisher = {Springer},
title = {{Solving recursion-free Horn clauses over LI+UIF}},
doi = {10.1007/978-3-642-25318-8_16},
volume = {7078},
year = {2011},
}
@inproceedings{3266,
abstract = {We present a joint image segmentation and labeling model (JSL) which, given a bag of figure-ground segment hypotheses extracted at multiple image locations and scales, constructs a joint probability distribution over both the compatible image interpretations (tilings or image segmentations) composed from those segments, and over their labeling into categories. The process of drawing samples from the joint distribution can be interpreted as first sampling tilings, modeled as maximal cliques, from a graph connecting spatially non-overlapping segments in the bag [1], followed by sampling labels for those segments, conditioned on the choice of a particular tiling. We learn the segmentation and labeling parameters jointly, based on Maximum Likelihood with a novel Incremental Saddle Point estimation procedure. The partition function over tilings and labelings is increasingly more accurately approximated by including incorrect configurations that a not-yet-competent model rates probable during learning. We show that the proposed methodologymatches the current state of the art in the Stanford dataset [2], as well as in VOC2010, where 41.7% accuracy on the test set is achieved.},
author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian},
booktitle = {NIPS Proceedings},
location = {Granada, Spain},
pages = {1827 -- 1835},
publisher = {Neural Information Processing Systems Foundation},
title = {{Probabilistic joint image segmentation and labeling}},
volume = {24},
year = {2011},
}
@inbook{3268,
abstract = {Algebraic topology is generally considered one of the purest subfield of mathematics. However, over the last decade two interesting new lines of research have emerged, one focusing on algorithms for algebraic topology, and the other on applications of algebraic topology in engineering and science. Amongst the new areas in which the techniques have been applied are computer vision and image processing. In this paper, we survey the results of these endeavours. Because algebraic topology is an area of mathematics with which most computer vision practitioners have no experience, we review the machinery behind the theories of homology and persistent homology; our review emphasizes intuitive explanations. In terms of applications to computer vision, we focus on four illustrative problems: shape signatures, natural image statistics, image denoising, and segmentation. Our hope is that this review will stimulate interest on the part of computer vision researchers to both use and extend the tools of this new field. },
author = {Freedman, Daniel and Chen, Chao},
booktitle = {Computer Vision},
pages = {239 -- 268},
publisher = {Nova Science Publishers},
title = {{Algebraic topology for computer vision}},
year = {2011},
}
@article{3269,
abstract = {The unintentional scattering of light between neighboring surfaces in complex projection environments increases the brightness and decreases the contrast, disrupting the appearance of the desired imagery. To achieve satisfactory projection results, the inverse problem of global illumination must be solved to cancel this secondary scattering. In this paper, we propose a global illumination cancellation method that minimizes the perceptual difference between the desired imagery and the actual total illumination in the resulting physical environment. Using Gauss-Newton and active set methods, we design a fast solver for the bound constrained nonlinear least squares problem raised by the perceptual error metrics. Our solver is further accelerated with a CUDA implementation and multi-resolution method to achieve 1–2 fps for problems with approximately 3000 variables. We demonstrate the global illumination cancellation algorithm with our multi-projector system. Results show that our method preserves the color fidelity of the desired imagery significantly better than previous methods.},
author = {Sheng, Yu and Cutler, Barbara and Chen, Chao and Nasman, Joshua},
journal = {Computer Graphics Forum},
number = {4},
pages = {1261 -- 1268},
publisher = {Wiley-Blackwell},
title = {{Perceptual global illumination cancellation in complex projection environments}},
doi = {10.1111/j.1467-8659.2011.01985.x},
volume = {30},
year = {2011},
}
@inproceedings{3270,
abstract = {The persistence diagram of a filtered simplicial com- plex is usually computed by reducing the boundary matrix of the complex. We introduce a simple op- timization technique: by processing the simplices of the complex in decreasing dimension, we can “kill” columns (i.e., set them to zero) without reducing them. This technique completely avoids reduction on roughly half of the columns. We demonstrate that this idea significantly improves the running time of the reduction algorithm in practice. We also give an output-sensitive complexity analysis for the new al- gorithm which yields to sub-cubic asymptotic bounds under certain assumptions.},
author = {Chen, Chao and Kerber, Michael},
location = {Morschach, Switzerland},
pages = {197 -- 200},
publisher = {TU Dortmund},
title = {{Persistent homology computation with a twist}},
year = {2011},
}
@inbook{3271,
abstract = {In this paper we present an efficient framework for computation of persis- tent homology of cubical data in arbitrary dimensions. An existing algorithm using simplicial complexes is adapted to the setting of cubical complexes. The proposed approach enables efficient application of persistent homology in domains where the data is naturally given in a cubical form. By avoiding triangulation of the data, we significantly reduce the size of the complex. We also present a data-structure de- signed to compactly store and quickly manipulate cubical complexes. By means of numerical experiments, we show high speed and memory efficiency of our ap- proach. We compare our framework to other available implementations, showing its superiority. Finally, we report performance on selected 3D and 4D data-sets.},
author = {Wagner, Hubert and Chen, Chao and Vuçini, Erald},
booktitle = {Topological Methods in Data Analysis and Visualization II},
editor = {Peikert, Ronald and Hauser, Helwig and Carr, Hamish and Fuchs, Raphael},
pages = {91 -- 106},
publisher = {Springer},
title = {{Efficient computation of persistent homology for cubical data}},
doi = {10.1007/978-3-642-23175-9_7},
year = {2011},
}
@phdthesis{3273,
author = {Maître, Jean-Léon},
publisher = {IST Austria},
title = {{Mechanics of adhesion and de-adhesion in zebrafish germ layer progenitors}},
year = {2011},
}
@phdthesis{3275,
abstract = {Chemokines organize immune cell trafficking by inducing either directed (tactic) or random (kinetic) migration and by activating integrins in order to support surface adhesion (haptic). Beyond that the same chemokines can establish clearly defined functional areas in secondary lymphoid organs. Until now it is unclear how chemokines can fulfill such diverse functions. One decisive prerequisite to explain these capacities is to know how chemokines are presented in tissue. In theory chemokines could occur either soluble or immobilized, and could be distributed either homogenously or as a concentration gradient. To dissect if and how the presenting mode of chemokines influences immune cells, I tested the response of dendritic cells (DCs) to differentially displayed chemokines. DCs are antigen presenting cells that reside in the periphery and migrate into draining lymph nodes (LNs) once exposed to inflammatory stimuli to activate naïve T cells. DCs are guided to and within the LN by the chemokine receptor CCR7, which has two ligands, the chemokines CCL19 and CCL21. Both CCR7 ligands are expressed by fibroblastic reticular cells in the LN, but differ in their ability to bind to heparan sulfate residues. CCL21 has a highly charged C-terminal extension, which mediates binding to anionic surfaces, whereas CCL19 is lacking such residues and likely distributes as a soluble molecule. This study shows that surface-bound CCL21 causes random, haptokinetic DC motility, which is confined to the chemokine coated area by inside-out activation of β2 integrins that mediate cell binding to the surface. CCL19 on the other hand forms concentration gradients which trigger directional, chemotactic movement, but no surface adhesion. In addition DCs can actively manipulate this system by recruiting and activating serine proteases on their surfaces, which create - by proteolytically removing the adhesive C-terminus - a solubilized variant of CCL21 that functionally resembles CCL19. 
By generating a CCL21 concentration gradient DCs establish a positive feedback loop to recruit further DCs from the periphery to the CCL21 coated region. In addition DCs can sense chemotactic gradients as well as immobilized haptokinetic fields at the same time and integrate these signals. The result is chemotactically biased haptokinesis - directional migration confined to a chemokine coated track or area - which could explain the dynamic but spatially tightly controlled swarming leukocyte locomotion patterns that have been observed in lymphatic organs by intravital microscopists. The finding that DCs can approach soluble cues in a non-adhesive manner while they attach to surfaces coated with immobilized cues raises the question how these cells transmit intracellular forces to the environment, especially in the non-adherent migration mode. In order to migrate, cells have to generate and transmit force to the extracellular substrate. Force transmission is the prerequisite to procure an expansion of the leading edge and a forward motion of the whole cell body. In the current conceptions actin polymerization at the leading edge is coupled to extracellular ligands via the integrin family of transmembrane receptors, which allows the transmission of intracellular force. Against the paradigm of force transmission during migration, leukocytes, like DCs, are able to migrate in three-dimensional environments without using integrin transmembrane receptors (Lämmermann et al., 2008). This reflects the biological function of leukocytes, as they can invade almost all tissues, whereby their migration has to be independent from the extracellular environment. How the cells can achieve this is unclear. For this study I examined DC migration in a defined three-dimensional environment and highlighted actin-dynamics with the probe Lifeact-GFP. 
The result was that chemotactic DCs can switch between integrin-dependent and integrin-independent locomotion and can thereby adapt to the adhesive properties of their environment. If the cells are able to couple their actin cytoskeleton to the substrate, actin polymerization is entirely converted into protrusion. Without coupling the actin cortex undergoes slippage and retrograde actin flow can be observed. But retrograde actin flow can be completely compensated by higher actin polymerization rate keeping the migration velocity and the shape of the cells unaltered. Mesenchymal cells like fibroblasts cannot balance the loss of adhesive interaction, cannot protrude into open space and, therefore, strictly depend on integrin-mediated force coupling. This leukocyte specific phenomenon of “adaptive force transmission” endows these cells with the unique ability to transit and invade almost every type of tissue. },
author = {Schumann, Kathrin},
pages = {141},
publisher = {IST Austria},
title = {{The role of chemotactic gradients in dendritic cell migration}},
year = {2011},
}
@article{3276,
abstract = {We present an algorithm to identify individual neural spikes observed on high-density multi-electrode arrays (MEAs). Our method can distinguish large numbers of distinct neural units, even when spikes overlap, and accounts for intrinsic variability of spikes from each unit. As MEAs grow larger, it is important to find spike-identification methods that are scalable, that is, the computational cost of spike fitting should scale well with the number of units observed. Our algorithm accomplishes this goal, and is fast, because it exploits the spatial locality of each unit and the basic biophysics of extracellular signal propagation. Human interaction plays a key role in our method; but effort is minimized and streamlined via a graphical interface. We illustrate our method on data from guinea pig retinal ganglion cells and document its performance on simulated data consisting of spikes added to experimentally measured background noise. We present several tests demonstrating that the algorithm is highly accurate: it exhibits low error rates on fits to synthetic data, low refractory violation rates, good receptive field coverage, and consistency across users.},
author = {Prentice, Jason S and Homann, Jan and Simmons, Kristina D and Tkacik, Gasper and Balasubramanian, Vijay and Nelson, Philip C},
journal = {PLoS One},
number = {7},
publisher = {Public Library of Science},
title = {{Fast, scalable, Bayesian spike identification for multi-electrode arrays}},
doi = {10.1371/journal.pone.0019884},
volume = {6},
year = {2011},
}
@article{3278,
abstract = {Despite much research on the socially parasitic large blue butterflies (genus Maculinea) in the past 40 years, their relationship to their closest relatives, Phengaris, is controversial and the relationships among the remaining genera in the Glaucopsyche section are largely unresolved. The evolutionary history of this butterfly section is particularly important to understand the evolution of life history diversity connected to food-plant and host-ant associations in the larval stage. In the present study, we use a combination of four nuclear and two mitochondrial genes to reconstruct the phylogeny of the Glaucopsyche section, and in particular, to study the relationships among and within the Phengaris–Maculinea species.
We find a clear pattern between the clades recovered in the Glaucopsyche section phylogeny and their food-plant associations, with only the Phengaris–Maculinea clade utilising more than one plant family. Maculinea is, for the first time, recovered with strong support as a monophyletic group nested within Phengaris, with the closest relative being the rare genus Caerulea. The genus Glaucopsyche is polyphyletic, including the genera Sinia and Iolana. Interestingly, we find evidence for additional potential cryptic species within the highly endangered Maculinea, which has long been suspected from morphological, ecological and molecular studies.},
author = {Vila, Roger and Pierce, Naomi E and Nash, David R and Ugelvig, Line},
journal = {Molecular Phylogenetics and Evolution},
number = {1},
pages = {237 -- 243},
publisher = {Elsevier},
title = {{A phylogenetic revision of the Glaucopsyche section (Lepidoptera: Lycaenidae), with special focus on the Phengaris-Maculinea clade}},
doi = {10.1016/j.ympev.2011.05.016},
volume = {61},
year = {2011},
}
@article{3285,
abstract = {Resolving the dynamical interplay of proteins and lipids in the live-cell plasma membrane represents a central goal in current cell biology. Superresolution concepts have introduced a means of capturing spatial heterogeneity at a nanoscopic length scale. Similar concepts for detecting dynamical transitions (superresolution chronoscopy) are still lacking. Here, we show that recently introduced spot-variation fluorescence correlation spectroscopy allows for sensing transient confinement times of membrane constituents at dramatically improved resolution. Using standard diffraction-limited optics, spot-variation fluorescence correlation spectroscopy captures signatures of single retardation events far below the transit time of the tracer through the focal spot. We provide an analytical description of special cases of transient binding of a tracer to pointlike traps, or association of a tracer with nanodomains. The influence of trap mobility and the underlying binding kinetics are quantified. Experimental approaches are suggested that allow for gaining quantitative mechanistic insights into the interaction processes of membrane constituents.},
author = {Ruprecht, Verena and Wieser, Stefan and Marguet, Didier and Schuetz, Gerhard},
journal = {Biophysical Journal},
number = {11},
pages = {2839 -- 2845},
publisher = {Biophysical Society},
title = {{Spot variation fluorescence correlation spectroscopy allows for superresolution chronoscopy of confinement times in membranes}},
doi = {10.1016/j.bpj.2011.04.035},
volume = {100},
year = {2011},
}
@article{3286,
abstract = {Cationic antimicrobial peptides (CAMPs) selectively target bacterial membranes by electrostatic interactions with negatively charged lipids. It turned out that for inhibition of microbial growth a high CAMP membrane concentration is required, which can be realized by the incorporation of hydrophobic groups within the peptide. Increasing hydrophobicity, however, reduces the CAMP selectivity for bacterial over eukaryotic host membranes, thereby causing the risk of detrimental side-effects. In this study we addressed how cationic amphipathic peptides—in particular a CAMP with Lysine–Leucine–Lysine repeats (termed KLK)—affect the localization and dynamics of molecules in eukaryotic membranes. We found KLK to selectively inhibit the endocytosis of a subgroup of membrane proteins and lipids by electrostatically interacting with negatively charged sialic acid moieties. Ultrastructural characterization revealed the formation of membrane invaginations representing fission or fusion intermediates, in which the sialylated proteins and lipids were immobilized. Experiments on structurally different cationic amphipathic peptides (KLK, 6-MO-LF11-322 and NK14-2) indicated a cooperation of electrostatic and hydrophobic forces that selectively arrest sialylated membrane constituents.},
author = {Weghuber, Julian and Aichinger, Michael C. and Brameshuber, Mario and Wieser, Stefan and Ruprecht, Verena and Plochberger, Birgit and Madl, Josef and Horner, Andreas and Reipert, Siegfried and Lohner, Karl and Henics, Tamas and Schuetz, Gerhard J},
journal = {Biochimica et Biophysica Acta (BBA) - Biomembranes},
number = {10},
pages = {2581 -- 2590},
publisher = {Elsevier},
title = {{Cationic amphipathic peptides accumulate sialylated proteins and lipids in the plasma membrane of eukaryotic host cells}},
doi = {10.1016/j.bbamem.2011.06.007},
volume = {1808},
year = {2011},
}
@article{3287,
abstract = {Diffusing membrane constituents are constantly exposed to a variety of forces that influence their stochastic path. Single molecule experiments allow for resolving trajectories at extremely high spatial and temporal accuracy, thereby offering insights into en route interactions of the tracer. In this review we discuss approaches to derive information about the underlying processes, based on single molecule tracking experiments. In particular, we focus on a new versatile way to analyze single molecule diffusion in the absence of a full analytical treatment. The method is based on comprehensive comparison of an experimental data set against the hypothetical outcome of multiple experiments performed on the computer. Since Monte Carlo simulations can be easily and rapidly performed even on state-of-the-art PCs, our method provides a simple way for testing various - even complicated - diffusion models. We describe the new method in detail, and show the applicability on two specific examples: firstly, kinetic rate constants can be derived for the transient interaction of mobile membrane proteins; secondly, residence time and corral size can be extracted for confined diffusion.},
author = {Ruprecht, Verena and Axmann, Markus and Wieser, Stefan and Schuetz, Gerhard},
journal = {Current Protein \& Peptide Science},
number = {8},
pages = {714 -- 724},
publisher = {Bentham Science Publishers},
title = {{What can we learn from single molecule trajectories?}},
doi = {10.2174/138920311798841753},
volume = {12},
year = {2011},
}
@article{3288,
abstract = {The zonula adherens (ZA) of epithelial cells is a site of cell-cell adhesion where cellular forces are exerted and resisted. Increasing evidence indicates that E-cadherin adhesion molecules at the ZA serve to sense force applied on the junctions and coordinate cytoskeletal responses to those forces. Efforts to understand the role that cadherins play in mechanotransduction have been limited by the lack of assays to measure the impact of forces on the ZA. In this study we used 4D imaging of GFP-tagged E-cadherin to analyse the movement of the ZA. Junctions in confluent epithelial monolayers displayed prominent movements oriented orthogonal (perpendicular) to the ZA itself. Two components were identified in these movements: a relatively slow unidirectional (translational) component that could be readily fitted by least-squares regression analysis, upon which were superimposed more rapid oscillatory movements. Myosin IIB was a dominant factor responsible for driving the unilateral translational movements. In contrast, frequency spectrum analysis revealed that depletion of Myosin IIA increased the power of the oscillatory movements. This implies that Myosin IIA may serve to dampen oscillatory movements of the ZA. This extends our recent analysis of Myosin II at the ZA to demonstrate that Myosin IIA and Myosin IIB make distinct contributions to junctional movement at the ZA.},
author = {Smutny, Michael and Wu, Selwin and Gomez, Guillermo and Mangold, Sabine and Yap, Alpha and Hamilton, Nicholas},
journal = {PLoS One},
number = {7},
publisher = {Public Library of Science},
title = {{Multicomponent analysis of junctional movements regulated by Myosin II isoforms at the epithelial zonula adherens}},
doi = {10.1371/journal.pone.0022458},
volume = {6},
year = {2011},
}
@article{3290,
abstract = {Analysis of genomic data requires an efficient way to calculate likelihoods across very large numbers of loci. We describe a general method for finding the distribution of genealogies: we allow migration between demes, splitting of demes [as in the isolation-with-migration (IM) model], and recombination between linked loci. These processes are described by a set of linear recursions for the generating function of branch lengths. Under the infinite-sites model, the probability of any configuration of mutations can be found by differentiating this generating function. Such calculations are feasible for small numbers of sampled genomes: as an example, we show how the generating function can be derived explicitly for three genes under the two-deme IM model. This derivation is done automatically, using Mathematica. Given data from a large number of unlinked and nonrecombining blocks of sequence, these results can be used to find maximum-likelihood estimates of model parameters by tabulating the probabilities of all relevant mutational configurations and then multiplying across loci. The feasibility of the method is demonstrated by applying it to simulated data and to a data set previously analyzed by Wang and Hey (2010) consisting of 26,141 loci sampled from Drosophila simulans and D. melanogaster. Our results suggest that such likelihood calculations are scalable to genomic data as long as the numbers of sampled individuals and mutations per sequence block are small.},
author = {Lohse, Konrad and Harrison, Richard and Barton, Nicholas H},
journal = {Genetics},
number = {3},
pages = {977 -- 987},
publisher = {Genetics Society of America},
title = {{A general method for calculating likelihoods under the coalescent process}},
doi = {10.1534/genetics.111.129569},
volume = {189},
year = {2011},
}
@inproceedings{3297,
abstract = {Animating detailed liquid surfaces has always been a challenge for computer graphics researchers and visual effects artists. Over the past few years, researchers in this field have focused on mesh-based surface tracking to synthesize extremely detailed liquid surfaces as efficiently as possible. This course provides a solid understanding of the steps required to create a fluid simulator with a mesh-based liquid surface.
The course begins with an overview of several existing liquid-surface-tracking techniques and the pros and cons of each method. Then it explains how to embed a triangle mesh into a finite-difference-based fluid simulator and describes several methods for allowing the liquid surface to merge together or break apart. The final section showcases the benefits and further applications of a mesh-based liquid surface, highlighting state-of-the-art methods for tracking colors and textures, maintaining liquid volume, preserving small surface features, and simulating realistic surface-tension waves.},
author = {Wojtan, Christopher J and Müller Fischer, Matthias and Brochu, Tyson},
location = {Vancouver, BC, Canada},
publisher = {ACM},
title = {{Liquid simulation with mesh-based surface tracking}},
doi = {10.1145/2037636.2037644},
year = {2011},
}
@inproceedings{3298,
abstract = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
author = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
editor = {Spencer, Stephen},
location = {Vancouver, Canada},
pages = {33 -- 42},
publisher = {ACM},
title = {{Hybrid smoothed particle hydrodynamics}},
doi = {10.1145/2019406.2019411},
year = {2011},
}
@inproceedings{3299,
abstract = {We introduce propagation models, a formalism designed to support general and efficient data structures for the transient analysis of biochemical reaction networks. We give two use cases for propagation abstract data types: the uniformization method and numerical integration. We also sketch an implementation of a propagation abstract data type, which uses abstraction to approximate states.},
author = {Henzinger, Thomas A and Mateescu, Maria},
location = {Paris, France},
pages = {1 -- 3},
publisher = {Springer},
title = {{Propagation models for computing biochemical reaction networks}},
doi = {10.1145/2037509.2037510},
year = {2011},
}
@inproceedings{3301,
abstract = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation is of interest within the systems biology field ever since the importance of the molecular noise has been acknowledged. Unfortunately, most of the systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
author = {Henzinger, Thomas A and Mateescu, Maria},
publisher = {Tampere International Center for Signal Processing},
title = {{Tail approximation for the chemical master equation}},
year = {2011},
}
@inproceedings{3302,
abstract = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We present a new job execution environment Flextic that exploits scalable static scheduling techniques to provide the user with a flexible pricing model, such as a tradeoff between different degrees of execution speed and execution price, and at the same time, reduce scheduling overhead for the cloud provider. We have evaluated a prototype of Flextic on Amazon EC2 and compared it against Hadoop. For various data parallel jobs from machine learning, image processing, and gene sequencing that we considered, Flextic has low scheduling overhead and reduces job duration by up to 15% compared to Hadoop, a dynamic cloud scheduler.},
author = {Henzinger, Thomas A and Singh, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
pages = {1 -- 6},
publisher = {USENIX},
title = {{Static scheduling in clouds}},
year = {2011},
}
@inproceedings{3312,
abstract = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
author = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Detailed reconstruction of 3D plant root shape}},
doi = {10.1109/ICCV.2011.6126475},
year = {2011},
}
@inproceedings{3313,
abstract = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams have norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
author = {Chen, Chao and Edelsbrunner, Herbert},
booktitle = {Proceedings of the IEEE International Conference on Computer Vision},
location = {Barcelona, Spain},
publisher = {IEEE},
title = {{Diffusion runs low on persistence fast}},
doi = {10.1109/ICCV.2011.6126271},
year = {2011},
}
@article{3315,
abstract = {We consider two-player games played in real time on game structures with clocks where the objectives of players are described using parity conditions. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to play strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., not concurrent) finite-state (i.e., untimed) parity games. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. The states of the resulting game are based on clock regions of the original game, and the state space of the finite game is linear in the size of the region graph. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust winning strategies. Using a limit-robust winning strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust winning. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust winning strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
journal = {Logical Methods in Computer Science},
number = {4},
publisher = {International Federation of Computational Logic},
title = {{Timed parity games: Complexity and robustness}},
doi = {10.2168/LMCS-7(4:8)2011},
volume = {7},
year = {2011},
}
@inproceedings{3316,
abstract = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. One of the notions is based on assigning costs for failures on a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the previous notion, it has no concept of recovery from an error. Instead, it compares the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
author = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
booktitle = {6th IEEE International Symposium on Industrial and Embedded Systems},
location = {Vasteras, Sweden},
pages = {176 -- 185},
publisher = {IEEE},
title = {{Specification-centered robustness}},
doi = {10.1109/SIES.2011.5953660},
year = {2011},
}
@article{3318,
abstract = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
author = {Eggermann, Emmanuel and Jonas, Peter M},
journal = {Nature Neuroscience},
pages = {20 -- 22},
publisher = {Nature Publishing Group},
title = {{How the “slow” Ca(2+) buffer parvalbumin affects transmitter release in nanodomain coupling regimes at GABAergic synapses}},
doi = {10.1038/nn.3002},
volume = {15},
year = {2011},
}
@inproceedings{3319,
abstract = {We address the problem of metric learning for multi-view data, namely the construction of embedding projections from data in different representations into a shared feature space, such that the Euclidean distance in this space provides a meaningful within-view as well as between-view similarity. Our motivation stems from the problem of cross-media retrieval tasks, where the availability of a joint Euclidean distance function is a pre-requisite to allow fast, in particular hashing-based, nearest neighbor queries. We formulate an objective function that expresses the intuitive concept that matching samples are mapped closely together in the output space, whereas non-matching samples are pushed apart, no matter in which view they are available. The resulting optimization problem is not convex, but it can be decomposed explicitly into a convex and a concave part, thereby allowing efficient optimization using the convex-concave procedure. Experiments on an image retrieval task show that nearest-neighbor based cross-view retrieval is indeed possible, and the proposed technique improves the retrieval accuracy over baseline techniques.},
author = {Quadrianto, Novi and Lampert, Christoph},
location = {Bellevue, USA},
pages = {425 -- 432},
publisher = {Omnipress},
title = {{Learning multi-view neighborhood preserving projections}},
year = {2011},
}
@article{3320,
abstract = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
author = {Nowozin, Sebastian and Lampert, Christoph},
journal = {Foundations and Trends in Computer Graphics and Vision},
number = {3-4},
pages = {185 -- 365},
publisher = {Now Publishers},
title = {{Structured learning and prediction in computer vision}},
doi = {10.1561/0600000033},
volume = {6},
year = {2011},
}
@inproceedings{3322,
abstract = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in case of structured outputs. Relying on techniques originally designed for single- label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
author = {Lampert, Christoph},
booktitle = {NIPS: Neural Information Processing Systems},
publisher = {Neural Information Processing Systems},
title = {{Maximum margin multi-label structured prediction}},
year = {2011},
}
@inproceedings{3323,
abstract = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
author = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
location = {Wrocław, Poland},
pages = {476 -- 491},
publisher = {Springer},
title = {{An efficient decision procedure for imperative tree data structures}},
doi = {10.1007/978-3-642-22438-6_36},
volume = {6803},
year = {2011},
}
@inproceedings{3324,
abstract = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
author = {Piskac, Ruzica and Wies, Thomas},
editor = {Jhala, Ranjit and Schmidt, David},
location = {Texas, USA},
pages = {371 -- 386},
publisher = {Springer},
title = {{Decision procedures for automating termination proofs}},
doi = {10.1007/978-3-642-18275-4_26},
volume = {6538},
year = {2011},
}
@inproceedings{3325,
abstract = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data domain that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next input symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenating data-string variables and new symbols formed from data variables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over input/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional programs, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative programs dynamically modify a singly-linked heap by changing next-pointers of heap-nodes and by adding new nodes. The main restriction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
author = {Alur, Rajeev and Černý, Pavol},
location = {Texas, USA},
number = {1},
pages = {599 -- 610},
publisher = {ACM},
title = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
doi = {10.1145/1926385.1926454},
volume = {46},
year = {2011},
}
@inproceedings{3326,
abstract = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 90’s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact we work directly with the automata enables us to tighten also the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete.
Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, in both the front of containment vs. universality and the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
author = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
location = {Taipei, Taiwan},
pages = {482 -- 491},
publisher = {Springer},
title = {{What’s decidable about weighted automata?}},
doi = {10.1007/978-3-642-24372-1_37},
volume = {6996},
year = {2011},
}
@inproceedings{3327,
abstract = {We solve the open problems of translating, when possible, all common classes of nondeterministic word automata to deterministic and nondeterministic co-Büchi word automata. The handled classes include Büchi, parity, Rabin, Streett and Muller automata. The translations follow a unified approach and are all asymptotically tight. The problem of translating Büchi automata to equivalent co-Büchi automata was solved in [2], leaving open the problems of translating automata with richer acceptance conditions. For these classes, one cannot easily extend or use the construction in [2]. In particular, going via an intermediate Büchi automaton is not optimal and might involve a blow-up exponentially higher than the known lower bound. Other known translations are also not optimal and involve a doubly exponential blow-up. We describe direct, simple, and asymptotically tight constructions, involving a 2Θ(n) blow-up. The constructions are variants of the subset construction, and allow for symbolic implementations. Beyond the theoretical importance of the results, the new constructions have various applications, among which is an improved algorithm for translating, when possible, LTL formulas to deterministic Büchi word automata.},
author = {Boker, Udi and Kupferman, Orna},
editor = {Hofmann, Martin},
location = {Saarbrücken, Germany},
pages = {184 -- 198},
publisher = {Springer},
title = {{Co-Büching them all}},
doi = {10.1007/978-3-642-19805-2_13},
volume = {6604},
year = {2011},
}
@inproceedings{3328,
abstract = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete, correct, though efficient state-of-the-art implementations on polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied with a ready-to-use interface to enable arrangements induced by algebraic curves, that have already been used as basis for various geometric applications, as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
author = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
location = {Paris, France},
pages = {179 -- 186},
publisher = {ACM},
title = {{A generic algebraic kernel for non-linear geometric applications}},
doi = {10.1145/1998196.1998224},
year = {2011},
}
@inproceedings{3329,
abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter, and its running time depends on the parameter δ (in addition to the other input parameters: n, δ and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
booktitle = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
location = {Paris, France},
pages = {187 -- 196},
publisher = {ACM},
title = {{Deconstructing approximate offsets}},
doi = {10.1145/1998196.1998225},
year = {2011},
}
@inproceedings{3330,
abstract = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most 2-L, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficient and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
author = {Kerber, Michael and Sagraloff, Michael},
location = {California, USA},
pages = {209 -- 216},
publisher = {ACM},
title = {{Root refinement for real polynomials}},
doi = {10.1145/1993886.1993920},
year = {2011},
}
@article{3332,
abstract = {Given an algebraic hypersurface O in ℝd, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
author = {Kerber, Michael and Sagraloff, Michael},
journal = {Graphs and Combinatorics},
number = {3},
pages = {419 -- 430},
publisher = {Springer},
title = {{A note on the complexity of real algebraic hypersurfaces}},
doi = {10.1007/s00373-011-1020-7},
volume = {27},
year = {2011},
}
@article{3334,
author = {Edelsbrunner, Herbert and Pach, János and Ziegler, Günter},
journal = {Discrete & Computational Geometry},
number = {1},
pages = {1 -- 2},
publisher = {Springer},
title = {{Letter from the new editors-in-chief}},
doi = {10.1007/s00454-010-9313-9},
volume = {45},
year = {2011},
}
@inbook{3335,
abstract = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. 
We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
author = {Van De Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and Ten Have, Eline and Van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
booktitle = {Transactions on Computational Science XIV},
editor = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
pages = {60 -- 101},
publisher = {Springer},
title = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
doi = {10.1007/978-3-642-25249-5_3},
volume = {6970},
year = {2011},
}
@inproceedings{3337,
abstract = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
author = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
location = {San Francisco, USA},
pages = {332 -- 337},
publisher = {IEEE},
title = {{Learning anticipation policies for robot table tennis}},
doi = {10.1109/IROS.2011.6094892},
year = {2011},
}
@unpublished{3338,
abstract = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respectively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite-precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as powerful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n^{2d+3}) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP.
While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, that are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
author = {Chatterjee, Krishnendu},
booktitle = {arXiv},
pages = {1 -- 51},
publisher = {ArXiv},
title = {{Bounded rationality in concurrent parity games}},
year = {2011},
}
@unpublished{3339,
abstract = {Turn-based stochastic games and its important subclass Markov decision processes (MDPs) provide models for systems with both probabilistic and nondeterministic behaviors. We consider turn-based stochastic games with two classical quantitative objectives: discounted-sum and long-run average objectives. The game models and the quantitative objectives are widely used in probabilistic verification, planning, optimal inventory control, network protocol and performance analysis. Games and MDPs that model realistic systems often have very large state spaces, and probabilistic abstraction techniques are necessary to handle the state-space explosion. The commonly used full-abstraction techniques do not yield space-savings for systems that have many states with similar value, but does not necessarily have similar transition structure. A semi-abstraction technique, namely Magnifying-lens abstractions (MLA), that clusters states based on value only, disregarding differences in their transition relation was proposed for qualitative objectives (reachability and safety objectives). In this paper we extend the MLA technique to solve stochastic games with discounted-sum and long-run average objectives. We present the MLA technique based abstraction-refinement algorithm for stochastic games and MDPs with discounted-sum objectives. For long-run average objectives, our solution works for all MDPs and a sub-class of stochastic games where every state has the same value. },
author = {Chatterjee, Krishnendu and de Alfaro, Luca and Roy, Pritam},
booktitle = {arXiv},
pages = {17},
publisher = {ArXiv},
title = {{Magnifying lens abstraction for stochastic games with discounted and long-run average objectives}},
year = {2011},
}
@inproceedings{3344,
abstract = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems and many others. On the basis of mode of interaction of the players these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players these games can be classified as follows: (a) perfect-information (players have perfect view of the game); and (b) partial-information (players have partial view of the game). In this talk we will consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent player is to prevent the player from reaching the target. We will survey the results for various classes of games, and the results range from linear time decision algorithms to EXPTIME-complete problems to undecidable problems.},
author = {Chatterjee, Krishnendu},
editor = {Delzanno, Giorgio and Potapov, Igor},
location = {Genoa, Italy},
pages = {1 -- 1},
publisher = {Springer},
title = {{Graph games with reachability objectives}},
doi = {10.1007/978-3-642-24288-5_1},
volume = {6945},
year = {2011},
}
@inproceedings{3345,
abstract = {We consider Markov Decision Processes (MDPs) with mean-payoff parity and energy parity objectives. In system design, the parity objective is used to encode ω-regular specifications, and the mean-payoff and energy objectives can be used to model quantitative resource constraints. The energy condition requires that the resource level never drops below 0, and the mean-payoff condition requires that the limit-average value of the resource consumption is within a threshold. While these two (energy and mean-payoff) classical conditions are equivalent for two-player games, we show that they differ for MDPs. We show that the problem of deciding whether a state is almost-sure winning (i.e., winning with probability 1) in energy parity MDPs is in NP ∩ coNP, while for mean-payoff parity MDPs, the problem is solvable in polynomial time, improving a recent PSPACE bound.},
author = {Chatterjee, Krishnendu and Doyen, Laurent},
location = {Warsaw, Poland},
pages = {206 -- 218},
publisher = {Springer},
title = {{Energy and mean-payoff parity Markov Decision Processes}},
doi = {10.1007/978-3-642-22993-0_21},
volume = {6907},
year = {2011},
}
@inproceedings{3346,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with k reward functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the single-objective case, both randomization and memory are necessary for strategies, and that finite-memory randomized strategies are sufficient. Under the satisfaction objective, in contrast to the single-objective case, infinite memory is necessary for strategies, and that randomized memoryless strategies are sufficient for epsilon-approximation, for all epsilon > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be epsilon-approximated in time polynomial in the size of the MDP and 1/epsilon, and exponential in the number of reward functions, for all epsilon > 0. Our results also reveal flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, correct the flaws and obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Two views on multiple mean payoff objectives in Markov Decision Processes}},
doi = {10.1109/LICS.2011.10},
year = {2011},
}
@inproceedings{3347,
abstract = {The class of omega-regular languages provides a robust specification language in verification. Every omega-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens "eventually". Finitary liveness was proposed by Alur and Henzinger as a stronger formulation of liveness. It requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider automata with finitary acceptance conditions defined by finitary Buchi, parity and Streett languages. We study languages expressible by such automata: we give their topological complexity and present a regular-expression characterization. We compare the expressive power of finitary automata and give optimal algorithms for classical decisions questions. We show that the finitary languages are Sigma 2-complete; we present a complete picture of the expressive power of various classes of automata with finitary and infinitary acceptance conditions; we show that the languages defined by finitary parity automata exactly characterize the star-free fragment of omega B-regular languages; and we show that emptiness is NLOGSPACE-complete and universality as well as language inclusion are PSPACE-complete for finitary parity and Streett automata.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Tarragona, Spain},
pages = {216 -- 226},
publisher = {Springer},
title = {{Finitary languages}},
doi = {10.1007/978-3-642-21254-3_16},
volume = {6638},
year = {2011},
}
@inproceedings{3348,
abstract = {We study synthesis of controllers for real-time systems, where the objective is to stay in a given safe set. The problem is solved by obtaining winning strategies in the setting of concurrent two-player timed automaton games with safety objectives. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. We construct winning strategies for the controller which require access only to (1) the system clocks (thus, controllers which require their own internal infinitely precise clocks are not necessary), and (2) a linear (in the number of clocks) number of memory bits. Precisely, we show that for safety objectives, a memory of size (3 · |C|+lg(|C|+1)) bits suffices for winning controller strategies, where C is the set of clocks of the timed automaton game, significantly improving the previous known exponential bound. We also settle the open question of whether winning region controller strategies require memory for safety objectives by showing with an example the necessity of memory for region strategies to win for safety objectives.},
author = {Chatterjee, Krishnendu and Prabhu, Vinayak},
location = {Chicago, USA},
pages = {221 -- 230},
publisher = {ACM},
title = {{Synthesis of memory efficient real time controllers for safety objectives}},
doi = {10.1145/1967701.1967734},
year = {2011},
}
@inproceedings{3349,
abstract = {Games on graphs provide a natural model for reactive non-terminating systems. In such games, the interaction of two players on an arena results in an infinite path that describes a run of the system. Different settings are used to model various open systems in computer science, as for instance turn-based or concurrent moves, and deterministic or stochastic transitions. In this paper, we are interested in turn-based games, and specifically in deterministic parity games and stochastic reachability games (also known as simple stochastic games). We present a simple, direct and efficient reduction from deterministic parity games to simple stochastic games: it yields an arena whose size is linear up to a logarithmic factor in size of the original arena.},
author = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
location = {Minori, Italy},
pages = {74 -- 86},
publisher = {EPTCS},
title = {{A reduction from parity games to simple stochastic games}},
doi = {10.4204/EPTCS.54.6},
volume = {54},
year = {2011},
}
@article{335,
abstract = {Recently reported synthetic routes for the production of hollow nanoparticles have stimulated significant interest for the possibilities this novel geometry offers. While advantageous properties have been found and innovative applications have been proposed, the development of the full potential of these new nanostructures is still strongly tied to the extent of control that can be accomplished over their characteristics (e.g., composition, size, shell thickness, and nanocrystalline structure). In the present work, we investigate the means and limits of control over these parameters that can be obtained by the Kirkendall effect synthetic route on cadmium chalcogenide nanocrystalline shells. We demonstrate that the selection of the reactants and oxidation conditions allows some extent of control of the nanocrystalline structure and thickness of the shell. However, the tuning range is limited by the intrinsic restrictions of the synthetic procedure and by the dependence of the particle geometry on the same reaction conditions. Thus, we further explore the range of control over the shell parameters that can be accomplished through post-synthesis processes, such as chemical etching and thermal annealing. },
author = {Ibáñez, Maria and Fan, Jiandong and Li, Wenhua and Cadavid, Doris and Nafria, Raquel and Carrete, Alex and Cabot, Andreu},
journal = {Chemistry of Materials},
number = {12},
pages = {3095 -- 3104},
publisher = {American Chemical Society},
title = {{Means and limits of control of the shell parameters in hollow nanoparticles obtained by the Kirkendall effect}},
doi = {10.1021/cm2006633},
volume = {23},
year = {2011},
}
@inproceedings{3350,
abstract = {A controller for a discrete game with ω-regular objectives requires attention if, intuitively, it requires measuring the state and switching from the current control action. Minimum attention controllers are preferable in modern shared implementations of cyber-physical systems because they produce the least burden on system resources such as processor time or communication bandwidth. We give algorithms to compute minimum attention controllers for ω-regular objectives in imperfect information discrete two-player games. We show a polynomial-time reduction from minimum attention controller synthesis to synthesis of controllers for mean-payoff parity objectives in games of incomplete information. This gives an optimal EXPTIME-complete synthesis algorithm. We show that the minimum attention controller problem is decidable for infinite state systems with finite bisimulation quotients. In particular, the problem is decidable for timed and rectangular automata.},
author = {Chatterjee, Krishnendu and Majumdar, Rupak},
editor = {Fahrenberg, Uli and Tripakis, Stavros},
location = {Aalborg, Denmark},
pages = {145 -- 159},
publisher = {Springer},
title = {{Minimum attention controller synthesis for omega regular objectives}},
doi = {10.1007/978-3-642-24310-3_11},
volume = {6919},
year = {2011},
}
@inproceedings{3351,
abstract = {In two-player games on graph, the players construct an infinite path through the game graph and get a reward computed by a payoff function over infinite paths. Over weighted graphs, the typical and most studied payoff functions compute the limit-average or the discounted sum of the rewards along the path. Besides their simple definition, these two payoff functions enjoy the property that memoryless optimal strategies always exist. In an attempt to construct other simple payoff functions, we define a class of payoff functions which compute an (infinite) weighted average of the rewards. This new class contains both the limit-average and the discounted sum functions, and we show that they are the only members of this class which induce memoryless optimal strategies, showing that there is essentially no other simple payoff functions.},
author = {Chatterjee, Krishnendu and Doyen, Laurent and Singh, Rohit},
editor = {Owe, Olaf and Steffen, Martin and Telle, Jan Arne},
location = {Oslo, Norway},
pages = {148 -- 159},
publisher = {Springer},
title = {{On memoryless quantitative objectives}},
doi = {10.1007/978-3-642-22953-4_13},
volume = {6914},
year = {2011},
}
@article{3352,
abstract = {Exploring the connection of biology with reactive systems to better understand living systems.},
author = {Fisher, Jasmin and Harel, David and Henzinger, Thomas A},
journal = {Communications of the ACM},
number = {10},
pages = {72 -- 82},
publisher = {ACM},
title = {{Biology as reactivity}},
doi = {10.1145/2001269.2001289},
volume = {54},
year = {2011},
}
@article{3353,
abstract = {Compositional theories are crucial when designing large and complex systems from smaller components. In this work we propose such a theory for synchronous concurrent systems. Our approach follows so-called interface theories, which use game-theoretic interpretations of composition and refinement. These are appropriate for systems with distinct inputs and outputs, and explicit conditions on inputs that must be enforced during composition. Our interfaces model systems that execute in an infinite sequence of synchronous rounds. At each round, a contract must be satisfied. The contract is simply a relation specifying the set of valid input/output pairs. Interfaces can be composed by parallel, serial or feedback composition. A refinement relation between interfaces is defined, and shown to have two main properties: (1) it is preserved by composition, and (2) it is equivalent to substitutability, namely, the ability to replace an interface by another one in any context. Shared refinement and abstraction operators, corresponding to greatest lower and least upper bounds with respect to refinement, are also defined. Input-complete interfaces, that impose no restrictions on inputs, and deterministic interfaces, that produce a unique output for any legal input, are discussed as special cases, and an interesting duality between the two classes is exposed. A number of illustrative examples are provided, as well as algorithms to compute compositions, check refinement, and so on, for finite-state interfaces.},
author = {Tripakis, Stavros and Lickly, Ben and Henzinger, Thomas A and Lee, Edward},
journal = {ACM Transactions on Programming Languages and Systems (TOPLAS)},
number = {4},
publisher = {ACM},
title = {{A theory of synchronous relational interfaces}},
doi = {10.1145/1985342.1985345},
volume = {33},
year = {2011},
}
@article{3354,
abstract = {We consider two-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine the successor state. We consider ω-regular winning conditions specified as parity objectives. Both players are allowed to use randomization when choosing their moves. We study the computation of the limit-winning set of states, consisting of the states where the sup-inf value of the game for player 1 is 1: in other words, a state is limit-winning if player 1 can ensure a probability of winning arbitrarily close to 1. We show that the limit-winning set can be computed in O(n^{2d+2}) time, where n is the size of the game structure and 2d is the number of priorities (or colors). The membership problem of whether a state belongs to the limit-winning set can be decided in NP ∩ coNP. While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms are considerably more involved than those for turn-based games. This is because concurrent games do not satisfy two of the most fundamental properties of turn-based parity games. First, in concurrent games limit-winning strategies require randomization; and second, they require infinite memory.},
author = {Chatterjee, Krishnendu and De Alfaro, Luca and Henzinger, Thomas A},
journal = {ACM Transactions on Computational Logic (TOCL)},
number = {4},
publisher = {ACM},
title = {{Qualitative concurrent parity games}},
doi = {10.1145/1970398.1970404},
volume = {12},
year = {2011},
}
@inproceedings{3355,
abstract = {Byzantine Fault Tolerant (BFT) protocols aim to improve the reliability of distributed systems. They enable systems to tolerate arbitrary failures in a bounded number of nodes. BFT protocols are usually proven correct for certain safety and liveness properties. However, recent studies have shown that the performance of state-of-the-art BFT protocols decreases drastically in the presence of even a single malicious node. This motivates a formal quantitative analysis of BFT protocols to investigate their performance characteristics under different scenarios. We present HyPerf, a new hybrid methodology based on model checking and simulation techniques for evaluating the performance of BFT protocols. We build a transition system corresponding to a BFT protocol and systematically explore the set of behaviors allowed by the protocol. We associate certain timing information with different operations in the protocol, like cryptographic operations and message transmission. After an elaborate state exploration, we use the time information to evaluate the performance characteristics of the protocol using simulation techniques. We integrate our framework in Mace, a tool for building and verifying distributed systems. We evaluate the performance of PBFT using our framework. We describe two different use-cases of our methodology. For the benign operation of the protocol, we use the time information as random variables to compute the probability distribution of the execution times. In the presence of faults, we estimate the worst-case performance of the protocol for various attacks that can be employed by malicious nodes. Our results show the importance of hybrid techniques in systematically analyzing the performance of large-scale systems.},
author = {Halalai, Raluca and Henzinger, Thomas A and Singh, Vasu},
location = {Aachen, Germany},
pages = {255 -- 264},
publisher = {IEEE},
title = {{Quantitative evaluation of BFT protocols}},
doi = {10.1109/QEST.2011.40},
year = {2011},
}
@inproceedings{3356,
abstract = {There is recently a significant effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions, aiming for a general and flexible framework for quantitative-oriented specifications. In the heart of quantitative objectives lies the accumulation of values along a computation. It is either the accumulated summation, as with the energy objectives, or the accumulated average, as with the mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point of time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire computation. We study the border of decidability for extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities by prefix-accumulation assertions and extending LTL with path-accumulation assertions, result in temporal logics whose model-checking problem is decidable. The extended logics allow to significantly extend the currently known energy and mean-payoff objectives. Moreover, the prefix-accumulation assertions may be refined with "controlled-accumulation", allowing, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that the fragment we point to is, in a sense, the maximal logic whose extension with prefix-accumulation assertions permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, and in particular CTL and LTL, makes the problem undecidable.},
author = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
location = {Toronto, Canada},
publisher = {IEEE},
title = {{Temporal specifications with accumulative values}},
doi = {10.1109/LICS.2011.33},
year = {2011},
}
@inproceedings{3357,
abstract = {We consider two-player graph games whose objectives are request-response condition, i.e conjunctions of conditions of the form "if a state with property Rq is visited, then later a state with property Rp is visited". The winner of such games can be decided in EXPTIME and the problem is known to be NP-hard. In this paper, we close this gap by showing that this problem is, in fact, EXPTIME-complete. We show that the problem becomes PSPACE-complete if we only consider games played on DAGs, and NP-complete or PTIME-complete if there is only one player (depending on whether he wants to enforce or spoil the request-response condition). We also present near-optimal bounds on the memory needed to design winning strategies for each player, in each case.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Horn, Florian},
editor = {Dediu, Adrian-Horia and Inenaga, Shunsuke and Martín-Vide, Carlos},
location = {Tarragona, Spain},
pages = {227 -- 237},
publisher = {Springer},
title = {{The complexity of request-response games}},
doi = {10.1007/978-3-642-21254-3_17},
volume = {6638},
year = {2011},
}
@inproceedings{3358,
abstract = {The static scheduling problem often arises as a fundamental problem in real-time systems and grid computing. We consider the problem of statically scheduling a large job expressed as a task graph on a large number of computing nodes, such as a data center. This paper solves the large-scale static scheduling problem using abstraction refinement, a technique commonly used in formal verification to efficiently solve computationally hard problems. A scheduler based on abstraction refinement first attempts to solve the scheduling problem with abstract representations of the job and the computing resources. As abstract representations are generally small, the scheduling can be done reasonably fast. If the obtained schedule does not meet specified quality conditions (like data center utilization or schedule makespan) then the scheduler refines the job and data center abstractions and, again solves the scheduling problem. We develop different schedulers based on abstraction refinement. We implemented these schedulers and used them to schedule task graphs from various computing domains on simulated data centers with realistic topologies. We compared the speed of scheduling and the quality of the produced schedules with our abstraction refinement schedulers against a baseline scheduler that does not use any abstraction. We conclude that abstraction refinement techniques give a significant speed-up compared to traditional static scheduling heuristics, at a reasonable cost in the quality of the produced schedules. We further used our static schedulers in an actual system that we deployed on Amazon EC2 and compared it against the Hadoop dynamic scheduler for large MapReduce jobs. Our experiments indicate that there is great potential for static scheduling techniques.},
author = {Henzinger, Thomas A and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
location = {Salzburg, Austria},
pages = {329 -- 342},
publisher = {ACM},
title = {{Scheduling large jobs by abstraction refinement}},
doi = {10.1145/1966445.1966476},
year = {2011},
}
@inproceedings{3359,
abstract = {Motivated by improvements in constraint-solving technology and by the increase of routinely available computational power, partial-program synthesis is emerging as an effective approach for increasing programmer productivity. The goal of the approach is to allow the programmer to specify a part of her intent imperatively (that is, give a partial program) and a part of her intent declaratively, by specifying which conditions need to be achieved or maintained. The task of the synthesizer is to construct a program that satisfies the specification. As an example, consider a partial program where threads access shared data without using any synchronization mechanism, and a declarative specification that excludes data races and deadlocks. The task of the synthesizer is then to place locks into the program code in order for the program to meet the specification.
In this paper, we argue that quantitative objectives are needed in partial-program synthesis in order to produce higher-quality programs, while enabling simpler specifications. Returning to the example, the synthesizer could construct a naive solution that uses one global lock for shared data. This can be prevented either by constraining the solution space further (which is error-prone and partly defeats the point of synthesis), or by optimizing a quantitative objective that models performance. Other quantitative notions useful in synthesis include fault tolerance, robustness, resource (memory, power) consumption, and information flow.},
author = {Cerny, Pavol and Henzinger, Thomas A},
location = {Taipei, Taiwan},
pages = {149 -- 154},
publisher = {ACM},
title = {{From boolean to quantitative synthesis}},
doi = {10.1145/2038642.2038666},
year = {2011},
}
@article{336,
abstract = {The growth kinetics of colloidal Bi2S3 nanorods was investigated. After nucleation, the length distribution of the growing Bi2S3 nanorods narrows with the reaction time until a bimodal length distribution appears. From this critical reaction time on, the smallest nanorods of the ensemble dissolve, feeding with monomer the growth of the largest ones. A comprehensive characterization of the size-distribution evolution of Bi2S3 nanorods is used here to illustrate the dependences of the anisotropic growth rates of cylindrical nanoparticles on the nanoparticle dimensions and the monomer concentration in solution. With this goal in mind, a diffusion-reaction model is presented to explain the origin of the experimentally obtained length distribution focusing mechanism. The model is able to reproduce the decrease of the growth rate in the nanorod axial direction with both its thickness and length. On the other hand, low lateral reaction rates prevent the nanorod thickness distribution to be focused. In both crystallographic growth directions, a concentration-dependent critical thickness exists, which discriminates between nanorods with positive growth rates and those dissolving in the reaction solution. },
author = {Ibáñez, Maria and Guardia, Pablo and Shavel, Alexey and Cadavid, Doris and Arbiol, Jordi and Morante, Joan and Cabot, Andreu},
journal = {Journal of Physical Chemistry C},
number = {16},
pages = {7947 -- 7955},
publisher = {American Chemical Society},
title = {{Growth kinetics of asymmetric Bi2S3 nanocrystals: Size distribution focusing in nanorods}},
doi = {10.1021/jp2002904},
volume = {115},
year = {2011},
}
@inproceedings{3360,
abstract = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, which values a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by lambda^i, where the discount factor lambda is a fixed rational number greater than 1. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, which reflects the assumption that earlier weights are more important than later weights. Determinizing automata is often essential, for example, in formal verification, where there are polynomial algorithms for comparing two deterministic NDAs, while the equivalence problem for NDAs is not known to be decidable. Unfortunately, however, discounted-sum automata are, in general, not determinizable: it is currently known that for every rational discount factor 1 < lambda < 2, there is an NDA with lambda (denoted lambda-NDA) that cannot be determinized. We provide positive news, showing that every NDA with an integral factor is determinizable. We also complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: we show that for every non-integral rational factor lambda, there is a nondeterminizable lambda-NDA. Finally, we prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. This shows that for integral discount factors, the class of NDAs forms an attractive specification formalism in quantitative formal verification. All our results hold equally for automata over finite words and for automata over infinite words. },
author = {Boker, Udi and Henzinger, Thomas A},
location = {Bergen, Norway},
pages = {82 -- 96},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {{Determinizing discounted-sum automata}},
doi = {10.4230/LIPIcs.CSL.2011.82},
volume = {12},
year = {2011},
}
@inproceedings{3361,
abstract = {In this paper, we investigate the computational complexity of quantitative information flow (QIF) problems. Information-theoretic quantitative relaxations of noninterference (based on Shannon entropy)have been introduced to enable more fine-grained reasoning about programs in situations where limited information flow is acceptable. The QIF bounding problem asks whether the information flow in a given program is bounded by a constant $d$. Our first result is that the QIF bounding problem is PSPACE-complete. The QIF memoryless synthesis problem asks whether it is possible to resolve nondeterministic choices in a given partial program in such a way that in the resulting deterministic program, the quantitative information flow is bounded by a given constant $d$. Our second result is that the QIF memoryless synthesis problem is also EXPTIME-complete. The QIF memoryless synthesis problem generalizes to QIF general synthesis problem which does not impose the memoryless requirement (that is, by allowing the synthesized program to have more variables then the original partial program). Our third result is that the QIF general synthesis problem is EXPTIME-hard.},
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A},
location = {Cernay-la-Ville, France},
pages = {205 -- 217},
publisher = {IEEE},
title = {{The complexity of quantitative information flow problems}},
doi = {10.1109/CSF.2011.21},
year = {2011},
}
@inproceedings{3362,
abstract = {State-transition systems communicating by shared variables have been the underlying model of choice for applications of model checking. Such formalisms, however, have difficulty with modeling process creation or death and communication reconfigurability. Here, we introduce “dynamic reactive modules” (DRM), a state-transition modeling formalism that supports dynamic reconfiguration and creation/death of processes. The resulting formalism supports two types of variables, data variables and reference variables. Reference variables enable changing the connectivity between processes and referring to instances of processes. We show how this new formalism supports parallel composition and refinement through trace containment. DRM provide a natural language for modeling (and ultimately reasoning about) biological systems and multiple threads communicating through shared variables.},
author = {Fisher, Jasmin and Henzinger, Thomas A and Nickovic, Dejan and Piterman, Nir and Singh, Anmol and Vardi, Moshe},
location = {Aachen, Germany},
pages = {404 -- 418},
publisher = {Springer},
title = {{Dynamic reactive modules}},
doi = {10.1007/978-3-642-23217-6_27},
volume = {6901},
year = {2011},
}
@unpublished{3363,
abstract = {We consider probabilistic automata on infinite words with acceptance defined by safety, reachability, Büchi, coBüchi, and limit-average conditions. We consider quantitative and qualitative decision problems. We present extensions and adaptations of proofs for probabilistic finite automata and present a complete characterization of the decidability and undecidability frontier of the quantitative and qualitative decision problems for probabilistic automata on infinite words.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Tracol, Mathieu},
pages = {19},
publisher = {ArXiv},
note = {arXiv preprint},
title = {{The decidability frontier for probabilistic automata on infinite words}},
year = {2011},
}
@article{3364,
abstract = {Molecular noise, which arises from the randomness of the discrete events in the cell, significantly influences fundamental biological processes. Discrete-state continuous-time stochastic models (CTMC) can be used to describe such effects, but the calculation of the probabilities of certain events is computationally expensive. We present a comparison of two analysis approaches for CTMC. On one hand, we estimate the probabilities of interest using repeated Gillespie simulation and determine the statistical accuracy that we obtain. On the other hand, we apply a numerical reachability analysis that approximates the probability distributions of the system at several time instances. We use examples of cellular processes to demonstrate the superiority of the reachability analysis if accurate results are required.},
author = {Didier, Frédéric and Henzinger, Thomas A and Mateescu, Maria and Wolf, Verena},
journal = {Theoretical Computer Science},
number = {21},
pages = {2128 -- 2141},
publisher = {Elsevier},
title = {{Approximation of event probabilities in noisy cellular processes}},
doi = {10.1016/j.tcs.2010.10.022},
volume = {412},
year = {2011},
}
@inproceedings{3365,
abstract = {We present the tool Quasy, a quantitative synthesis tool. Quasy takes qualitative and quantitative specifications and automatically constructs a system that satisfies the qualitative specification and optimizes the quantitative specification, if such a system exists. The user can choose between a system that satisfies and optimizes the specifications (a) under all possible environment behaviors or (b) under the most-likely environment behaviors given as a probability distribution on the possible input sequences. Quasy solves these two quantitative synthesis problems by reduction to instances of 2-player games and Markov Decision Processes (MDPs) with quantitative winning objectives. Quasy can also be seen as a game solver for quantitative games. Most notable, it can solve lexicographic mean-payoff games with 2 players, MDPs with mean-payoff objectives, and ergodic MDPs with mean-payoff parity objectives.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Singh, Rohit},
location = {Saarbrücken, Germany},
pages = {267 -- 271},
publisher = {Springer},
title = {{QUASY: quantitative synthesis tool}},
doi = {10.1007/978-3-642-19835-9_24},
volume = {6605},
year = {2011},
}
@inproceedings{3366,
abstract = {We present an algorithmic method for the quantitative, performance-aware synthesis of concurrent programs. The input consists of a nondeterministic partial program and of a parametric performance model. The nondeterminism allows the programmer to omit which (if any) synchronization construct is used at a particular program location. The performance model, specified as a weighted automaton, can capture system architectures by assigning different costs to actions such as locking, context switching, and memory and cache accesses. The quantitative synthesis problem is to automatically resolve the nondeterminism of the partial program so that both correctness is guaranteed and performance is optimal. As is standard for shared memory concurrency, correctness is formalized "specification free", in particular as race freedom or deadlock freedom. For worst-case (average-case) performance, we show that the problem can be reduced to 2-player graph games (with probabilistic transitions) with quantitative objectives. While we show, using game-theoretic methods, that the synthesis problem is Nexp-complete, we present an algorithmic method and an implementation that works efficiently for concurrent programs and performance models of practical interest. We have implemented a prototype tool and used it to synthesize finite-state concurrent programs that exhibit different programming patterns, for several performance models representing different architectures. },
author = {Cerny, Pavol and Chatterjee, Krishnendu and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
editor = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
location = {Snowbird, USA},
pages = {243 -- 259},
publisher = {Springer},
title = {{Quantitative synthesis for concurrent programs}},
doi = {10.1007/978-3-642-22110-1_20},
volume = {6806},
year = {2011},
}
@inproceedings{3367,
abstract = {In this paper, we present the first output-sensitive algorithm to compute the persistence diagram of a filtered simplicial complex. For any Γ>0, it returns only those homology classes with persistence at least Γ. Instead of the classical reduction via column operations, our algorithm performs rank computations on submatrices of the boundary matrix. For an arbitrary constant δ ∈ (0,1), the running time is O(C_{(1-δ)Γ} R(n) log n), where C_{(1-δ)Γ} is the number of homology classes with persistence at least (1-δ)Γ, n is the total number of simplices, and R(n) is the complexity of computing the rank of an n x n matrix with O(n) nonzero entries. Depending on the choice of the rank algorithm, this yields a deterministic O(C_{(1-δ)Γ} n^{2.376}) algorithm, a O(C_{(1-δ)Γ} n^{2.28}) Las-Vegas algorithm, or a O(C_{(1-δ)Γ} n^{2+ε}) Monte-Carlo algorithm for an arbitrary ε>0.},
author = {Chen, Chao and Kerber, Michael},
location = {Paris, France},
pages = {207 -- 216},
publisher = {ACM},
title = {{An output sensitive algorithm for persistent homology}},
doi = {10.1145/1998196.1998228},
year = {2011},
}