@article{249,
abstract = {A version of the Hardy-Littlewood circle method is developed for number fields K/ℚ and is used to show that nonsingular projective cubic hypersurfaces over K always have a K-rational point when they have dimension at least 8.},
author = {Browning, Timothy and Vishe, Pankaj},
journal = {Duke Mathematical Journal},
number = {10},
pages = {1825--1883},
publisher = {Duke University Press},
title = {{Cubic hypersurfaces and a version of the circle method for number fields}},
doi = {10.1215/00127094-2738530},
volume = {163},
year = {2014},
}
@article{3263,
abstract = {Adaptation in the retina is thought to optimize the encoding of natural light signals into sequences of spikes sent to the brain. While adaptive changes in retinal processing to the variations of the mean luminance level and second-order stimulus statistics have been documented before, no such measurements have been performed when higher-order moments of the light distribution change. We therefore measured the ganglion cell responses in the tiger salamander retina to controlled changes in the second (contrast), third (skew) and fourth (kurtosis) moments of the light intensity distribution of spatially uniform temporally independent stimuli. The skew and kurtosis of the stimuli were chosen to cover the range observed in natural scenes. We quantified adaptation in ganglion cells by studying linear-nonlinear models that capture well the retinal encoding properties across all stimuli. We found that the encoding properties of retinal ganglion cells change only marginally when higher-order statistics change, compared to the changes observed in response to the variation in contrast. By analyzing optimal coding in LN-type models, we showed that neurons can maintain a high information rate without large dynamic adaptation to changes in skew or kurtosis. This is because, for uncorrelated stimuli, spatio-temporal summation within the receptive field averages away non-gaussian aspects of the light intensity distribution.},
author = {Tkačik, Gašper and Ghosh, Anandamohan and Schneidman, Elad and Segev, Ronen},
journal = {PLOS ONE},
number = {1},
publisher = {Public Library of Science},
title = {{Adaptation to changes in higher-order stimulus statistics in the salamander retina}},
doi = {10.1371/journal.pone.0085841},
volume = {9},
year = {2014},
}
@article{357,
abstract = {Near-resonant Raman scattering measurements of zinc sulfide nanoparticles and thin films have been made and correlated to grain and particle size, respectively, using a 325 nm wavelength excitation source. The area ratios between the first, second, and third order peaks of ZnS identified as the T2(LO) mode decrease with increasing ZnS grain size. This is an effect attributed to changes in the bandgap energy from quantum confinement due to the varying grain size between the films/particles, as noted by a shift in the room temperature photoluminescence emission corresponding to the free exciton emission energy. While Raman scattering spectroscopy is typically limited to identification of phases and their crystalline properties, it is possible to attain more than such straightforward information by calibrating the spectral features to variations between sets of samples. These results open the possibility of making a quantitative grain size estimation in ZnS thin films and nanostructures, as well as in other material systems where ZnS may be expected as a secondary phase, such as Cu2ZnSnS4. Additionally, more commonly used excitation wavelengths for Raman scattering, such as 514 and 532 nm, are shown to be of limited use in characterizing ZnS thin films due to the extremely low Raman scattering efficiency of ZnS in films with sub-micron thicknesses.},
author = {Fairbrother, Andrew and Izquierdo-Roca, Victor and Fontané, Xavier and Ibanez, Maria and Cabot, Andreu and Saucedo, Edgardo and Pérez Rodríguez, Alejandro},
journal = {CrystEngComm},
number = {20},
pages = {4120--4125},
publisher = {Royal Society of Chemistry},
title = {{ZnS grain size effects on near-resonant Raman scattering: Optical non-destructive grain size estimation}},
doi = {10.1039/c3ce42578a},
volume = {16},
year = {2014},
}
@inproceedings{5810,
abstract = {A discrete spherical geodesic path between two voxels s and t lying on a discrete sphere is a/the 1-connected shortest path from s to t, comprising voxels of the discrete sphere intersected by the real plane passing through s, t, and the center of the sphere. We show that the set of sphere voxels intersected by the aforesaid real plane always contains a 1-connected cycle passing through s and t, and each voxel in this set lies within an isothetic distance of 3/2 from the concerned plane. Hence, to compute the path, the algorithm starts from s, and iteratively computes each voxel p of the path from the predecessor of p. A novel number-theoretic property and the 48-symmetry of discrete sphere are used for searching the 1-connected voxels comprising the path. The algorithm is output-sensitive, having its time and space complexities both linear in the length of the path. It can be extended for constructing 1-connected discrete 3D circles of arbitrary orientations, specified by a few appropriate input parameters. Experimental results and related analysis demonstrate its efficiency and versatility.},
author = {Biswas, Ranita and Bhowmick, Partha},
booktitle = {Discrete Geometry for Computer Imagery},
isbn = {9783642387081},
issn = {0302-9743},
location = {Siena, Italy},
pages = {396--409},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{On Finding Spherical Geodesic Paths and Circles in ℤ³}},
doi = {10.1007/978-3-319-09955-2_33},
volume = {8668},
year = {2014},
}
@incollection{6178,
abstract = {Mechanically coupled cells can generate forces driving cell and tissue morphogenesis during development. Visualization and measuring of these forces is of major importance to better understand the complexity of the biomechanic processes that shape cells and tissues. Here, we describe how UV laser ablation can be utilized to quantitatively assess mechanical tension in different tissues of the developing zebrafish and in cultures of primary germ layer progenitor cells ex vivo.},
author = {Smutny, Michael and Behrndt, Martin and Campinho, Pedro and Ruprecht, Verena and Heisenberg, Carl-Philipp J},
booktitle = {Tissue Morphogenesis},
editor = {Nelson, Celeste},
isbn = {9781493911639},
issn = {1064-3745},
pages = {219--235},
publisher = {Springer},
title = {{UV laser ablation to measure cell and tissue-generated forces in the zebrafish embryo in vivo and ex vivo}},
doi = {10.1007/978-1-4939-1164-6_15},
volume = {1189},
year = {2014},
}
@inproceedings{768,
abstract = {Task allocation is a classic distributed problem in which a set of p potentially faulty processes must cooperate to perform a set of tasks. This paper considers a new dynamic version of the problem, in which tasks are injected adversarially during an asynchronous execution. We give the first asynchronous shared-memory algorithm for dynamic task allocation, and we prove that our solution is optimal within logarithmic factors. The main algorithmic idea is a randomized concurrent data structure called a dynamic to-do tree, which allows processes to pick new tasks to perform at random from the set of available tasks, and to insert tasks at random empty locations in the data structure. Our analysis shows that these properties avoid duplicating work unnecessarily. On the other hand, since the adversary controls the input as well as the scheduling, it can induce executions where lots of processes contend for a few available tasks, which is inefficient. However, we prove that every algorithm has the same problem: given an arbitrary input, if OPT is the worst-case complexity of the optimal algorithm on that input, then the expected work complexity of our algorithm on the same input is O(OPT log³ m), where m is an upper bound on the number of tasks that are present in the system at any given time.},
author = {Alistarh, Dan and Aspnes, James and Bender, Michael A and Gelashvili, Rati and Gilbert, Seth},
booktitle = {Proceedings of the Twenty-Fifth Annual ACM-SIAM Symposium on Discrete Algorithms},
pages = {416--435},
publisher = {SIAM},
title = {{Dynamic task allocation in asynchronous shared memory}},
doi = {10.1137/1.9781611973402.31},
year = {2014},
}
@inproceedings{770,
abstract = {Dynamic memory reclamation is arguably the biggest open problem in concurrent data structure design: All known solutions induce high overhead, or must be customized to the specific data structure by the programmer, or both. This paper presents StackTrack, the first concurrent memory reclamation scheme that can be applied automatically by a compiler, while maintaining efficiency. StackTrack eliminates most of the expensive bookkeeping required for memory reclamation by leveraging the power of hardware transactional memory (HTM) in a new way: it tracks thread variables dynamically, and in an atomic fashion. This effectively makes all memory references visible without having threads pay the overhead of writing out this information. Our empirical results show that this new approach matches or outperforms prior, non-automated, techniques.},
author = {Alistarh, Dan and Eugster, Patrick T and Herlihy, Maurice P and Matveev, Alexander and Shavit, Nir N},
booktitle = {Proceedings of the Ninth European Conference on Computer Systems},
publisher = {ACM},
title = {{StackTrack: An automated transactional approach to concurrent memory reclamation}},
doi = {10.1145/2592798.2592808},
year = {2014},
}
@inproceedings{775,
abstract = {The long-lived renaming problem appears in shared-memory systems where a set of threads need to register and deregister frequently from the computation, while concurrent operations scan the set of currently registered threads. Instances of this problem show up in concurrent implementations of transactional memory, flat combining, thread barriers, and memory reclamation schemes for lock-free data structures. In this paper, we analyze a randomized solution for long-lived renaming. The algorithmic technique we consider, called the Level Array, has previously been used for hashing and one-shot (single-use) renaming. Our main contribution is to prove that, in long-lived executions, where processes may register and deregister polynomially many times, the technique guarantees constant steps on average and O (log log n) steps with high probability for registering, unit cost for deregistering, and O (n) steps for collect queries, where n is an upper bound on the number of processes that may be active at any point in time. We also show that the algorithm has the surprising property that it is self-healing: under reasonable assumptions on the schedule, operations running while the data structure is in a degraded state implicitly help the data structure re-balance itself. This subtle mechanism obviates the need for expensive periodic rebuilding procedures. Our benchmarks validate this approach, showing that, for typical use parameters, the average number of steps a process takes to register is less than two and the worst-case number of steps is bounded by six, even in executions with billions of operations. We contrast this with other randomized implementations, whose worst-case behavior we show to be unreliable, and with deterministic implementations, whose cost is linear in n.},
author = {Alistarh, Dan and Kopinsky, Justin and Matveev, Alexander and Shavit, Nir N},
booktitle = {Proceedings of the IEEE 34th International Conference on Distributed Computing Systems},
pages = {348--357},
publisher = {IEEE},
title = {{The {LevelArray}: A fast, practical long-lived renaming algorithm}},
doi = {10.1109/ICDCS.2014.43},
year = {2014},
}
@article{2001,
abstract = {Antibiotics affect bacterial cell physiology at many levels. Rather than just compensating for the direct cellular defects caused by the drug, bacteria respond to antibiotics by changing their morphology, macromolecular composition, metabolism, gene expression and possibly even their mutation rate. Inevitably, these processes affect each other, resulting in a complex response with changes in the expression of numerous genes. Genome‐wide approaches can thus help in gaining a comprehensive understanding of bacterial responses to antibiotics. In addition, a combination of experimental and theoretical approaches is needed for identifying general principles that underlie these responses. Here, we review recent progress in our understanding of bacterial responses to antibiotics and their combinations, focusing on effects at the levels of growth rate and gene expression. We concentrate on studies performed in controlled laboratory conditions, which combine promising experimental techniques with quantitative data analysis and mathematical modeling. While these basic research approaches are not immediately applicable in the clinic, uncovering the principles and mechanisms underlying bacterial responses to antibiotics may, in the long term, contribute to the development of new treatment strategies to cope with and prevent the rise of resistant pathogenic bacteria.},
author = {Mitosch, Karin and Bollenbach, Tobias},
journal = {Environmental Microbiology Reports},
number = {6},
pages = {545--557},
publisher = {Wiley},
title = {{Bacterial responses to antibiotics and their combinations}},
doi = {10.1111/1758-2229.12190},
volume = {6},
year = {2014},
}
@article{927,
abstract = {Morphogenesis during embryo development requires the coordination of mechanical forces to generate the macroscopic shapes of organs. We propose a minimal theoretical model, based on cell adhesion and actomyosin contractility, which describes the various shapes of epithelial cells and the bending and buckling of epithelial sheets, as well as the relative stability of cellular tubes and spheres. We show that, to understand these processes, a full 3D description of the cells is needed, but that simple scaling laws can still be derived. The morphologies observed in vivo can be understood as stable points of mechanical equations and the transitions between them are either continuous or discontinuous. We then focus on epithelial sheet bending, a ubiquitous morphogenetic process. We calculate the curvature of an epithelium as a function of actin belt tension as well as of cell-cell and cell-substrate tension. The model allows for a comparison of the relative stabilities of spherical or cylindrical cellular structures (acini or tubes). Finally, we propose a unique type of buckling instability of epithelia, driven by a flattening of individual cell shapes, and discuss experimental tests to verify our predictions.},
author = {Hannezo, Edouard and Prost, Jacques B and Joanny, Jean F},
journal = {Proceedings of the National Academy of Sciences},
number = {1},
pages = {27--32},
publisher = {National Academy of Sciences},
title = {{Theory of epithelial sheet morphology in three dimensions}},
doi = {10.1073/pnas.1312076111},
volume = {111},
year = {2014},
}
@article{845,
abstract = {Recombination between double-stranded DNA molecules is a key genetic process which occurs in a wide variety of organisms. Usually, crossing-over (CO) occurs during meiosis between genotypes with 98.0-99.9% sequence identity, because within-population nucleotide diversity only rarely exceeds 2%. However, some species are hypervariable and it is unclear how CO can occur between genotypes with less than 90% sequence identity. Here, we study CO in Schizophyllum commune, a hypervariable cosmopolitan basidiomycete mushroom, a frequently encountered decayer of woody substrates. We crossed two haploid individuals, from the United States and from Russia, and obtained genome sequences for their 17 offspring. The average genetic distance between the parents was 14%, making it possible to study CO at very high resolution. We found reduced levels of linkage disequilibrium between loci flanking the CO sites indicating that they are mostly confined to hotspots of recombination. Furthermore, CO events preferentially occurred in regions under stronger negative selection, in particular within exons that showed reduced levels of nucleotide diversity. Apparently, in hypervariable species CO must avoid regions of higher divergence between the recombining genomes due to limitations imposed by the mismatch repair system, with regions under strong negative selection providing the opportunity for recombination. These patterns are opposite to those observed in a number of less variable species indicating that population genomics of hypervariable species may reveal novel biological phenomena.},
author = {Seplyarskiy, Vladimir B and Logacheva, Maria D and Penin, Aleksey A and Baranová, Maria A and Leushkin, Evgeny V and Demidenko, Natalia V and Klepikova, Anna V and Kondrashov, Fyodor and Kondrashov, Alexey S and James, Timothy Y},
journal = {Molecular Biology and Evolution},
number = {11},
pages = {3016--3025},
publisher = {Oxford University Press},
title = {{Crossing-over in a hypervariable species preferentially occurs in regions of high local similarity}},
doi = {10.1093/molbev/msu242},
volume = {31},
year = {2014},
}
@article{852,
abstract = {Rapid divergence of gene copies after duplication is thought to determine the fate of the copies and evolution of novel protein functions. However, data on how long the gene copies continue to experience an elevated rate of evolution remain scarce. Standard theory of gene duplications based on some level of genetic redundancy of gene copies predicts that the period of accelerated evolution must end relatively quickly. Using a maximum-likelihood approach we estimate preduplication, initial postduplication, and recent postduplication rates of evolution that occurred in the mammalian lineage. We find that both gene copies experience a similar in magnitude acceleration in their rate of evolution. The copy located in the original genomic position typically returns to the preduplication rates of evolution in a short period of time. The burst of faster evolution of the copy that is located in a new genomic position typically lasts longer. Furthermore, the fast-evolving copies on average continue to evolve faster than the preduplication rates far longer than predicted by standard theory of gene duplications. We hypothesize that the prolonged elevated rates of evolution are determined by functional properties that were acquired during, or soon after, the gene duplication event.},
author = {Rosello, Oriol P and Kondrashov, Fyodor},
journal = {Genome Biology and Evolution},
number = {8},
pages = {1949--1955},
publisher = {Oxford University Press},
title = {{Long-term asymmetrical acceleration of protein evolution after gene duplication}},
doi = {10.1093/gbe/evu159},
volume = {6},
year = {2014},
}
@inproceedings{2082,
abstract = {NMAC is a mode of operation which turns a fixed input-length keyed hash function f into a variable input-length function. A practical single-key variant of NMAC called HMAC is a very popular and widely deployed message authentication code (MAC). Security proofs and attacks for NMAC can typically be lifted to HMAC. NMAC was introduced by Bellare, Canetti and Krawczyk [Crypto'96], who proved it to be a secure pseudorandom function (PRF), and thus also a MAC, assuming that (1) f is a PRF and (2) the function we get when cascading f is weakly collision-resistant. Unfortunately, HMAC is typically instantiated with cryptographic hash functions like MD5 or SHA-1 for which (2) has been found to be wrong. To restore the provable guarantees for NMAC, Bellare [Crypto'06] showed its security based solely on the assumption that f is a PRF, albeit via a non-uniform reduction. - Our first contribution is a simpler and uniform proof for this fact: If f is an ε-secure PRF (against q queries) and a δ-non-adaptively secure PRF (against q queries), then NMAC f is an (ε+ℓqδ)-secure PRF against q queries of length at most ℓ blocks each. - We then show that this ε+ℓqδ bound is basically tight. For the most interesting case where ℓqδ ≥ ε we prove this by constructing an f for which an attack with advantage ℓqδ exists. This also violates the bound O(ℓε) on the PRF-security of NMAC recently claimed by Koblitz and Menezes. - Finally, we analyze the PRF-security of a modification of NMAC called NI [An and Bellare, Crypto'99] that differs mainly by using a compression function with an additional keying input. This avoids the constant rekeying on multi-block messages in NMAC and allows for a security proof starting by the standard switch from a PRF to a random function, followed by an information-theoretic analysis. We carry out such an analysis, obtaining a tight ℓq²/2ᶜ bound for this step, improving over the trivial bound of ℓ²q²/2ᶜ. 
The proof borrows combinatorial techniques originally developed for proving the security of CBC-MAC [Bellare et al., Crypto'05].},
author = {Gazi, Peter and Pietrzak, Krzysztof Z and Rybar, Michal},
booktitle = {Advances in Cryptology -- CRYPTO 2014},
editor = {Garay, Juan and Gennaro, Rosario},
location = {Santa Barbara, USA},
number = {1},
pages = {113--130},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{The exact PRF-security of NMAC and HMAC}},
doi = {10.1007/978-3-662-44371-2_7},
volume = {8616},
year = {2014},
}
@article{97,
abstract = {The distribution of Coulomb blockade peak heights as a function of magnetic field is investigated experimentally in a Ge-Si nanowire quantum dot. Strong spin-orbit coupling in this hole-gas system leads to antilocalization of Coulomb blockade peaks, consistent with theory. In particular, the peak height distribution has its maximum away from zero at zero magnetic field, with an average that decreases with increasing field. Magnetoconductance in the open-wire regime places a bound on the spin-orbit length (lso < 20 nm), consistent with values extracted in the Coulomb blockade regime (lso < 25 nm).},
author = {Higginbotham, Andrew P and Kuemmeth, Ferdinand and Larsen, Thorvald and Fitzpatrick, Mattias and Yao, Jun and Yan, Hao and Lieber, Charles and Marcus, Charles},
journal = {Physical Review Letters},
number = {21},
publisher = {American Physical Society},
title = {{Antilocalization of Coulomb blockade in a Ge/Si nanowire}},
doi = {10.1103/PhysRevLett.112.216806},
volume = {112},
year = {2014},
}
@article{977,
abstract = {We propose a method for detecting many-body localization (MBL) in disordered spin systems. The method involves pulsed coherent spin manipulations that probe the dephasing of a given spin due to its entanglement with a set of distant spins. It allows one to distinguish the MBL phase from a noninteracting localized phase and a delocalized phase. In particular, we show that for a properly chosen pulse sequence the MBL phase exhibits a characteristic power-law decay reflecting its slow growth of entanglement. We find that this power-law decay is robust with respect to thermal and disorder averaging, provide numerical simulations supporting our results, and discuss possible experimental realizations in solid-state and cold-atom systems.},
author = {Serbyn, Maksym and Knap, Michael J and Gopalakrishnan, Sarang and Papić, Zlatko and Yao, Norman Y and Laumann, Chris R and Abanin, Dmitry A and Lukin, Mikhail D and Demler, Eugene A},
journal = {Physical Review Letters},
number = {14},
publisher = {American Physical Society},
title = {{Interferometric probes of many-body localization}},
doi = {10.1103/PhysRevLett.113.147204},
volume = {113},
year = {2014},
}
@misc{5428,
abstract = {Simulation is an attractive alternative for language inclusion for automata as it is an under-approximation of language inclusion, but usually has much lower complexity. For non-deterministic automata, while language inclusion is PSPACE-complete, simulation can be computed in polynomial time. Simulation has also been extended in two orthogonal directions, namely, (1) fair simulation, for simulation over specified set of infinite runs; and (2) quantitative simulation, for simulation between weighted automata. Again, while fair trace inclusion is PSPACE-complete, fair simulation can be computed in polynomial time. For weighted automata, the (quantitative) language inclusion problem is undecidable for mean-payoff automata and the decidability is open for discounted-sum automata, whereas the (quantitative) simulation reduces to mean-payoff games and discounted-sum games, which admit pseudo-polynomial time algorithms.
In this work, we study (quantitative) simulation for weighted automata with Büchi acceptance conditions, i.e., we generalize fair simulation from non-weighted automata to weighted automata. We show that imposing Büchi acceptance conditions on weighted automata changes many fundamental properties of the simulation games. For example, whereas for mean-payoff and discounted-sum games, the players do not need memory to play optimally; we show in contrast that for simulation games with Büchi acceptance conditions, (i) for mean-payoff objectives, optimal strategies for both players require infinite memory in general, and (ii) for discounted-sum objectives, optimal strategies need not exist for both players. While the simulation games with Büchi acceptance conditions are more complicated (e.g., due to infinite-memory requirements for mean-payoff objectives) as compared to their counterpart without Büchi acceptance conditions, we still present pseudo-polynomial time algorithms to solve simulation games with Büchi acceptance conditions for both weighted mean-payoff and weighted discounted-sum automata.},
author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Velner, Yaron},
issn = {2664-1690},
pages = {26},
publisher = {IST Austria},
title = {{Quantitative fair simulation games}},
doi = {10.15479/AT:IST-2014-315-v1-1},
year = {2014},
}
@article{1375,
abstract = {We consider directed graphs where each edge is labeled with an integer weight and study the fundamental algorithmic question of computing the value of a cycle with minimum mean weight. Our contributions are twofold: (1) First we show that the algorithmic question is reducible to the problem of a logarithmic number of min-plus matrix multiplications of n×n-matrices, where n is the number of vertices of the graph. (2) Second, when the weights are nonnegative, we present the first (1+ε)-approximation algorithm for the problem and the running time of our algorithm is Õ(nω log³(nW/ε)/ε), where O(nω) is the time required for the classic n×n-matrix multiplication and W is the maximum value of the weights. With an additional O(log(nW/ε)) factor in space a cycle with approximately optimal weight can be computed within the same time bound.},
author = {Chatterjee, Krishnendu and Henzinger, Monika and Krinninger, Sebastian and Loitzenbauer, Veronika and Raskin, Michael},
journal = {Theoretical Computer Science},
number = {C},
pages = {104--116},
publisher = {Elsevier},
title = {{Approximating the minimum cycle mean}},
doi = {10.1016/j.tcs.2014.06.031},
volume = {547},
year = {2014},
}
@article{2234,
abstract = {We study Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) functions. We consider two different objectives, namely, expectation and satisfaction objectives. Given an MDP with κ limit-average functions, in the expectation objective the goal is to maximize the expected limit-average value, and in the satisfaction objective the goal is to maximize the probability of runs such that the limit-average value stays above a given vector. We show that under the expectation objective, in contrast to the case of one limit-average function, both randomization and memory are necessary for strategies even for ε-approximation, and that finite-memory randomized strategies are sufficient for achieving Pareto optimal values. Under the satisfaction objective, in contrast to the case of one limit-average function, infinite memory is necessary for strategies achieving a specific value (i.e. randomized finite-memory strategies are not sufficient), whereas memoryless randomized strategies are sufficient for ε-approximation, for all ε > 0. We further prove that the decision problems for both expectation and satisfaction objectives can be solved in polynomial time and the trade-off curve (Pareto curve) can be ε-approximated in time polynomial in the size of the MDP and 1/ε, and exponential in the number of limit-average functions, for all ε > 0. Our analysis also reveals flaws in previous work for MDPs with multiple mean-payoff functions under the expectation objective, corrects the flaws, and allows us to obtain improved results.},
author = {Brázdil, Tomáš and Brožek, Václav and Chatterjee, Krishnendu and Forejt, Vojtěch and Kučera, Antonín},
issn = {1860-5974},
journal = {Logical Methods in Computer Science},
number = {1},
publisher = {International Federation of Computational Logic},
title = {{Markov decision processes with multiple long-run average objectives}},
doi = {10.2168/LMCS-10(1:13)2014},
volume = {10},
year = {2014},
}
@inproceedings{2239,
abstract = {The analysis of the energy consumption of software is an important goal for quantitative formal methods. Current methods, using weighted transition systems or energy games, model the energy source as an ideal resource whose status is characterized by one number, namely the amount of remaining energy. Real batteries, however, exhibit behaviors that can deviate substantially from an ideal energy resource. Based on a discretization of a standard continuous battery model, we introduce battery transition systems. In this model, a battery is viewed as consisting of two parts—the available-charge tank and the bound-charge tank. Any charge or discharge is applied to the available-charge tank. Over time, the energy from each tank diffuses to the other tank. Battery transition systems are infinite state systems that, being not well-structured, fall into no decidable class that is known to us. Nonetheless, we are able to prove that the ω-regular model-checking problem is decidable for battery transition systems. We also present a case study on the verification of control programs for energy-constrained semi-autonomous robots.},
author = {Boker, Udi and Henzinger, Thomas A and Radhakrishna, Arjun},
isbn = {978-145032544-8},
location = {San Diego, USA},
number = {1},
pages = {595--606},
publisher = {ACM},
title = {{Battery transition systems}},
doi = {10.1145/2535838.2535875},
volume = {49},
year = {2014},
}
@inproceedings{2190,
abstract = {We present a new algorithm to construct a (generalized) deterministic Rabin automaton for an LTL formula φ. The automaton is the product of a master automaton and an array of slave automata, one for each G-subformula of φ. The slave automaton for G ψ is in charge of recognizing whether FG ψ holds. As opposed to standard determinization procedures, the states of all our automata have a clear logical structure, which allows for various optimizations. Our construction subsumes former algorithms for fragments of LTL. Experimental results show improvement in the sizes of the resulting automata compared to existing methods.},
author = {Esparza, Javier and Křetínský, Jan},
booktitle = {Computer Aided Verification},
pages = {192--208},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
title = {{From LTL to deterministic automata: A safraless compositional approach}},
doi = {10.1007/978-3-319-08867-9_13},
volume = {8559},
year = {2014},
}