@misc{9946, abstract = {We argue that the time is ripe to investigate differential monitoring, in which the specification of a program's behavior is implicitly given by a second program implementing the same informal specification. Similar ideas have been proposed before, and are currently implemented in restricted form for testing and specialized run-time analyses, aspects of which we combine. We discuss the challenges of implementing differential monitoring as a general-purpose, black-box run-time monitoring framework, and present promising results of a preliminary implementation, showing low monitoring overheads for diverse programs.}, author = {Mühlböck, Fabian and Henzinger, Thomas A}, issn = {2664-1690}, keywords = {run-time verification, software engineering, implicit specification}, pages = {17}, publisher = {IST Austria}, title = {{Differential monitoring}}, doi = {10.15479/AT:ISTA:9946}, year = {2021}, } @misc{8067, abstract = {With the lithium-ion technology approaching its intrinsic limit with graphite-based anodes, lithium metal has recently been receiving renewed interest from the battery community as a potential high-capacity anode for next-generation rechargeable batteries. In this focus paper, we review the main advances in this field since the first attempts in the mid-1970s. Strategies for enabling reversible cycling and avoiding dendrite growth are thoroughly discussed, including specific applications in all-solid-state (polymeric and inorganic), lithium-sulphur, and Li-O2 (air) batteries. Particular attention is paid to reviewing recent developments regarding prototype manufacturing and the current state-of-the-art of these battery technologies with respect to the 2030 targets of the EU Integrated Strategic Energy Technology Plan (SET-Plan) Action 7.}, author = {Varzi, Alberto and Thanner, Katharina and Scipioni, Roberto and Di Lecce, Daniele and Hassoun, Jusef and Dörfler, Susanne and Altheus, Holger and Kaskel, Stefan and Prehal, Christian and Freunberger, Stefan Alexander}, issn = {2664-1690}, keywords = {Battery, Lithium metal, Lithium-sulphur, Lithium-air, All-solid-state}, pages = {63}, publisher = {IST Austria}, title = {{Current status and future perspectives of Lithium metal batteries}}, doi = {10.15479/AT:ISTA:8067}, year = {2020}, } @misc{5457, abstract = {We consider the problem of expected cost analysis over nondeterministic probabilistic programs, which aims at automated methods for analyzing the resource usage of such programs. Previous approaches for this problem could only handle nonnegative bounded costs. However, in many scenarios, such as queuing networks or the analysis of cryptocurrency protocols, both positive and negative costs are necessary and the costs are unbounded as well. In this work, we present a sound and efficient approach to obtain polynomial bounds on the expected accumulated cost of nondeterministic probabilistic programs. Our approach can handle (a) general positive and negative costs with bounded updates in variables; and (b) nonnegative costs with general updates to variables. We show that several natural examples which could not be handled by previous approaches are captured in our framework. Moreover, our approach leads to an efficient polynomial-time algorithm, while no previous approach for cost analysis of probabilistic programs could guarantee polynomial runtime.
Finally, we show the effectiveness of our approach by presenting experimental results on a variety of programs, motivated by real-world applications, for which we efficiently synthesize tight resource-usage bounds.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4 and Anonymous, 5 and Anonymous, 6}, issn = {2664-1690}, pages = {27}, publisher = {IST Austria}, title = {{Cost analysis of nondeterministic probabilistic programs}}, year = {2018}, } @misc{5455, abstract = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, which include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n·α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m·log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library to the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth. Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.}, author = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {37}, publisher = {IST Austria}, title = {{Optimal Dyck reachability for data-dependence and alias analysis}}, doi = {10.15479/AT:IST-2017-870-v1-1}, year = {2017}, } @misc{5456, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration).
As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only a few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence. 1. For acyclic architectures, our algorithm is guaranteed to explore exactly one representative trace from each observation class, while spending polynomial time per class. Hence, our algorithm is optimal wrt the observation equivalence, and in several cases explores exponentially fewer traces than any enumerative method based on the Mazurkiewicz equivalence. 2. For cyclic architectures, we consider an equivalence between traces which is finer than the observation equivalence but coarser than the Mazurkiewicz equivalence, and in some cases exponentially coarser. Our data-centric DPOR algorithm remains optimal under this trace equivalence. Finally, we perform a basic experimental comparison between the existing Mazurkiewicz-based DPOR and our data-centric DPOR on a set of academic benchmarks. Our results show a significant reduction in both running time and the number of explored equivalence classes.}, author = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil}, issn = {2664-1690}, pages = {36}, publisher = {IST Austria}, title = {{Data-centric dynamic partial order reduction}}, doi = {10.15479/AT:IST-2017-872-v1-1}, year = {2017}, } @misc{6426, abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer.
We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.}, author = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz}, issn = {2664-1690}, pages = {28}, publisher = {IST Austria}, title = {{Synchronizing the asynchronous}}, doi = {10.15479/AT:IST-2018-853-v2-2}, year = {2017}, } @misc{5445, abstract = {We consider the quantitative analysis problem for interprocedural control-flow graphs (ICFGs). The input consists of an ICFG, a positive weight function that assigns every transition a positive integer, and a labelling of the transitions (events) as good, bad, and neutral events. The weight function assigns to each transition a numerical value that represents a measure of how good or bad an event is. The quantitative analysis problem asks whether there is a run of the ICFG where the ratio of the sum of the numerical weights of good events versus the sum of weights of bad events in the long run is at least a given threshold (or, equivalently, to compute the maximal ratio among all valid paths in the ICFG). The quantitative analysis problem for ICFGs can be solved in polynomial time, and we present an efficient and practical algorithm for the problem. We show that several problems relevant for static program analysis, such as estimating the worst-case execution time of a program or the average energy consumption of a mobile application, can be modeled in our framework. We have implemented our algorithm as a tool in the Java Soot framework. We demonstrate the effectiveness of our approach with two case studies. First, we show that our framework provides a sound approach (no false positives) for the analysis of inefficiently-used containers. Second, we show that our approach can also be used for static profiling of programs, which reasons about methods that are frequently invoked. Our experimental results show that our tool scales to relatively large benchmarks, and discovers relevant and useful information that can be used to optimize the performance of the programs.}, author = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Velner, Yaron}, issn = {2664-1690}, pages = {33}, publisher = {IST Austria}, title = {{Quantitative interprocedural analysis}}, doi = {10.15479/AT:IST-2016-523-v1-1}, year = {2016}, } @misc{5449, abstract = {The fixation probability is the probability that a new mutant introduced in a homogeneous population eventually takes over the entire population. The fixation probability is a fundamental quantity of natural selection, and is known to depend on the population structure. Amplifiers of natural selection are population structures which increase the fixation probability of advantageous mutants, as compared to the baseline case of well-mixed populations. In this work we focus on symmetric population structures represented as undirected graphs. In the regime of undirected graphs, the strongest amplifier known has been the Star graph, and the existence of undirected graphs with stronger amplification properties has remained open for over a decade. In this work we present the Comet and Comet-swarm families of undirected graphs.
We show that for a range of fitness values of the mutants, the Comet and Comet-swarm graphs have fixation probability strictly larger than the fixation probability of the Star graph, for fixed population size and in the limit of large populations, respectively.}, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {2664-1690}, pages = {22}, publisher = {IST Austria}, title = {{Amplification on undirected population structures: Comets beat stars}}, doi = {10.15479/AT:IST-2016-648-v1-1}, year = {2016}, } @misc{5446, abstract = {We study the problem of developing efficient approaches for proving termination of recursive programs with one-dimensional arrays. Ranking functions serve as a sound and complete approach for proving termination of non-recursive programs without array operations. First, we generalize ranking functions to the notion of measure functions, and prove that measure functions (i) provide a sound method to prove termination of recursive programs (with one-dimensional arrays), and (ii) are both sound and complete over recursive programs without array operations. Our second contribution is the synthesis of measure functions of specific forms in polynomial time. More precisely, we prove that (i) polynomial measure functions over recursive programs can be synthesized in polynomial time through Farkas’ Lemma and Handelman’s Theorem, and (ii) measure functions involving logarithm and exponentiation can be synthesized in polynomial time through abstraction of logarithmic or exponential terms and Handelman’s Theorem. A key application of our method is the worst-case analysis of recursive programs. While previous methods obtain worst-case polynomial bounds of the form O(n^k), where k is an integer, our polynomial-time methods can synthesize bounds of the form O(n log n), as well as O(n^x), where x is not an integer. We show the applicability of our automated technique to obtain the worst-case complexity of classical recursive algorithms such as (i) Merge-Sort and the divide-and-conquer algorithm for the Closest-Pair problem, where we obtain an O(n log n) worst-case bound, and (ii) Karatsuba’s algorithm for polynomial multiplication and Strassen’s algorithm for matrix multiplication, where we obtain an O(n^x) bound, where x is not an integer and is close to the best-known bounds for the respective algorithms. Finally, we present experimental results to demonstrate the effectiveness of our approach.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3}, issn = {2664-1690}, pages = {26}, publisher = {IST Austria}, title = {{Termination and worst-case analysis of recursive programs}}, year = {2016}, } @misc{5447, abstract = {We consider the problem of developing automated techniques to aid the average-case complexity analysis of programs. Several classical textbook algorithms have quite efficient average-case complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., QUICK-SORT) or completely ineffective (e.g., COUPON-COLLECTOR). Since the main focus of average-case analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear, or almost-linear (O(log n), O(n), O(n · log n), respectively, where n represents the size of the input). Our main contribution is a sound approach for deriving such average-case bounds for randomized recursive programs.
Our approach is efficient (a simple linear-time algorithm), and it is based on (a) the analysis of recurrence relations induced by randomized algorithms, and (b) a guess-and-check technique. Our approach can infer the asymptotically optimal average-case bounds for classical randomized algorithms, including RANDOMIZED-SEARCH, QUICK-SORT, QUICK-SELECT, and COUPON-COLLECTOR, where the worst-case bounds are either inefficient (such as linear compared to the logarithmic average-case bound, or quadratic compared to the linear or almost-linear average-case bound) or ineffective. We have implemented our approach, and the experimental results show that we obtain the bounds efficiently for various classical algorithms.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3}, issn = {2664-1690}, pages = {20}, publisher = {IST Austria}, title = {{Average-case analysis of programs: Automated recurrence analysis for almost-linear bounds}}, year = {2016}, } @misc{5453, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {2664-1690}, pages = {34}, publisher = {IST Austria}, title = {{Arbitrarily strong amplifiers of natural selection}}, doi = {10.15479/AT:IST-2017-749-v3-1}, year = {2016}, } @misc{5451, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {2664-1690}, pages = {34}, publisher = {IST Austria}, title = {{Strong amplifiers of natural selection}}, doi = {10.15479/AT:IST-2016-728-v1-1}, year = {2016}, } @misc{5448, abstract = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only a few representative traces from each class. We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence. 1. For acyclic architectures, our algorithm is guaranteed to explore exactly one representative trace from each observation class, while spending polynomial time per class. Hence, our algorithm is optimal wrt the observation equivalence, and in several cases explores exponentially fewer traces than any enumerative method based on the Mazurkiewicz equivalence. 2. For cyclic architectures, we consider an equivalence between traces which is finer than the observation equivalence but coarser than the Mazurkiewicz equivalence, and in some cases exponentially coarser. Our data-centric DPOR algorithm remains optimal under this trace equivalence. Finally, we perform a basic experimental comparison between the existing Mazurkiewicz-based DPOR and our data-centric DPOR on a set of academic benchmarks.
Our results show a significant reduction in both running time and the number of explored equivalence classes.}, author = {Anonymous, 1 and Anonymous, 2 and Anonymous, 3 and Anonymous, 4}, issn = {2664-1690}, pages = {20}, publisher = {IST Austria}, title = {{Data-centric dynamic partial order reduction}}, year = {2016}, } @misc{5452, author = {Pavlogiannis, Andreas and Tkadlec, Josef and Chatterjee, Krishnendu and Nowak, Martin}, issn = {2664-1690}, pages = {32}, publisher = {IST Austria}, title = {{Arbitrarily strong amplifiers of natural selection}}, doi = {10.15479/AT:IST-2017-728-v2-1}, year = {2016}, } @misc{5431, abstract = {We consider finite-state concurrent stochastic games, played by k>=2 players for an infinite number of rounds, where in every round, each player simultaneously and independently of the other players chooses an action, whereafter the successor state is determined by a probability distribution given by the current state and the chosen actions. We consider reachability objectives that, given a target set of states, require that some state in the target set is visited, and the dual safety objectives that, given a target set, require that only states in the target set are visited. We are interested in the complexity of stationary strategies measured by their patience, which is defined as the inverse of the smallest non-zero probability employed. Our main results are as follows: We show that in two-player zero-sum concurrent stochastic games (with a reachability objective for one player and the complementary safety objective for the other player): (i) the optimal bound on the patience of optimal and epsilon-optimal strategies, for both players, is doubly exponential; and (ii) even in games with a single non-absorbing state, exponential (in the number of actions) patience is necessary. In general we study the class of non-zero-sum games admitting epsilon-Nash equilibria. We show that if there is at least one player with a reachability objective, then doubly-exponential patience is needed in general for epsilon-Nash equilibrium strategies, whereas, in contrast, if all players have safety objectives, then the optimal bound on patience for epsilon-Nash equilibrium strategies is only exponential.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Hansen, Kristoffer}, issn = {2664-1690}, pages = {25}, publisher = {IST Austria}, title = {{The patience of concurrent stochastic games with safety and reachability objectives}}, doi = {10.15479/AT:IST-2015-322-v1-1}, year = {2015}, } @misc{5434, abstract = {DEC-POMDPs extend POMDPs to a multi-agent setting, where several agents operate in an uncertain environment independently to achieve a joint objective. DEC-POMDPs have been studied with finite-horizon and infinite-horizon discounted-sum objectives, and there exist solvers both for exact and approximate solutions. In this work we consider Goal-DEC-POMDPs, where, given a set of target states, the objective is to ensure that the target set is reached with minimal cost. We consider the indefinite-horizon (infinite-horizon with either discounted-sum or undiscounted-sum objectives, where absorbing goal states have zero cost) problem. We present a new method to solve the problem that extends methods for finite-horizon DEC-POMDPs and the RTDP-Bel approach for POMDPs.
We present experimental results on several examples, and show that our approach yields promising results.}, author = {Anonymous, 1 and Anonymous, 2}, issn = {2664-1690}, pages = {16}, publisher = {IST Austria}, title = {{Optimal cost indefinite-horizon reachability in goal DEC-POMDPs}}, year = {2015}, } @misc{5429, abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There have been two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider the problem where the goal is to optimize the expectation under the constraint that the satisfaction semantics is ensured, and thus consider a generalization that unifies the existing semantics. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure certain probabilistic guarantees). Our main results are algorithms for the decision problem which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Finally, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.}, author = {Chatterjee, Krishnendu and Komarkova, Zuzana and Kretinsky, Jan}, issn = {2664-1690}, pages = {41}, publisher = {IST Austria}, title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}}, doi = {10.15479/AT:IST-2015-318-v1-1}, year = {2015}, } @misc{5435, abstract = {We consider Markov decision processes (MDPs) with multiple limit-average (or mean-payoff) objectives. There have been two different views: (i) the expectation semantics, where the goal is to optimize the expected mean-payoff objective, and (ii) the satisfaction semantics, where the goal is to maximize the probability of runs such that the mean-payoff value stays above a given vector. We consider the problem where the goal is to optimize the expectation under the constraint that the satisfaction semantics is ensured, and thus consider a generalization that unifies the existing semantics. Our problem captures the notion of optimization with respect to strategies that are risk-averse (i.e., ensure certain probabilistic guarantees). Our main results are algorithms for the decision problem which are always polynomial in the size of the MDP. We also show that an approximation of the Pareto curve can be computed in time polynomial in the size of the MDP and the approximation factor, but exponential in the number of dimensions. Finally, we present a complete characterization of the strategy complexity (in terms of memory bounds and randomization) required to solve our problem.}, author = {Chatterjee, Krishnendu and Komarkova, Zuzana and Kretinsky, Jan}, issn = {2664-1690}, pages = {51}, publisher = {IST Austria}, title = {{Unifying two views on multiple mean-payoff objectives in Markov decision processes}}, doi = {10.15479/AT:IST-2015-318-v2-1}, year = {2015}, } @misc{5436, abstract = {Recently there has been a significant effort to handle quantitative properties in formal verification and synthesis.
While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, some basic system properties such as average response time cannot be expressed using weighted automata, nor in any other known decidable formalism. In this work, we introduce nested weighted automata as a natural extension of weighted automata which makes it possible to express important quantitative properties such as average response time. In nested weighted automata, a master automaton spins off and collects results from weighted slave automata, each of which computes a quantity along a finite portion of an infinite word. Nested weighted automata can be viewed as the quantitative analogue of monitor automata, which are used in run-time verification. We establish an almost complete decidability picture for the basic decision problems about nested weighted automata, and illustrate their applicability in several domains. In particular, nested weighted automata can be used to decide average response time properties.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan}, issn = {2664-1690}, pages = {29}, publisher = {IST Austria}, title = {{Nested weighted automata}}, doi = {10.15479/AT:IST-2015-170-v2-2}, year = {2015}, } @misc{5437, abstract = {We consider the core algorithmic problems related to verification of systems with respect to three classical quantitative properties, namely, the mean-payoff property, the ratio property, and the minimum initial credit for energy property. The algorithmic problem, given a graph and a quantitative property, asks to compute the optimal value (the infimum value over all traces) from every node of the graph. We consider graphs with constant treewidth, and it is well-known that the control-flow graphs of most programs have constant treewidth. Let $n$ denote the number of nodes of a graph, $m$ the number of edges (for constant treewidth graphs $m=O(n)$) and $W$ the largest absolute value of the weights. Our main theoretical results are as follows. First, for constant treewidth graphs we present an algorithm that approximates the mean-payoff value within a multiplicative factor of $\epsilon$ in time $O(n \cdot \log (n/\epsilon))$ and linear space, as compared to the classical algorithms that require quadratic time. Second, for the ratio property we present an algorithm that for constant treewidth graphs works in time $O(n \cdot \log (|a\cdot b|))=O(n\cdot\log (n\cdot W))$, when the output is $\frac{a}{b}$, as compared to the previously best known algorithm with running time $O(n^2 \cdot \log (n\cdot W))$. Third, for the minimum initial credit problem we show that (i)~for general graphs the problem can be solved in $O(n^2\cdot m)$ time and the associated decision problem can be solved in $O(n\cdot m)$ time, improving the previously known $O(n^3\cdot m\cdot \log (n\cdot W))$ and $O(n^2 \cdot m)$ bounds, respectively; and (ii)~for constant treewidth graphs we present an algorithm that requires $O(n\cdot \log n)$ time, improving the previously known $O(n^4 \cdot \log (n \cdot W))$ bound. We have implemented some of our algorithms and show that they present a significant speedup on standard benchmarks.}, author = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas}, issn = {2664-1690}, pages = {27}, publisher = {IST Austria}, title = {{Faster algorithms for quantitative verification in constant treewidth graphs}}, doi = {10.15479/AT:IST-2015-330-v2-1}, year = {2015}, }