@article{2271, abstract = {A class of valued constraint satisfaction problems (VCSPs) is characterised by a valued constraint language, a fixed set of cost functions on a finite domain. Finite-valued constraint languages contain functions that take on rational costs, and general-valued constraint languages contain functions that take on rational or infinite costs. An instance of the problem is specified by a sum of functions from the language with the goal of minimising the sum. This framework includes and generalises well-studied constraint satisfaction problems (CSPs) and maximum constraint satisfaction problems (Max-CSPs). Our main result is a precise algebraic characterisation of valued constraint languages whose instances can be solved exactly by the basic linear programming relaxation (BLP). For a general-valued constraint language Γ, BLP is a decision procedure for Γ if and only if Γ admits a symmetric fractional polymorphism of every arity. For a finite-valued constraint language Γ, BLP is a decision procedure if and only if Γ admits a symmetric fractional polymorphism of some arity, or equivalently, if Γ admits a symmetric fractional polymorphism of arity 2. Using these results, we obtain tractability of several novel and previously widely open classes of VCSPs, including problems over valued constraint languages that are: (1) submodular on arbitrary lattices; (2) bisubmodular (also known as k-submodular) on arbitrary finite domains; (3) weakly (and hence strongly) tree-submodular on arbitrary trees.}, author = {Kolmogorov, Vladimir and Thapper, Johan and Živný, Stanislav}, journal = {SIAM Journal on Computing}, number = {1}, pages = {1 -- 36}, publisher = {SIAM}, title = {{The power of linear programming for general-valued CSPs}}, doi = {10.1137/130945648}, volume = {44}, year = {2015}, } @inproceedings{1637, abstract = {An instance of the Valued Constraint Satisfaction Problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values, specifying costs of assignments of labels to its variables, or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue and there is no optimization. This case is the subject of the Algebraic CSP Dichotomy Conjecture, which predicts for which constraint languages CSPs are tractable (i.e. solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e. the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable.
The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.}, author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal}, location = {Berkeley, CA, United States}, pages = {1246 -- 1258}, publisher = {IEEE}, title = {{The complexity of general-valued CSPs}}, doi = {10.1109/FOCS.2015.80}, year = {2015}, } @inproceedings{1675, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto’92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs - so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model (with one additional mild assumption required for the proof to go through), using graphs with high “pebbling complexity” and Merkle hash-trees. We discuss some applications, including follow-up work where a decentralized digital currency scheme called Spacecoin is constructed that uses PoS (instead of wasteful PoW as in Bitcoin) to prevent double spending. The main technical contribution of this work is the construction of (directed, loop-free) graphs on N vertices with in-degree O(log log N) such that even if one places Θ(N) pebbles on the nodes of the graph, there is a constant fraction of nodes that need Θ(N) steps to be pebbled (where in every step one can put a pebble on a node if all its parents have a pebble).}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, booktitle = {35th Annual Cryptology Conference}, isbn = {9783662479995}, issn = {0302-9743}, location = {Santa Barbara, CA, United States}, pages = {585 -- 605}, publisher = {Springer}, title = {{Proofs of space}}, doi = {10.1007/978-3-662-48000-7_29}, volume = {9216}, year = {2015}, } @inproceedings{2275, abstract = {Energies with high-order non-submodular interactions have been shown to be very useful in vision due to their high modeling power. Optimization of such energies, however, is generally NP-hard. A naive approach that works for small problem instances is exhaustive search, that is, enumeration of all possible labelings of the underlying graph. We propose a general minimization approach for large graphs based on enumeration of labelings of certain small patches. This partial enumeration technique reduces complex high-order energy formulations to pairwise Constraint Satisfaction Problems with unary costs (uCSP), which can be efficiently solved using standard methods like TRW-S. Our approach outperforms a number of existing state-of-the-art algorithms on well-known difficult problems (e.g. curvature regularization, stereo, deconvolution); it achieves near-global minima with better speed. Our main application of interest is curvature regularization. In the context of segmentation, our partial enumeration technique allows us to evaluate curvature directly on small patches using a novel integral geometry approach.
}, author = {Olsson, Carl and Ulen, Johannes and Boykov, Yuri and Kolmogorov, Vladimir}, location = {Sydney, Australia}, pages = {2936 -- 2943}, publisher = {IEEE}, title = {{Partial enumeration and curvature regularization}}, doi = {10.1109/ICCV.2013.365}, year = {2014}, } @techreport{7038, author = {Huszár, Kristóf and Rolinek, Michal}, pages = {5}, publisher = {IST Austria}, title = {{Playful Math - An introduction to mathematical games}}, year = {2014}, } @inproceedings{2270, abstract = {Representation languages for coalitional games are a key research area in algorithmic game theory. There is an inherent tradeoff between how general a language is, allowing it to capture more elaborate games, and how hard it is computationally to optimize and solve such games. One prominent such language is the simple yet expressive Weighted Graph Games (WGGs) representation (Deng and Papadimitriou 1994), which maintains knowledge about synergies between agents in the form of an edge-weighted graph. We consider the problem of finding the optimal coalition structure in WGGs. The agents in such games are vertices in a graph, and the value of a coalition is the sum of the weights of the edges present between coalition members. The optimal coalition structure is a partition of the agents into coalitions that maximizes the sum of utilities obtained by the coalitions. We show that finding the optimal coalition structure is not only hard for general graphs, but is also intractable for restricted families such as planar graphs, which are amenable to many other combinatorial problems. We then provide algorithms with constant-factor approximations for planar, minor-free and bounded-degree graphs.}, author = {Bachrach, Yoram and Kohli, Pushmeet and Kolmogorov, Vladimir and Zadimoghaddam, Morteza}, location = {Bellevue, WA, United States}, pages = {81 -- 87}, publisher = {AAAI Press}, title = {{Optimal Coalition Structures in Cooperative Graph Games}}, year = {2013}, } @techreport{2273, abstract = {We propose a new family of message passing techniques for MAP estimation in graphical models, which we call Sequential Reweighted Message Passing (SRMP). Special cases include well-known techniques such as Min-Sum Diffusion (MSD) and a faster Sequential Tree-Reweighted Message Passing (TRW-S). Importantly, our derivation is simpler than the original derivation of TRW-S, and does not involve a decomposition into trees. This allows easy generalizations. We present such a generalization for the case of higher-order graphical models, and test it on several real-world problems with promising results.}, author = {Kolmogorov, Vladimir}, publisher = {IST Austria}, title = {{Reweighted message passing revisited}}, year = {2013}, } @inproceedings{2276, abstract = {The problem of minimizing the Potts energy function frequently occurs in computer vision applications. One way to tackle this NP-hard problem was proposed by Kovtun [19, 20]. It identifies a part of an optimal solution by running k maxflow computations, where k is the number of labels. The number of “labeled” pixels can be significant in some applications, e.g. 50-93% in our tests for stereo. We show how to reduce the runtime to O(log k) maxflow computations (or one parametric maxflow computation). Furthermore, the output of our algorithm allows speeding up the subsequent alpha expansion for the unlabeled part, or can be used as is for time-critical applications. To derive our technique, we generalize the algorithm of Felzenszwalb et al. [7] for tree metrics.
We also show a connection to k-submodular functions from combinatorial optimization, and discuss k-submodular relaxations for general energy functions.}, author = {Gridchyn, Igor and Kolmogorov, Vladimir}, location = {Sydney, Australia}, pages = {2320 -- 2327}, publisher = {IEEE}, title = {{Potts model, parametric maxflow and k-submodular functions}}, doi = {10.1109/ICCV.2013.288}, year = {2013}, } @inproceedings{2518, abstract = {A class of valued constraint satisfaction problems (VCSPs) is characterised by a valued constraint language, a fixed set of cost functions on a finite domain. An instance of the problem is specified by a sum of cost functions from the language with the goal of minimising the sum. We study which classes of finite-valued languages can be solved exactly by the basic linear programming relaxation (BLP). Thapper and Živný [20] showed that if BLP solves the language then the language admits a binary commutative fractional polymorphism. We prove that the converse is also true. This leads to a necessary and sufficient condition which can be checked in polynomial time for a given language. In contrast, the previous necessary and sufficient condition due to [20] involved infinitely many inequalities. More recently, Thapper and Živný [21] showed (using, in particular, a technique introduced in this paper) that core languages that do not satisfy our condition are NP-hard. Taken together, these results imply that a finite-valued language can either be solved using linear programming or is NP-hard.}, author = {Kolmogorov, Vladimir}, location = {Riga, Latvia}, number = {1}, pages = {625 -- 636}, publisher = {Springer}, title = {{The power of linear programming for finite-valued CSPs: A constructive characterization}}, doi = {10.1007/978-3-642-39206-1_53}, volume = {7965}, year = {2013}, } @article{2828, abstract = {We study the complexity of valued constraint satisfaction problems (VCSPs) parametrized by a constraint language, a fixed set of cost functions over a finite domain. An instance of the problem is specified by a sum of cost functions from the language and the goal is to minimize the sum. Under the unique games conjecture, the approximability of finite-valued VCSPs is well understood, see Raghavendra [2008]. However, there is no characterization of which finite-valued VCSPs, let alone general-valued VCSPs, can be solved exactly in polynomial time; such a characterization would give insights from a combinatorial optimization perspective. We consider the case of languages containing all possible unary cost functions. In the case of languages consisting of only {0, ∞}-valued cost functions (i.e., relations), such languages have been called conservative and studied by Bulatov [2003, 2011] and recently by Barto [2011]. Since we study valued languages, we call a language conservative if it contains all finite-valued unary cost functions. The computational complexity of conservative valued languages has been studied by Cohen et al. [2006] for languages over Boolean domains, by Deineko et al. [2008] for {0, 1}-valued languages (a.k.a. Max-CSP), and by Takhanov [2010a] for {0, ∞}-valued languages containing all finite-valued unary cost functions (a.k.a. Min-Cost-Hom).
We prove a Schaefer-like dichotomy theorem for conservative valued languages: if all cost functions in the language satisfy a certain condition (specified by a complementary combination of STP and MJN multimorphisms), then any instance can be solved in polynomial time (via a new algorithm developed in this article); otherwise the language is NP-hard. This is the first complete complexity classification of general-valued constraint languages over non-Boolean domains. It is a common phenomenon that complexity classifications of problems over non-Boolean domains are significantly harder than the Boolean cases. The polynomial-time algorithm we present for the tractable cases is a generalization of the submodular minimization problem and a result of Cohen et al. [2008]. Our results generalize previous results by Takhanov [2010a] and (a subset of results) by Cohen et al. [2006] and Deineko et al. [2008]. Moreover, our results do not rely on any computer-assisted search as in Deineko et al. [2008], and provide a powerful tool for proving hardness of finite-valued and general-valued languages.}, author = {Kolmogorov, Vladimir and Živný, Stanislav}, journal = {Journal of the ACM}, number = {2}, publisher = {ACM}, title = {{The complexity of conservative valued CSPs}}, doi = {10.1145/2450142.2450146}, volume = {60}, year = {2013}, } @inproceedings{2901, abstract = {We introduce the M-modes problem for graphical models: predicting the M label configurations of highest probability that are at the same time local maxima of the probability landscape. M-modes have multiple possible applications: because they are intrinsically diverse, they provide a principled alternative to non-maximum suppression techniques for structured prediction, they can act as codebook vectors for quantizing the configuration space, or they can form component centers for mixture model approximation. We present two algorithms for solving the M-modes problem. The first algorithm solves the problem in polynomial time when the underlying graphical model is a simple chain. The second algorithm solves the problem for junction chains. On synthetic and real datasets, we demonstrate how M-modes can improve the performance of prediction. We also use the generated modes as a tool to understand the topography of the probability distribution of configurations, for example in relation to the training set size and the amount of noise in the data.}, author = {Chen, Chao and Kolmogorov, Vladimir and Yan, Zhu and Metaxas, Dimitris and Lampert, Christoph}, location = {Scottsdale, AZ, United States}, pages = {161 -- 169}, publisher = {JMLR}, title = {{Computing the M most probable modes of a graphical model}}, volume = {31}, year = {2013}, } @inproceedings{2272, abstract = {We consider Conditional Random Fields (CRFs) with pattern-based potentials defined on a chain. In this model the energy of a string (labeling) x_1...x_n is the sum of terms over intervals [i,j] where each term is non-zero only if the substring x_i...x_j equals a prespecified pattern α. Such CRFs can be naturally applied to many sequence tagging problems. We present efficient algorithms for the three standard inference tasks in a CRF, namely computing (i) the partition function, (ii) marginals, and (iii) the MAP. Their complexities are respectively O(nL), O(nL ℓ_max) and O(nL min{|D|, log(ℓ_max+1)}), where L is the combined length of input patterns, ℓ_max is the maximum length of a pattern, and D is the input alphabet.
This improves on the previous algorithms of Ye et al. (2009), whose complexities are respectively O(nL|D|), O(n|Γ|L^2 ℓ_max^2) and O(nL|D|), where |Γ| is the number of input patterns. In addition, we give an efficient algorithm for sampling. Finally, we consider the case of non-positive weights. Komodakis and Paragios (2009) gave an O(nL) algorithm for computing the MAP. We present a modification that has the same worst-case complexity but can beat it in the best case.}, author = {Takhanov, Rustem and Kolmogorov, Vladimir}, booktitle = {ICML'13: Proceedings of the 30th International Conference on Machine Learning}, location = {Atlanta, GA, United States}, number = {3}, pages = {145 -- 153}, publisher = {ML Research Press}, title = {{Inference algorithms for pattern-based CRFs on sequence data}}, volume = {28}, year = {2013}, } @techreport{2274, abstract = {Proofs of work (PoW) have been suggested by Dwork and Naor (Crypto'92) as protection to a shared resource. The basic idea is to ask the service requestor to dedicate some non-trivial amount of computational work to every request. The original applications included prevention of spam and protection against denial of service attacks. More recently, PoWs have been used to prevent double spending in the Bitcoin digital currency system. In this work, we put forward an alternative concept for PoWs -- so-called proofs of space (PoS), where a service requestor must dedicate a significant amount of disk space as opposed to computation. We construct secure PoS schemes in the random oracle model, using graphs with high "pebbling complexity" and Merkle hash-trees.}, author = {Dziembowski, Stefan and Faust, Sebastian and Kolmogorov, Vladimir and Pietrzak, Krzysztof Z}, publisher = {IST Austria}, title = {{Proofs of Space}}, year = {2013}, } @inproceedings{2930, abstract = {In this paper we investigate k-submodular functions. This natural family of discrete functions includes submodular and bisubmodular functions as the special cases k = 1 and k = 2, respectively. In particular we generalize the known Min-Max-Theorem for submodular and bisubmodular functions. This theorem asserts that the minimum of a (bi)submodular function can be found by solving a maximization problem over a (bi)submodular polyhedron. We define a k-submodular polyhedron, prove a Min-Max-Theorem for k-submodular functions, and give a greedy algorithm to construct the vertices of the polyhedron.}, author = {Huber, Anna and Kolmogorov, Vladimir}, location = {Athens, Greece}, pages = {451 -- 462}, publisher = {Springer}, title = {{Towards minimizing k-submodular functions}}, doi = {10.1007/978-3-642-32147-4_40}, volume = {7422}, year = {2012}, } @unpublished{2928, abstract = {This paper addresses the problem of approximate MAP-MRF inference in general graphical models. Following [36], we consider a family of linear programming relaxations of the problem where each relaxation is specified by a set of nested pairs of factors for which the marginalization constraint needs to be enforced. We develop a generalization of the TRW-S algorithm [9] for this problem, where we use a decomposition into junction chains, monotonic w.r.t. some ordering on the nodes. This generalizes the monotonic chains in [9] in a natural way. We also show how to deal with nested factors in an efficient way. Experiments show an improvement over the min-sum diffusion, MPLP and subgradient ascent algorithms on a number of computer vision and natural language processing problems.
}, author = {Kolmogorov, Vladimir and Schoenemann, Thomas}, booktitle = {arXiv}, pages = {16}, publisher = {ArXiv}, title = {{Generalized sequential tree-reweighted message passing}}, year = {2012}, } @article{2931, abstract = {In this paper, we present a new approach for establishing correspondences between sparse image features related by an unknown nonrigid mapping and corrupted by clutter and occlusion, such as points extracted from images of different instances of the same object category. We formulate this matching task as an energy minimization problem by defining an elaborate objective function of the appearance and the spatial arrangement of the features. Optimization of this energy is an instance of graph matching, which is in general an NP-hard problem. We describe a novel graph matching optimization technique, which we refer to as dual decomposition (DD), and demonstrate on a variety of examples that this method outperforms existing graph matching algorithms. In the majority of our examples, DD is able to find the global minimum within a minute. The ability to globally optimize the objective allows us to accurately learn the parameters of our matching model from training examples. We show on several matching tasks that our learned model yields results superior to those of state-of-the-art methods.}, author = {Torresani, Lorenzo and Kolmogorov, Vladimir and Rother, Carsten}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {2}, pages = {259 -- 271}, publisher = {IEEE}, title = {{A dual decomposition approach to feature correspondence}}, doi = {10.1109/TPAMI.2012.105}, volume = {35}, year = {2012}, } @article{3117, abstract = {We consider the problem of minimizing a function represented as a sum of submodular terms. We assume each term allows an efficient computation of exchange capacities. This holds, for example, for terms depending on a small number of variables, or for certain cardinality-dependent terms. A naive application of submodular minimization algorithms would not exploit the existence of specialized exchange capacity subroutines for individual terms. To overcome this, we cast the problem as a submodular flow (SF) problem in an auxiliary graph in such a way that applying most existing SF algorithms would rely only on these subroutines. We then explore in more detail Iwata's capacity scaling approach for submodular flows (Iwata 1997 [19]). In particular, we show how to improve its complexity in the case when the function contains cardinality-dependent terms.}, author = {Kolmogorov, Vladimir}, journal = {Discrete Applied Mathematics}, number = {15}, pages = {2246 -- 2258}, publisher = {Elsevier}, title = {{Minimizing a sum of submodular functions}}, doi = {10.1016/j.dam.2012.05.025}, volume = {160}, year = {2012}, } @article{3257, abstract = {Consider a convex relaxation f̂ of a pseudo-Boolean function f. We say that the relaxation is totally half-integral if f̂(x) is a polyhedral function with half-integral extreme points x, and this property is preserved after adding an arbitrary combination of constraints of the form x_i = x_j, x_i = 1 - x_j, and x_i = γ, where γ ∈ {0, 1, 1/2} is a constant. A well-known example is the roof duality relaxation for quadratic pseudo-Boolean functions f. We argue that total half-integrality is a natural requirement for generalizations of roof duality to arbitrary pseudo-Boolean functions. Our contributions are as follows.
First, we provide a complete characterization of totally half-integral relaxations f̂ by establishing a one-to-one correspondence with bisubmodular functions. Second, we give a new characterization of bisubmodular functions. Finally, we show some relationships between general totally half-integral relaxations and relaxations based on roof duality. On the conceptual level, our results show that bisubmodular functions provide a natural generalization of the roof duality approach to higher-order terms. This can be viewed as a non-submodular analogue of the fact that submodular functions generalize the s-t minimum cut problem with non-negative weights to higher-order terms.}, author = {Kolmogorov, Vladimir}, journal = {Discrete Applied Mathematics}, number = {4-5}, pages = {416 -- 426}, publisher = {Elsevier}, title = {{Generalized roof duality and bisubmodular functions}}, doi = {10.1016/j.dam.2011.10.026}, volume = {160}, year = {2012}, } @inproceedings{3124, abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.}, author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph}, location = {Edinburgh, Scotland}, publisher = {ICML}, title = {{Approximating marginals using discrete energy minimization}}, year = {2012}, } @misc{5396, abstract = {We consider the problem of inference in a graphical model with binary variables. While in theory it is arguably preferable to compute marginal probabilities, in practice researchers often use MAP inference due to the availability of efficient discrete optimization algorithms. We bridge the gap between the two approaches by introducing the Discrete Marginals technique in which approximate marginals are obtained by minimizing an objective function with unary and pairwise terms over a discretized domain. This allows the use of techniques originally developed for MAP-MRF inference and learning. We explore two ways to set up the objective function - by discretizing the Bethe free energy and by learning it from training data. Experimental results show that for certain types of graphs a learned function can outperform the Bethe approximation. We also establish a link between the Bethe free energy and submodular functions.}, author = {Korc, Filip and Kolmogorov, Vladimir and Lampert, Christoph}, issn = {2664-1690}, pages = {13}, publisher = {IST Austria}, title = {{Approximating marginals using discrete energy minimization}}, doi = {10.15479/AT:IST-2012-0003}, year = {2012}, }