@inproceedings{6725, abstract = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in O(3^{|D|/3} ⋅ poly(size(Γ))) time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ < 1 there is no O(3^{δ|D|/3}) algorithm, assuming that SETH holds.}, author = {Kolmogorov, Vladimir}, booktitle = {46th International Colloquium on Automata, Languages and Programming}, isbn = {978-3-95977-109-2}, issn = {1868-8969}, location = {Patras, Greece}, pages = {77:1--77:12}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Testing the complexity of a valued CSP language}}, doi = {10.4230/LIPICS.ICALP.2019.77}, volume = {132}, year = {2019}, } @article{6596, abstract = {It is well known that many problems in image recovery, signal processing, and machine learning can be modeled as finding zeros of the sum of maximal monotone and Lipschitz continuous monotone operators. Many papers have studied forward-backward splitting methods for finding zeros of the sum of two monotone operators in Hilbert spaces. Most of the splitting methods proposed in the literature address the sum of maximal monotone and inverse-strongly monotone operators in Hilbert spaces. In this paper, we consider splitting methods for finding zeros of the sum of maximal monotone operators and Lipschitz continuous monotone operators in Banach spaces. We obtain weak and strong convergence results for the zeros of the sum of maximal monotone and Lipschitz continuous monotone operators in Banach spaces. Many problems already studied in the literature can be considered as special cases of our results.}, author = {Shehu, Yekini}, issn = {1420-9012}, journal = {Results in Mathematics}, number = {4}, publisher = {Springer}, title = {{Convergence results of forward-backward algorithms for sum of monotone operators in Banach spaces}}, doi = {10.1007/s00025-019-1061-4}, volume = {74}, year = {2019}, } @article{7000, abstract = {The main contributions of this paper are the proposition and the convergence analysis of a class of inertial projection-type algorithms for solving variational inequality problems in real Hilbert spaces where the underlying operator is monotone and uniformly continuous. We carry out a unified analysis of the proposed method under very mild assumptions.
In particular, weak convergence of the generated sequence is established, and a nonasymptotic O(1/n) rate of convergence is obtained, where n denotes the iteration counter. We also present some experimental results to illustrate the benefits gained by introducing the inertial extrapolation steps.}, author = {Shehu, Yekini and Iyiola, Olaniyi S. and Li, Xiao-Huan and Dong, Qiao-Li}, issn = {1807-0302}, journal = {Computational and Applied Mathematics}, number = {4}, publisher = {Springer Nature}, title = {{Convergence analysis of projection method for variational inequalities}}, doi = {10.1007/s40314-019-0955-9}, volume = {38}, year = {2019}, } @article{7412, abstract = {We develop a framework for the rigorous analysis of focused stochastic local search algorithms. These algorithms search a state space by repeatedly selecting some constraint that is violated in the current state and moving to a random nearby state that addresses the violation, while (we hope) not introducing many new violations. An important class of focused local search algorithms with provable performance guarantees has recently arisen from algorithmizations of the Lovász local lemma (LLL), a nonconstructive tool for proving the existence of satisfying states by introducing a background measure on the state space. While powerful, the state transitions of algorithms in this class must be, in a precise sense, perfectly compatible with the background measure. In many applications this is a very restrictive requirement, and one needs to step outside the class. Here we introduce the notion of measure distortion and develop a framework for analyzing arbitrary focused stochastic local search algorithms, recovering LLL algorithmizations as the special case of no distortion. Our framework takes as input an arbitrary algorithm of such type and an arbitrary probability measure and shows how to use the measure as a yardstick of algorithmic progress, even for algorithms designed independently of the measure.}, author = {Achlioptas, Dimitris and Iliopoulos, Fotis and Kolmogorov, Vladimir}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {5}, pages = {1583--1602}, publisher = {SIAM}, title = {{A local lemma for focused stochastic algorithms}}, doi = {10.1137/16m109332x}, volume = {48}, year = {2019}, } @inproceedings{7468, abstract = {We present a new proximal bundle method for Maximum-A-Posteriori (MAP) inference in structured energy minimization problems. The method optimizes a Lagrangean relaxation of the original energy minimization problem using a multi-plane block-coordinate Frank-Wolfe method that takes advantage of the specific structure of the Lagrangean decomposition. We show empirically that our method outperforms state-of-the-art Lagrangean decomposition-based algorithms on some challenging Markov Random Field, multi-label discrete tomography and graph matching problems.}, author = {Swoboda, Paul and Kolmogorov, Vladimir}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, isbn = {9781728132938}, issn = {10636919}, location = {Long Beach, CA, United States}, publisher = {IEEE}, title = {{MAP inference via block-coordinate Frank-Wolfe algorithm}}, doi = {10.1109/CVPR.2019.01140}, volume = {2019-June}, year = {2019}, } @inproceedings{7639, abstract = {Deep neural networks (DNNs) have become increasingly important due to their excellent empirical performance on a wide range of problems.
However, regularization is generally achieved by indirect means, largely due to the complex set of functions defined by a network and the difficulty in measuring function complexity. There exists no method in the literature for additive regularization based on a norm of the function, as is classically considered in statistical learning theory. In this work, we study the tractability of function norms for deep neural networks with ReLU activations. We provide, to the best of our knowledge, the first proof in the literature of the NP-hardness of computing function norms of DNNs of 3 or more layers. We also highlight a fundamental difference between shallow and deep networks. In light of these results, we propose a new regularization strategy based on approximate function norms, and show its efficiency on a segmentation task with a DNN.}, author = {Rannen-Triki, Amal and Berman, Maxim and Kolmogorov, Vladimir and Blaschko, Matthew B.}, booktitle = {Proceedings of the 2019 International Conference on Computer Vision Workshop}, isbn = {9781728150239}, location = {Seoul, South Korea}, publisher = {IEEE}, title = {{Function norms for neural networks}}, doi = {10.1109/ICCVW.2019.00097}, year = {2019}, } @article{703, abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem; or (ii) non-optimal, if they provably do not belong to any optimal solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm, which has access to a suboptimal dual solver only and still can ensure the (non-)optimality for the marked labels, although the overall number of the marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method scales well and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.}, author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan}, issn = {01628828}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {7}, pages = {1668--1682}, publisher = {IEEE}, title = {{Maximum persistency via iterative relaxed inference with graphical models}}, doi = {10.1109/TPAMI.2017.2730884}, volume = {40}, year = {2018}, } @inbook{10864, abstract = {We prove that every congruence distributive variety has directed Jónsson terms, and every congruence modular variety has directed Gumm terms. The directed terms we construct witness every case of absorption witnessed by the original Jónsson or Gumm terms. This result is equivalent to a pair of claims about absorption for admissible preorders in congruence distributive and congruence modular varieties, respectively. For finite algebras, these absorption theorems have already seen significant applications, but until now, it was not clear whether the theorems hold for general algebras as well. Our method also yields a novel proof of a result by P.
Lipparini about the existence of a chain of terms (which we call Pixley terms) in varieties that are at the same time congruence distributive and k-permutable for some k.}, author = {Kazda, Alexandr and Kozik, Marcin and McKenzie, Ralph and Moore, Matthew}, booktitle = {Don Pigozzi on Abstract Algebraic Logic, Universal Algebra, and Computer Science}, editor = {Czelakowski, J}, isbn = {9783319747712}, issn = {2211-2766}, pages = {203--220}, publisher = {Springer Nature}, title = {{Absorption and directed Jónsson terms}}, doi = {10.1007/978-3-319-74772-9_7}, volume = {16}, year = {2018}, } @inproceedings{273, abstract = {The accuracy of information retrieval systems is often measured using complex loss functions such as the average precision (AP) or the normalized discounted cumulative gain (NDCG). Given a set of positive and negative samples, the parameters of a retrieval system can be estimated by minimizing these loss functions. However, the non-differentiability and non-decomposability of these loss functions do not allow for simple gradient-based optimization algorithms. This issue is generally circumvented by either optimizing a structured hinge-loss upper bound to the loss function or by using asymptotic methods like the direct-loss minimization framework. Yet, the high computational complexity of loss-augmented inference, which is necessary for both frameworks, prohibits its use on large training data sets. To alleviate this deficiency, we present a novel quicksort-flavored algorithm for a large class of non-decomposable loss functions. We provide a complete characterization of the loss functions that are amenable to our algorithm, and show that it includes both AP- and NDCG-based loss functions. Furthermore, we prove that no comparison-based algorithm can improve upon the computational complexity of our approach asymptotically. We demonstrate the effectiveness of our approach in the context of optimizing the structured hinge loss upper bound of AP and NDCG loss for learning models for a variety of vision tasks. We show that our approach provides significantly better results than simpler decomposable loss functions, while requiring a comparable training time.}, author = {Mohapatra, Pritish and Rolinek, Michal and Jawahar, C V and Kolmogorov, Vladimir and Kumar, M Pawan}, booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, isbn = {9781538664209}, location = {Salt Lake City, UT, USA}, pages = {3693--3701}, publisher = {IEEE}, title = {{Efficient optimization for rank-based loss functions}}, doi = {10.1109/cvpr.2018.00389}, year = {2018}, } @inproceedings{193, abstract = {We show attacks on five data-independent memory-hard functions (iMHF) that were submitted to the password hashing competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure for the hardware cost of evaluating the iMHF on an ASIC.
Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats, and Gambit each have an exponent of at most 1.75. Moreover, we show that the iMHF modes of the PHC finalists Pomelo and Lyra2 have exponents of at most 1.83 and 1.67, respectively. To show this we investigate a combinatorial property of each underlying DAG (called its depth-robustness). By establishing upper bounds on this property we are then able to apply the general technique of [Alwen-Blocki'16] for analyzing the hardware costs of an iMHF.}, author = {Alwen, Joel F and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F and Pietrzak, Krzysztof Z and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal}, booktitle = {Proceedings of the 2018 on Asia Conference on Computer and Communications Security}, location = {Incheon, Republic of Korea}, pages = {51--65}, publisher = {ACM}, title = {{On the memory hardness of data independent password hashing functions}}, doi = {10.1145/3196494.3196534}, year = {2018}, } @article{5975, abstract = {We consider the recent formulation of the algorithmic Lovász Local Lemma [N. Harvey and J. Vondrák, in Proceedings of FOCS, 2015, pp. 1327–1345; D. Achlioptas and F. Iliopoulos, in Proceedings of SODA, 2016, pp. 2024–2038; D. Achlioptas, F. Iliopoulos, and V. Kolmogorov, A Local Lemma for Focused Stochastic Algorithms, arXiv preprint, 2018] for finding objects that avoid “bad features,” or “flaws.” It extends the Moser–Tardos resampling algorithm [R. A. Moser and G. Tardos, J. ACM, 57 (2010), 11] to more general discrete spaces. At each step the method picks a flaw present in the current state and goes to a new state according to some prespecified probability distribution (which depends on the current state and the selected flaw). However, the recent formulation is less flexible than the Moser–Tardos method since it requires a specific flaw selection rule, whereas the algorithm of Moser and Tardos allows an arbitrary rule (and thus can potentially be implemented more efficiently). We formulate a new “commutativity” condition and prove that it is sufficient for an arbitrary rule to work. It also enables an efficient parallelization under an additional assumption. We then show that existing resampling oracles for perfect matchings and permutations do satisfy this condition.}, author = {Kolmogorov, Vladimir}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {6}, pages = {2029--2056}, publisher = {Society for Industrial & Applied Mathematics (SIAM)}, title = {{Commutativity in the algorithmic Lovász local lemma}}, doi = {10.1137/16m1093306}, volume = {47}, year = {2018}, } @inproceedings{5978, abstract = {We consider the MAP-inference problem for graphical models, which is a valued constraint satisfaction problem defined on real numbers with a natural summation operation. We propose a family of relaxations (different from the famous Sherali-Adams hierarchy), which naturally define lower bounds for its optimum. This family always contains a tight relaxation and we give an algorithm able to find it and therefore solve the initial non-relaxed NP-hard problem. The relaxations we consider decompose the original problem into two non-overlapping parts: an easy LP-tight part and a difficult one. For the latter part a combinatorial solver must be used.
As we show in our experiments, in a number of applications the second, difficult part constitutes only a small fraction of the whole problem. This property allows us to significantly reduce the computational time of the combinatorial solver and therefore solve problems which were out of reach before.}, author = {Haller, Stefan and Swoboda, Paul and Savchynskyy, Bogdan}, booktitle = {Proceedings of the 32nd AAAI Conference on Artificial Intelligence}, location = {New Orleans, LA, United States}, pages = {6581--6588}, publisher = {AAAI Press}, title = {{Exact MAP-inference by confining combinatorial search with LP relaxation}}, year = {2018}, } @article{18, abstract = {An N-superconcentrator is a directed, acyclic graph with N input nodes and N output nodes such that every subset of the inputs and every subset of the outputs of the same cardinality can be connected by node-disjoint paths. It is known that linear-size and bounded-degree superconcentrators exist. We prove the existence of such superconcentrators with asymptotic density 25.3 (where the density is the number of edges divided by N). The previously best known densities were 28 [12] and 27.4136 [17].}, author = {Kolmogorov, Vladimir and Rolinek, Michal}, issn = {0381-7032}, journal = {Ars Combinatoria}, number = {10}, pages = {269--304}, publisher = {Charles Babbage Research Centre}, title = {{Superconcentrators of density 25.3}}, volume = {141}, year = {2018}, } @article{6032, abstract = {The main result of this article is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Using a reduction to even Δ-matroids, we then extend the tractability result to larger classes of Δ-matroids that we call efficiently coverable. It properly includes classes that were known to be tractable before, namely, co-independent, compact, local, linear, and binary, with the following caveat: We represent Δ-matroids by lists of tuples, while the last two use a representation by matrices. Since an n × n matrix can represent exponentially many tuples, our tractability result is not strictly stronger than the known algorithm for linear and binary Δ-matroids.}, author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal}, journal = {ACM Transactions on Algorithms}, number = {2}, publisher = {ACM}, title = {{Even delta-matroids and the complexity of planar Boolean CSPs}}, doi = {10.1145/3230649}, volume = {15}, year = {2018}, } @misc{5573, abstract = {Graph matching problems for large displacement optical flow of RGB-D images.}, author = {Alhaija, Hassan and Sellent, Anita and Kondermann, Daniel and Rother, Carsten}, keywords = {graph matching, quadratic assignment problem}, publisher = {Institute of Science and Technology Austria}, title = {{Graph matching problems for GraphFlow – 6D Large Displacement Scene Flow}}, doi = {10.15479/AT:ISTA:82}, year = {2018}, } @inproceedings{641, abstract = {We introduce two novel methods for learning parameters of graphical models for image labelling.
The following two tasks underlie both methods: (i) perturb model parameters based on given features and ground truth labelings, so as to exactly reproduce these labelings as optima of the local polytope relaxation of the labelling problem; (ii) train a predictor for the perturbed model parameters so that improved model parameters can be applied to the labelling of novel data. Our first method implements task (i) by inverse linear programming and task (ii) using a regressor, e.g., a Gaussian process. Our second approach simultaneously solves tasks (i) and (ii) in a joint manner, while being restricted to linearly parameterised predictors. Experiments demonstrate the merits of both approaches.}, author = {Trajkovska, Vera and Swoboda, Paul and Åström, Freddie and Petra, Stefanie}, editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders}, isbn = {978-331958770-7}, location = {Kolding, Denmark}, pages = {323--334}, publisher = {Springer}, title = {{Graphical model parameter learning by inverse linear programming}}, doi = {10.1007/978-3-319-58771-4_26}, volume = {10302}, year = {2017}, } @article{644, abstract = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, 1} corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation.
As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.}, author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal}, journal = {SIAM Journal on Computing}, number = {3}, pages = {1087--1110}, publisher = {SIAM}, title = {{The complexity of general-valued CSPs}}, doi = {10.1137/16M1091836}, volume = {46}, year = {2017}, } @inproceedings{646, abstract = {We present a novel convex relaxation and a corresponding inference algorithm for the non-binary discrete tomography problem, that is, reconstructing discrete-valued images from few linear measurements. In contrast to state-of-the-art approaches that split the problem into a continuous reconstruction problem for the linear measurement constraints and a discrete labeling problem to enforce discrete-valued reconstructions, we propose a joint formulation that addresses both problems simultaneously, resulting in a tighter convex relaxation. For this purpose a constrained graphical model is set up and evaluated using a novel relaxation optimized by dual decomposition. We evaluate our approach experimentally and show superior solutions both mathematically (tighter relaxation) and experimentally in comparison to previously proposed relaxations.}, author = {Kuske, Jan and Swoboda, Paul and Petra, Stefanie}, editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders}, isbn = {978-331958770-7}, location = {Kolding, Denmark}, pages = {235--246}, publisher = {Springer}, title = {{A novel convex relaxation for non-binary discrete tomography}}, doi = {10.1007/978-3-319-58771-4_19}, volume = {10302}, year = {2017}, } @phdthesis{992, abstract = {An instance of the Constraint Satisfaction Problem (CSP) is given by a finite set of variables, a finite domain of labels, and a set of constraints, each constraint acting on a subset of the variables. The goal is to find an assignment of labels to its variables that satisfies all constraints (or decide whether one exists). If we allow more general “soft” constraints, which come with (possibly infinite) costs of particular assignments, we obtain instances from a richer class called Valued Constraint Satisfaction Problem (VCSP). There the goal is to find an assignment with minimum total cost. In this thesis, we focus (assuming that P ≠ NP) on classifying the computational complexity of CSPs and VCSPs under certain restricting conditions. Two results are the core content of the work. In one of them, we consider VCSPs parametrized by a constraint language, that is, the set of “soft” constraints allowed to form the instances, and finish the complexity classification modulo (missing pieces of) the complexity classification for the analogously parametrized CSP. The other result is a generalization of Edmonds’ perfect matching algorithm. This generalization contributes to complexity classifications in two ways. First, it gives a new (largest known) polynomial-time solvable class of Boolean CSPs in which every variable may appear in at most two constraints, and second, it settles the full classification of Boolean CSPs with planar drawing (again parametrized by a constraint language).}, author = {Rolinek, Michal}, issn = {2663-337X}, pages = {97}, publisher = {Institute of Science and Technology Austria}, title = {{Complexity of constraint satisfaction}}, doi = {10.15479/AT:ISTA:th_815}, year = {2017}, } @inproceedings{1192, abstract = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings.
Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local, and binary.}, author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal}, isbn = {978-161197478-2}, location = {Barcelona, Spain}, pages = {307--326}, publisher = {SIAM}, title = {{Even delta-matroids and the complexity of planar Boolean CSPs}}, doi = {10.1137/1.9781611974782.20}, year = {2017}, }