@inproceedings{5978,
abstract = {We consider the MAP-inference problem for graphical models, which is a valued constraint satisfaction problem defined on real numbers with a natural summation operation. We propose a family of relaxations (different from the famous Sherali-Adams hierarchy), which naturally define lower bounds for its optimum. This family always contains a tight relaxation and we give an algorithm able to find it and therefore solve the initial non-relaxed NP-hard problem. The relaxations we consider decompose the original problem into two non-overlapping parts: an easy LP-tight part and a difficult one. For the latter part a combinatorial solver must be used. As we show in our experiments, in a number of applications the second, difficult part constitutes only a small fraction of the whole problem. This property allows us to significantly reduce the computational time of the combinatorial solver and therefore solve problems which were out of reach before.},
author = {Haller, Stefan and Swoboda, Paul and Savchynskyy, Bogdan},
booktitle = {Proceedings of the 32nd AAAI Conference on Artificial Intelligence},
location = {New Orleans, LA, United States},
pages = {6581--6588},
publisher = {AAAI},
title = {{Exact MAP-inference by confining combinatorial search with LP relaxation}},
year = {2018},
}
@article{6032,
abstract = {The main result of this article is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Using a reduction to even Δ-matroids, we then extend the tractability result to larger classes of Δ-matroids that we call efficiently coverable. It properly includes classes that were known to be tractable before, namely, co-independent, compact, local, linear, and binary, with the following caveat: We represent Δ-matroids by lists of tuples, while the last two use a representation by matrices. Since an n × n matrix can represent exponentially many tuples, our tractability result is not strictly stronger than the known algorithm for linear and binary Δ-matroids.},
author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
journal = {ACM Transactions on Algorithms},
number = {2},
publisher = {ACM},
title = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
doi = {10.1145/3230649},
volume = {15},
year = {2018},
}
@article{18,
abstract = {An N-superconcentrator is a directed, acyclic graph with N input nodes and N output nodes such that every subset of the inputs and every subset of the outputs of the same cardinality can be connected by node-disjoint paths. It is known that linear-size and bounded-degree superconcentrators exist. We prove the existence of such superconcentrators with asymptotic density 25.3 (where the density is the number of edges divided by N). The previously best known densities were 28 [12] and 27.4136 [17].},
author = {Kolmogorov, Vladimir and Rolinek, Michal},
issn = {0381-7032},
journal = {Ars Combinatoria},
number = {10},
pages = {269--304},
publisher = {Charles Babbage Research Centre},
title = {{Superconcentrators of density 25.3}},
volume = {141},
year = {2018},
}
@inproceedings{193,
abstract = {We show attacks on five data-independent memory-hard functions (iMHF) that were submitted to the password hashing competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure for the hardware cost of evaluating the iMHF on an ASIC. Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats and Gambit each having an exponent no more than 1.75. Moreover, we show that the complexity of the iMHF modes of the PHC finalists Pomelo and Lyra2 have exponents at most 1.83 and 1.67, respectively. To show this we investigate a combinatorial property of each underlying DAG (called its depth-robustness). By establishing upper bounds on this property we are then able to apply the general technique of [Alwen-Blocki'16] for analyzing the hardware costs of an iMHF.},
author = {Alwen, Joel F and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F and Pietrzak, Krzysztof Z and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal},
booktitle = {Proceedings of the 2018 Asia Conference on Computer and Communications Security},
location = {Incheon, Republic of Korea},
pages = {51--65},
publisher = {ACM},
title = {{On the memory hardness of data independent password hashing functions}},
doi = {10.1145/3196494.3196534},
year = {2018},
}
@inproceedings{273,
abstract = {The accuracy of information retrieval systems is often measured using complex loss functions such as the average precision (AP) or the normalized discounted cumulative gain (NDCG). Given a set of positive and negative samples, the parameters of a retrieval system can be estimated by minimizing these loss functions. However, the non-differentiability and non-decomposability of these loss functions do not allow for simple gradient-based optimization algorithms. This issue is generally circumvented by either optimizing a structured hinge-loss upper bound to the loss function or by using asymptotic methods like the direct-loss minimization framework. Yet, the high computational complexity of loss-augmented inference, which is necessary for both frameworks, prohibits its use on large training data sets. To alleviate this deficiency, we present a novel quicksort-flavored algorithm for a large class of non-decomposable loss functions. We provide a complete characterization of the loss functions that are amenable to our algorithm, and show that it includes both AP and NDCG based loss functions. Furthermore, we prove that no comparison-based algorithm can improve upon the computational complexity of our approach asymptotically. We demonstrate the effectiveness of our approach in the context of optimizing the structured hinge loss upper bound of AP and NDCG loss for learning models for a variety of vision tasks. We show that our approach provides significantly better results than simpler decomposable loss functions, while requiring a comparable training time.},
author = {Mohapatra, Pritish and Rolinek, Michal and Jawahar, C V and Kolmogorov, Vladimir and Kumar, M Pawan},
booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
isbn = {9781538664209},
location = {Salt Lake City, UT, USA},
pages = {3693--3701},
publisher = {IEEE},
title = {{Efficient optimization for rank-based loss functions}},
doi = {10.1109/cvpr.2018.00389},
year = {2018},
}
@inproceedings{915,
abstract = {We propose a dual decomposition and linear program relaxation of the NP-hard minimum cost multicut problem. Unlike other polyhedral relaxations of the multicut polytope, it is amenable to efficient optimization by message passing. Like other polyhedral relaxations, it can be tightened efficiently by cutting planes. We define an algorithm that alternates between message passing and efficient separation of cycle- and odd-wheel inequalities. This algorithm is more efficient than state-of-the-art algorithms based on linear programming, including algorithms written in the framework of leading commercial software, as we show in experiments with large instances of the problem from applications in computer vision, biomedical image analysis and data mining.},
author = {Swoboda, Paul and Andres, Bjoern},
booktitle = {2017 IEEE Conference on Computer Vision and Pattern Recognition},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {4990--4999},
publisher = {IEEE},
title = {{A message passing algorithm for the minimum cost multicut problem}},
doi = {10.1109/CVPR.2017.530},
volume = {2017},
year = {2017},
}
@inproceedings{916,
abstract = {We study the quadratic assignment problem, in computer vision also known as graph matching. Two leading solvers for this problem optimize the Lagrange decomposition duals with sub-gradient and dual ascent (also known as message passing) updates. We explore this direction further and propose several additional Lagrangean relaxations of the graph matching problem along with corresponding algorithms, which are all based on a common dual ascent framework. Our extensive empirical evaluation gives several theoretical insights and suggests a new state-of-the-art anytime solver for the considered problem. Our improvement over state-of-the-art is particularly visible on a new dataset with large-scale sparse problem instances containing more than 500 graph nodes each.},
author = {Swoboda, Paul and Rother, Carsten and Abu Alhaija, Hassan and Kainmueller, Dagmar and Savchynskyy, Bogdan},
booktitle = {2017 IEEE Conference on Computer Vision and Pattern Recognition},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {7062--7071},
publisher = {IEEE},
title = {{A study of Lagrangean decompositions and dual ascent solvers for graph matching}},
doi = {10.1109/CVPR.2017.747},
volume = {2017},
year = {2017},
}
@inproceedings{917,
abstract = {We propose a general dual ascent framework for Lagrangean decomposition of combinatorial problems. Although methods of this type have shown their efficiency for a number of problems, so far there was no general algorithm applicable to multiple problem types. In this work, we propose such a general algorithm. It depends on several parameters, which can be used to optimize its performance in each particular setting. We demonstrate efficacy of our method on graph matching and multicut problems, where it outperforms state-of-the-art solvers including those based on subgradient optimization and off-the-shelf linear programming solvers.},
author = {Swoboda, Paul and Kuske, Jan and Savchynskyy, Bogdan},
booktitle = {2017 IEEE Conference on Computer Vision and Pattern Recognition},
isbn = {978-153860457-1},
location = {Honolulu, HI, United States},
pages = {4950--4960},
publisher = {IEEE},
title = {{A dual ascent framework for Lagrangean decomposition of combinatorial problems}},
doi = {10.1109/CVPR.2017.526},
volume = {2017},
year = {2017},
}
@inproceedings{641,
abstract = {We introduce two novel methods for learning parameters of graphical models for image labelling. The following two tasks underlie both methods: (i) perturb model parameters based on given features and ground truth labelings, so as to exactly reproduce these labelings as optima of the local polytope relaxation of the labelling problem; (ii) train a predictor for the perturbed model parameters so that improved model parameters can be applied to the labelling of novel data. Our first method implements task (i) by inverse linear programming and task (ii) using a regressor, e.g., a Gaussian process. Our second approach simultaneously solves tasks (i) and (ii) in a joint manner, while being restricted to linearly parameterised predictors. Experiments demonstrate the merits of both approaches.},
author = {Trajkovska, Vera and Swoboda, Paul and Åström, Freddie and Petra, Stefanie},
editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders},
booktitle = {Scale Space and Variational Methods in Computer Vision},
isbn = {978-331958770-7},
location = {Kolding, Denmark},
pages = {323--334},
publisher = {Springer},
title = {{Graphical model parameter learning by inverse linear programming}},
doi = {10.1007/978-3-319-58771-4_26},
volume = {10302},
year = {2017},
}
@article{644,
abstract = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0, ∞} corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný. An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.},
author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal},
journal = {SIAM Journal on Computing},
number = {3},
pages = {1087--1110},
publisher = {SIAM},
title = {{The complexity of general-valued CSPs}},
doi = {10.1137/16M1091836},
volume = {46},
year = {2017},
}