@article{7925,
abstract = {In this paper, we introduce a relaxed CQ method with alternated inertial step for solving split feasibility problems. We give convergence of the sequence generated by our method under some suitable assumptions. Some numerical implementations from sparse signal and image deblurring are reported to show the efficiency of our method.},
author = {Shehu, Yekini and Gibali, Aviv},
issn = {1862-4472},
journal = {Optimization Letters},
publisher = {Springer Nature},
title = {New inertial relaxed method for solving split feasibilities},
doi = {10.1007/s11590-020-01603-1},
year = {2020},
}
@article{7577,
abstract = {Weak convergence of inertial iterative method for solving variational inequalities is the focus of this paper. The cost function is assumed to be non-Lipschitz and monotone. We propose a projection-type method with inertial terms and give weak convergence analysis under appropriate conditions. Some test results are performed and compared with relevant methods in the literature to show the efficiency and advantages given by our proposed methods.},
author = {Shehu, Yekini and Iyiola, Olaniyi S.},
issn = {1563-504X},
journal = {Applicable Analysis},
pages = {1--25},
publisher = {Taylor \& Francis},
title = {Weak convergence for variational inequalities with inertial-type method},
doi = {10.1080/00036811.2020.1736287},
year = {2020},
}
@article{6593,
abstract = {We consider the monotone variational inequality problem in a Hilbert space and describe a projection-type method with inertial terms under the following properties: (a) The method generates a strongly convergent iteration sequence; (b) The method requires, at each iteration, only one projection onto the feasible set and two evaluations of the operator; (c) The method is designed for variational inequality for which the underline operator is monotone and uniformly continuous; (d) The method includes an inertial term. The latter is also shown to speed up the convergence in our numerical results. A comparison with some related methods is given and indicates that the new method is promising.},
author = {Shehu, Yekini and Li, Xiao-Huan and Dong, Qiao-Li},
issn = {1017-1398},
journal = {Numerical Algorithms},
pages = {365--388},
publisher = {Springer Nature},
title = {An efficient projection-type method for monotone variational inequalities in {Hilbert} spaces},
doi = {10.1007/s11075-019-00758-y},
volume = {84},
year = {2020},
}
@article{6596,
abstract = {It is well known that many problems in image recovery, signal processing, and machine learning can be modeled as finding zeros of the sum of maximal monotone and Lipschitz continuous monotone operators. Many papers have studied forward-backward splitting methods for finding zeros of the sum of two monotone operators in Hilbert spaces. Most of the proposed splitting methods in the literature have been proposed for the sum of maximal monotone and inverse-strongly monotone operators in Hilbert spaces. In this paper, we consider splitting methods for finding zeros of the sum of maximal monotone operators and Lipschitz continuous monotone operators in Banach spaces. We obtain weak and strong convergence results for the zeros of the sum of maximal monotone and Lipschitz continuous monotone operators in Banach spaces. Many already studied problems in the literature can be considered as special cases of this paper.},
author = {Shehu, Yekini},
issn = {1420-9012},
journal = {Results in Mathematics},
number = {4},
publisher = {Springer},
title = {Convergence results of forward-backward algorithms for sum of monotone operators in {Banach} spaces},
doi = {10.1007/s00025-019-1061-4},
volume = {74},
year = {2019},
}
@article{7412,
abstract = {We develop a framework for the rigorous analysis of focused stochastic local search algorithms. These algorithms search a state space by repeatedly selecting some constraint that is violated in the current state and moving to a random nearby state that addresses the violation, while (we hope) not introducing many new violations. An important class of focused local search algorithms with provable performance guarantees has recently arisen from algorithmizations of the Lovász local lemma (LLL), a nonconstructive tool for proving the existence of satisfying states by introducing a background measure on the state space. While powerful, the state transitions of algorithms in this class must be, in a precise sense, perfectly compatible with the background measure. In many applications this is a very restrictive requirement, and one needs to step outside the class. Here we introduce the notion of measure distortion and develop a framework for analyzing arbitrary focused stochastic local search algorithms, recovering LLL algorithmizations as the special case of no distortion. Our framework takes as input an arbitrary algorithm of such type and an arbitrary probability measure and shows how to use the measure as a yardstick of algorithmic progress, even for algorithms designed independently of the measure.},
author = {Achlioptas, Dimitris and Iliopoulos, Fotis and Kolmogorov, Vladimir},
issn = {1095-7111},
journal = {SIAM Journal on Computing},
number = {5},
pages = {1583--1602},
publisher = {SIAM},
title = {A local lemma for focused stochastic algorithms},
doi = {10.1137/16m109332x},
volume = {48},
year = {2019},
}
@inproceedings{6725,
abstract = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language.
Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in O(√3^{|D|}⋅poly(size(Γ))) time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ<1 there is no O(√3^{δ|D|}) algorithm, assuming that SETH holds.},
author = {Kolmogorov, Vladimir},
booktitle = {46th International Colloquium on Automata, Languages and Programming},
isbn = {978-3-95977-109-2},
issn = {1868-8969},
location = {Patras, Greece},
pages = {77:1--77:12},
publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
title = {Testing the complexity of a valued {CSP} language},
doi = {10.4230/LIPICS.ICALP.2019.77},
volume = {132},
year = {2019},
}
@inproceedings{7468,
abstract = {We present a new proximal bundle method for Maximum-A-Posteriori (MAP) inference in structured energy minimization problems. The method optimizes a Lagrangean relaxation of the original energy minimization problem using a multi plane block-coordinate Frank-Wolfe method that takes advantage of the specific structure of the Lagrangean decomposition. We show empirically that our method outperforms state-of-the-art Lagrangean decomposition based algorithms on some challenging Markov Random Field, multi-label discrete tomography and graph matching problems.},
author = {Swoboda, Paul and Kolmogorov, Vladimir},
booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition},
isbn = {9781728132938},
issn = {1063-6919},
location = {Long Beach, CA, United States},
publisher = {IEEE},
title = {{MAP} inference via block-coordinate {Frank--Wolfe} algorithm},
doi = {10.1109/CVPR.2019.01140},
volume = {2019-June},
year = {2019},
}
@inproceedings{7639,
abstract = {Deep neural networks (DNNs) have become increasingly important due to their excellent empirical performance on a wide range of problems. However, regularization is generally achieved by indirect means, largely due to the complex set of functions defined by a network and the difficulty in measuring function complexity. There exists no method in the literature for additive regularization based on a norm of the function, as is classically considered in statistical learning theory. In this work, we study the tractability of function norms for deep neural networks with ReLU activations. We provide, to the best of our knowledge, the first proof in the literature of the NP-hardness of computing function norms of DNNs of 3 or more layers. We also highlight a fundamental difference between shallow and deep networks. In the light on these results, we propose a new regularization strategy based on approximate function norms, and show its efficiency on a segmentation task with a DNN.},
author = {Rannen-Triki, Amal and Berman, Maxim and Kolmogorov, Vladimir and Blaschko, Matthew B.},
booktitle = {Proceedings of the 2019 International Conference on Computer Vision Workshop},
isbn = {9781728150239},
location = {Seoul, South Korea},
publisher = {IEEE},
title = {Function norms for neural networks},
doi = {10.1109/ICCVW.2019.00097},
year = {2019},
}
@article{7161,
abstract = {In this paper, we introduce an inertial projection-type method with different updating strategies for solving quasi-variational inequalities with strongly monotone and Lipschitz continuous operators in real Hilbert spaces. Under standard assumptions, we establish different strong convergence results for the proposed algorithm. Primary numerical experiments demonstrate the potential applicability of our scheme compared with some related methods in the literature.},
author = {Shehu, Yekini and Gibali, Aviv and Sagratella, Simone},
issn = {1573-2878},
journal = {Journal of Optimization Theory and Applications},
publisher = {Springer Nature},
title = {Inertial projection-type methods for solving quasi-variational inequalities in real {Hilbert} spaces},
doi = {10.1007/s10957-019-01616-6},
year = {2019},
}
@article{6032,
abstract = {The main result of this article is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvořák and Kupec. Using a reduction to even Δ-matroids, we then extend the tractability result to larger classes of Δ-matroids that we call efficiently coverable. It properly includes classes that were known to be tractable before, namely, co-independent, compact, local, linear, and binary, with the following caveat: We represent Δ-matroids by lists of tuples, while the last two use a representation by matrices. Since an n × n matrix can represent exponentially many tuples, our tractability result is not strictly stronger than the known algorithm for linear and binary Δ-matroids.},
author = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
journal = {ACM Transactions on Algorithms},
number = {2},
publisher = {ACM},
title = {Even delta-matroids and the complexity of planar {Boolean} {CSPs}},
doi = {10.1145/3230649},
volume = {15},
year = {2019},
}