@article{9234, abstract = {In this paper, we present two new inertial projection-type methods for solving multivalued variational inequality problems in finite-dimensional spaces. We establish the convergence of the sequence generated by these methods when the multivalued mapping associated with the problem is only required to be locally bounded without any monotonicity assumption. Furthermore, the inertial techniques that we employ in this paper are quite different from the ones used in most papers. Moreover, based on the weaker assumptions on the inertial factor in our methods, we derive several special cases of our methods. Finally, we present some experimental results to illustrate the benefits gained by introducing the inertial extrapolation steps.}, author = {Izuchukwu, Chinedu and Shehu, Yekini}, issn = {1572-9427}, journal = {Networks and Spatial Economics}, keywords = {Computer Networks and Communications, Software, Artificial Intelligence}, number = {2}, pages = {291--323}, publisher = {Springer Nature}, title = {{New inertial projection methods for solving multivalued variational inequality problems beyond monotonicity}}, doi = {10.1007/s11067-021-09517-w}, volume = {21}, year = {2021}, } @inproceedings{9227, abstract = {In the multiway cut problem we are given a weighted undirected graph G=(V,E) and a set T⊆V of k terminals. The goal is to find a minimum weight set of edges E′⊆E with the property that by removing E′ from G all the terminals become disconnected. In this paper we present a simple local search approximation algorithm for the multiway cut problem with approximation ratio 2−2/k. We present an experimental evaluation of the performance of our local search algorithm and show that it greatly outperforms the isolation heuristic of Dahlhaus et al. and has performance similar to that of the much more complex algorithms of Calinescu et al., Sharma and Vondrak, and Buchbinder et al., which have the currently best known approximation ratios for this problem.}, author = {Bloch-Hansen, Andrew and Samei, Nasim and Solis-Oba, Roberto}, booktitle = {Conference on Algorithms and Discrete Applied Mathematics}, isbn = {9783030678982}, issn = {1611-3349}, location = {Rupnagar, India}, pages = {346--358}, publisher = {Springer Nature}, title = {{Experimental evaluation of a local search approximation algorithm for the multiway cut problem}}, doi = {10.1007/978-3-030-67899-9_28}, volume = {12601}, year = {2021}, } @article{8817, abstract = {The paper introduces an inertial extragradient subgradient method with self-adaptive step sizes for solving equilibrium problems in real Hilbert spaces. Weak convergence of the proposed method is obtained under the condition that the bifunction is pseudomonotone and Lipschitz continuous. Linear convergence is also given when the bifunction is strongly pseudomonotone and Lipschitz continuous. Numerical implementations and comparisons with other related inertial methods are given using test problems, including a real-world application to a Nash–Cournot oligopolistic electricity market equilibrium model.}, author = {Shehu, Yekini and Iyiola, Olaniyi S.
and Thong, Duong Viet and Van, Nguyen Thi Cam}, issn = {1432-5217}, journal = {Mathematical Methods of Operations Research}, number = {2}, pages = {213--242}, publisher = {Springer Nature}, title = {{An inertial subgradient extragradient algorithm extended to pseudomonotone equilibrium problems}}, doi = {10.1007/s00186-020-00730-w}, volume = {93}, year = {2021}, } @article{9315, abstract = {We consider inertial iteration methods for the Fermat–Weber location problem and primal–dual three-operator splitting in real Hilbert spaces. To do this, we first obtain a weak convergence analysis and a nonasymptotic O(1/n) convergence rate for the inertial Krasnoselskii–Mann iteration for fixed points of nonexpansive operators in infinite-dimensional real Hilbert spaces under some seemingly easy-to-implement conditions on the iterative parameters. One of our contributions is that the convergence analysis and rate of convergence results are obtained under conditions that appear not as complicated and restrictive as those assumed in other related results in the literature. We then show that the Fermat–Weber location problem and primal–dual three-operator splitting are special cases of the fixed point problem for nonexpansive mappings, and consequently obtain the convergence analysis of inertial iteration methods for the Fermat–Weber location problem and primal–dual three-operator splitting in real Hilbert spaces. Some numerical implementations are drawn from primal–dual three-operator splitting to support the theoretical analysis.}, author = {Iyiola, Olaniyi S. and Shehu, Yekini}, issn = {1420-9012}, journal = {Results in Mathematics}, number = {2}, publisher = {Springer Nature}, title = {{New convergence results for inertial Krasnoselskii–Mann iterations in Hilbert spaces with applications}}, doi = {10.1007/s00025-021-01381-x}, volume = {76}, year = {2021}, } @article{9365, abstract = {In this paper, we propose a new iterative method with an alternated inertial step for solving the split common null point problem in real Hilbert spaces. We obtain weak convergence of the proposed iterative algorithm. Furthermore, we introduce the notion of a bounded linear regularity property for the split common null point problem and obtain the linear convergence property for the new algorithm under some mild assumptions. Finally, we provide some numerical examples to demonstrate the performance and efficiency of the proposed method.}, author = {Ogbuisi, Ferdinard U. and Shehu, Yekini and Yao, Jen-Chih}, issn = {1029-4945}, journal = {Optimization}, publisher = {Taylor and Francis}, title = {{Convergence analysis of new inertial method for the split common null point problem}}, doi = {10.1080/02331934.2021.1914035}, year = {2021}, } @article{8196, abstract = {This paper aims to obtain a strong convergence result for a Douglas–Rachford splitting method with an inertial extrapolation step for finding a zero of the sum of two set-valued maximal monotone operators without any further assumption of uniform monotonicity on any of the involved maximal monotone operators. Furthermore, our proposed method is easy to implement, and the inertial factor in our proposed method is a natural choice. Our method of proof is of independent interest.
Finally, some numerical implementations are given to confirm the theoretical analysis.}, author = {Shehu, Yekini and Dong, Qiao-Li and Liu, Lu-Lu and Yao, Jen-Chih}, issn = {1573-2924}, journal = {Optimization and Engineering}, pages = {2627--2653}, publisher = {Springer Nature}, title = {{New strong convergence method for the sum of two maximal monotone operators}}, doi = {10.1007/s11081-020-09544-5}, volume = {22}, year = {2021}, } @article{7925, abstract = {In this paper, we introduce a relaxed CQ method with an alternated inertial step for solving split feasibility problems. We establish convergence of the sequence generated by our method under some suitable assumptions. Some numerical implementations from sparse signal recovery and image deblurring are reported to show the efficiency of our method.}, author = {Shehu, Yekini and Gibali, Aviv}, issn = {1862-4480}, journal = {Optimization Letters}, pages = {2109--2126}, publisher = {Springer Nature}, title = {{New inertial relaxed method for solving split feasibilities}}, doi = {10.1007/s11590-020-01603-1}, volume = {15}, year = {2021}, } @article{6593, abstract = {We consider the monotone variational inequality problem in a Hilbert space and describe a projection-type method with inertial terms that has the following properties: (a) The method generates a strongly convergent iteration sequence; (b) The method requires, at each iteration, only one projection onto the feasible set and two evaluations of the operator; (c) The method is designed for variational inequalities in which the underlying operator is monotone and uniformly continuous; (d) The method includes an inertial term. The latter is also shown to speed up the convergence in our numerical results. A comparison with some related methods is given and indicates that the new method is promising.}, author = {Shehu, Yekini and Li, Xiao-Huan and Dong, Qiao-Li}, issn = {1572-9265}, journal = {Numerical Algorithms}, pages = {365--388}, publisher = {Springer Nature}, title = {{An efficient projection-type method for monotone variational inequalities in Hilbert spaces}}, doi = {10.1007/s11075-019-00758-y}, volume = {84}, year = {2020}, } @article{8077, abstract = {Projection methods with a vanilla inertial extrapolation step for variational inequalities have recently been of interest to many authors due to the improved convergence speed contributed by the inertial extrapolation step. However, these projection methods with inertial steps lose the Fejér monotonicity of the iterates with respect to the solution, which is enjoyed by their corresponding non-inertial projection methods for variational inequalities. This lack of Fejér monotonicity means that projection methods with a vanilla inertial extrapolation step may at times fail to converge faster than their corresponding non-inertial projection methods. Also, it has recently been proved that projection methods with a vanilla inertial extrapolation step may provide convergence rates worse than those of classical projected gradient methods for strongly convex functions. In this paper, we introduce projection methods with an alternated inertial extrapolation step for solving variational inequalities. We show that the sequence of iterates generated by our methods converges weakly to a solution of the variational inequality under some appropriate conditions. The Fejér monotonicity of the even subsequence is recovered in these methods, and a linear rate of convergence is obtained.
Numerical comparisons of our methods with some other inertial projection methods show that our methods are more efficient and outperform several of these inertial projection methods.}, author = {Shehu, Yekini and Iyiola, Olaniyi S.}, issn = {0168-9274}, journal = {Applied Numerical Mathematics}, pages = {315--337}, publisher = {Elsevier}, title = {{Projection methods with alternating inertial steps for variational inequalities: Weak and linear convergence}}, doi = {10.1016/j.apnum.2020.06.009}, volume = {157}, year = {2020}, } @article{7161, abstract = {In this paper, we introduce an inertial projection-type method with different updating strategies for solving quasi-variational inequalities with strongly monotone and Lipschitz continuous operators in real Hilbert spaces. Under standard assumptions, we establish different strong convergence results for the proposed algorithm. Preliminary numerical experiments demonstrate the potential applicability of our scheme compared with some related methods in the literature.}, author = {Shehu, Yekini and Gibali, Aviv and Sagratella, Simone}, issn = {1573-2878}, journal = {Journal of Optimization Theory and Applications}, pages = {877--894}, publisher = {Springer Nature}, title = {{Inertial projection-type methods for solving quasi-variational inequalities in real Hilbert spaces}}, doi = {10.1007/s10957-019-01616-6}, volume = {184}, year = {2020}, } @inproceedings{6725, abstract = {A Valued Constraint Satisfaction Problem (VCSP) provides a common framework that can express a wide range of discrete optimization problems. A VCSP instance is given by a finite set of variables, a finite domain of labels, and an objective function to be minimized. This function is represented as a sum of terms where each term depends on a subset of the variables. To obtain different classes of optimization problems, one can restrict all terms to come from a fixed set Γ of cost functions, called a language. Recent breakthrough results have established a complete complexity classification of such classes with respect to language Γ: if all cost functions in Γ satisfy a certain algebraic condition then all Γ-instances can be solved in polynomial time, otherwise the problem is NP-hard. Unfortunately, testing this condition for a given language Γ is known to be NP-hard. We thus study exponential algorithms for this meta-problem. We show that the tractability condition of a finite-valued language Γ can be tested in O((∛3)^{|D|}⋅poly(size(Γ))) time, where D is the domain of Γ and poly(⋅) is some fixed polynomial. We also obtain a matching lower bound under the Strong Exponential Time Hypothesis (SETH). More precisely, we prove that for any constant δ<1 there is no O((∛3)^{δ|D|}) algorithm, assuming that SETH holds.}, author = {Kolmogorov, Vladimir}, booktitle = {46th International Colloquium on Automata, Languages and Programming}, isbn = {978-3-95977-109-2}, issn = {1868-8969}, location = {Patras, Greece}, pages = {77:1--77:12}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Testing the complexity of a valued CSP language}}, doi = {10.4230/LIPICS.ICALP.2019.77}, volume = {132}, year = {2019}, } @article{6596, abstract = {It is well known that many problems in image recovery, signal processing, and machine learning can be modeled as finding zeros of the sum of maximal monotone and Lipschitz continuous monotone operators.
Many papers have studied forward-backward splitting methods for finding zeros of the sum of two monotone operators in Hilbert spaces. Most of the splitting methods in the literature have been proposed for the sum of maximal monotone and inverse-strongly monotone operators in Hilbert spaces. In this paper, we consider splitting methods for finding zeros of the sum of maximal monotone operators and Lipschitz continuous monotone operators in Banach spaces. We obtain weak and strong convergence results for the zeros of the sum of maximal monotone and Lipschitz continuous monotone operators in Banach spaces. Many problems already studied in the literature can be considered as special cases of the results in this paper.}, author = {Shehu, Yekini}, issn = {1420-9012}, journal = {Results in Mathematics}, number = {4}, publisher = {Springer}, title = {{Convergence results of forward-backward algorithms for sum of monotone operators in Banach spaces}}, doi = {10.1007/s00025-019-1061-4}, volume = {74}, year = {2019}, } @article{7000, abstract = {The main contributions of this paper are the proposition and convergence analysis of a class of inertial projection-type algorithms for solving variational inequality problems in real Hilbert spaces where the underlying operator is monotone and uniformly continuous. We carry out a unified analysis of the proposed method under very mild assumptions. In particular, weak convergence of the generated sequence is established, and a nonasymptotic O(1/n) rate of convergence is obtained, where n denotes the iteration counter. We also present some experimental results to illustrate the benefits gained by introducing the inertial extrapolation steps.}, author = {Shehu, Yekini and Iyiola, Olaniyi S. and Li, Xiao-Huan and Dong, Qiao-Li}, issn = {1807-0302}, journal = {Computational and Applied Mathematics}, number = {4}, publisher = {Springer Nature}, title = {{Convergence analysis of projection method for variational inequalities}}, doi = {10.1007/s40314-019-0955-9}, volume = {38}, year = {2019}, } @article{7412, abstract = {We develop a framework for the rigorous analysis of focused stochastic local search algorithms. These algorithms search a state space by repeatedly selecting some constraint that is violated in the current state and moving to a random nearby state that addresses the violation, while (we hope) not introducing many new violations. An important class of focused local search algorithms with provable performance guarantees has recently arisen from algorithmizations of the Lovász local lemma (LLL), a nonconstructive tool for proving the existence of satisfying states by introducing a background measure on the state space. While powerful, the state transitions of algorithms in this class must be, in a precise sense, perfectly compatible with the background measure. In many applications this is a very restrictive requirement, and one needs to step outside the class. Here we introduce the notion of measure distortion and develop a framework for analyzing arbitrary focused stochastic local search algorithms, recovering LLL algorithmizations as the special case of no distortion.
Our framework takes as input an arbitrary algorithm of this type and an arbitrary probability measure, and shows how to use the measure as a yardstick of algorithmic progress, even for algorithms designed independently of the measure.}, author = {Achlioptas, Dimitris and Iliopoulos, Fotis and Kolmogorov, Vladimir}, issn = {1095-7111}, journal = {SIAM Journal on Computing}, number = {5}, pages = {1583--1602}, publisher = {SIAM}, title = {{A local lemma for focused stochastic algorithms}}, doi = {10.1137/16m109332x}, volume = {48}, year = {2019}, } @inproceedings{7468, abstract = {We present a new proximal bundle method for Maximum-A-Posteriori (MAP) inference in structured energy minimization problems. The method optimizes a Lagrangean relaxation of the original energy minimization problem using a multi-plane block-coordinate Frank-Wolfe method that takes advantage of the specific structure of the Lagrangean decomposition. We show empirically that our method outperforms state-of-the-art Lagrangean decomposition based algorithms on some challenging Markov Random Field, multi-label discrete tomography, and graph matching problems.}, author = {Swoboda, Paul and Kolmogorov, Vladimir}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, isbn = {9781728132938}, issn = {10636919}, location = {Long Beach, CA, United States}, publisher = {IEEE}, title = {{MAP inference via block-coordinate Frank-Wolfe algorithm}}, doi = {10.1109/CVPR.2019.01140}, volume = {2019-June}, year = {2019}, } @inproceedings{7639, abstract = {Deep neural networks (DNNs) have become increasingly important due to their excellent empirical performance on a wide range of problems. However, regularization is generally achieved by indirect means, largely due to the complex set of functions defined by a network and the difficulty in measuring function complexity. There exists no method in the literature for additive regularization based on a norm of the function, as is classically considered in statistical learning theory. In this work, we study the tractability of function norms for deep neural networks with ReLU activations. We provide, to the best of our knowledge, the first proof in the literature of the NP-hardness of computing function norms of DNNs of 3 or more layers. We also highlight a fundamental difference between shallow and deep networks. In light of these results, we propose a new regularization strategy based on approximate function norms, and show its efficiency on a segmentation task with a DNN.}, author = {Rannen-Triki, Amal and Berman, Maxim and Kolmogorov, Vladimir and Blaschko, Matthew B.}, booktitle = {Proceedings of the 2019 International Conference on Computer Vision Workshop}, isbn = {9781728150239}, location = {Seoul, South Korea}, publisher = {IEEE}, title = {{Function norms for neural networks}}, doi = {10.1109/ICCVW.2019.00097}, year = {2019}, } @article{703, abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem, or (ii) non-optimal, meaning that they provably do not belong to any optimal solution.
With access to an exact solver for a linear programming relaxation of the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm which has access only to a suboptimal dual solver and can still ensure the (non-)optimality of the marked labels, although the overall number of marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver. Our method scales well and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.}, author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan}, issn = {01628828}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, number = {7}, pages = {1668--1682}, publisher = {IEEE}, title = {{Maximum persistency via iterative relaxed inference with graphical models}}, doi = {10.1109/TPAMI.2017.2730884}, volume = {40}, year = {2018}, } @inbook{10864, abstract = {We prove that every congruence distributive variety has directed Jónsson terms, and every congruence modular variety has directed Gumm terms. The directed terms we construct witness every case of absorption witnessed by the original Jónsson or Gumm terms. This result is equivalent to a pair of claims about absorption for admissible preorders in congruence distributive and congruence modular varieties, respectively. For finite algebras, these absorption theorems have already seen significant applications, but until now, it was not clear if the theorems hold for general algebras as well. Our method also yields a novel proof of a result by P. Lipparini about the existence of a chain of terms (which we call Pixley terms) in varieties that are at the same time congruence distributive and k-permutable for some k.}, author = {Kazda, Alexandr and Kozik, Marcin and McKenzie, Ralph and Moore, Matthew}, booktitle = {Don Pigozzi on Abstract Algebraic Logic, Universal Algebra, and Computer Science}, editor = {Czelakowski, J}, isbn = {9783319747712}, issn = {2211-2766}, pages = {203--220}, publisher = {Springer Nature}, title = {{Absorption and directed Jónsson terms}}, doi = {10.1007/978-3-319-74772-9_7}, volume = {16}, year = {2018}, } @inproceedings{273, abstract = {The accuracy of information retrieval systems is often measured using complex loss functions such as the average precision (AP) or the normalized discounted cumulative gain (NDCG). Given a set of positive and negative samples, the parameters of a retrieval system can be estimated by minimizing these loss functions. However, the non-differentiability and non-decomposability of these loss functions do not allow for simple gradient-based optimization algorithms. This issue is generally circumvented either by optimizing a structured hinge-loss upper bound of the loss function or by using asymptotic methods like the direct-loss minimization framework. Yet, the high computational complexity of loss-augmented inference, which is necessary for both frameworks, prohibits their use on large training data sets. To alleviate this deficiency, we present a novel quicksort-flavored algorithm for a large class of non-decomposable loss functions. We provide a complete characterization of the loss functions that are amenable to our algorithm, and show that this class includes both AP- and NDCG-based loss functions.
Furthermore, we prove that no comparison-based algorithm can asymptotically improve upon the computational complexity of our approach. We demonstrate the effectiveness of our approach in the context of optimizing the structured hinge-loss upper bound of the AP and NDCG losses for learning models for a variety of vision tasks. We show that our approach provides significantly better results than simpler decomposable loss functions, while requiring a comparable training time.}, author = {Mohapatra, Pritish and Rolinek, Michal and Jawahar, C V and Kolmogorov, Vladimir and Kumar, M Pawan}, booktitle = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, isbn = {9781538664209}, location = {Salt Lake City, UT, USA}, pages = {3693--3701}, publisher = {IEEE}, title = {{Efficient optimization for rank-based loss functions}}, doi = {10.1109/cvpr.2018.00389}, year = {2018}, } @inproceedings{193, abstract = {We show attacks on five data-independent memory-hard functions (iMHFs) that were submitted to the password hashing competition (PHC). Informally, an MHF is a function which cannot be evaluated on dedicated hardware, like ASICs, at significantly lower hardware and/or energy cost than evaluating a single instance on a standard single-core architecture. Data-independent means that the memory access pattern of the function is independent of the input; this makes iMHFs harder to construct than data-dependent ones, but the latter can be attacked by various side-channel attacks. Following [Alwen-Blocki'16], we capture the evaluation of an iMHF as a directed acyclic graph (DAG). The cumulative parallel pebbling complexity of this DAG is a measure for the hardware cost of evaluating the iMHF on an ASIC. Ideally, one would like the complexity of a DAG underlying an iMHF to be as close to quadratic in the number of nodes of the graph as possible. Instead, we show that (the DAGs underlying) the following iMHFs are far from this bound: Rig.v2, TwoCats, and Gambit each have an exponent of no more than 1.75. Moreover, we show that the iMHF modes of the PHC finalists Pomelo and Lyra2 have complexities with exponents of at most 1.83 and 1.67, respectively. To show this, we investigate a combinatorial property of each underlying DAG (called its depth-robustness). By establishing upper bounds on this property, we are then able to apply the general technique of [Alwen-Blocki'16] for analyzing the hardware costs of an iMHF.}, author = {Alwen, Joel F and Gazi, Peter and Kamath Hosdurg, Chethan and Klein, Karen and Osang, Georg F and Pietrzak, Krzysztof Z and Reyzin, Leonid and Rolinek, Michal and Rybar, Michal}, booktitle = {Proceedings of the 2018 on Asia Conference on Computer and Communications Security}, location = {Incheon, Republic of Korea}, pages = {51--65}, publisher = {ACM}, title = {{On the memory hardness of data independent password hashing functions}}, doi = {10.1145/3196494.3196534}, year = {2018}, }