@article{11623, abstract = {Brightness variations due to dark spots on the stellar surface encode information about stellar surface rotation and magnetic activity. In this work, we analyze the Kepler long-cadence data of 26,521 main-sequence stars of spectral types M and K in order to measure their surface rotation and photometric activity level. Rotation-period estimates are obtained through the combination of a wavelet analysis and the autocorrelation function of the light curves. Reliable rotation estimates are determined by comparing the results from the different rotation diagnostics and four data sets. We also measure the photometric activity proxy Sph using the amplitude of the flux variations on an appropriate timescale. We report rotation periods and photometric activity proxies for about 60% of the sample, including 4431 targets for which McQuillan et al. did not report a rotation period. For the common targets with rotation estimates in this study and in McQuillan et al., our rotation periods agree for 99% of them. In this work, we also identify potential polluters, such as misclassified red giants and classical pulsator candidates. Within the parameter range we study, there is a mild tendency for hotter stars to have shorter rotation periods. The photometric activity proxy spans a wider range of values with increasing effective temperature. The rotation period and photometric activity proxy are also related, with Sph being larger for fast rotators. Similar to McQuillan et al., we find a bimodal distribution of rotation periods.}, author = {Santos, A. R. G. and García, R. A. and Mathur, S. and Bugnet, Lisa Annabelle and van Saders, J. L. and Metcalfe, T. S. and Simonian, G. V. A. and Pinsonneault, M. H.}, issn = {0067-0049}, journal = {The Astrophysical Journal Supplement Series}, keywords = {Space and Planetary Science, Astronomy and Astrophysics, methods: data analysis, stars: activity, stars: low-mass, stars: rotation, starspots, techniques: photometric}, number = {1}, publisher = {IOP Publishing}, title = {{Surface rotation and photometric activity for Kepler targets. I. M and K main-sequence stars}}, doi = {10.3847/1538-4365/ab3b56}, volume = {244}, year = {2019}, } @unpublished{11627, abstract = {For a solar-like star, the surface rotation evolves with time, making it possible, in principle, to estimate the age of a star from its surface rotation period. Here we are interested in measuring surface rotation periods of solar-like stars observed by the NASA mission Kepler. Different methods have been developed to track rotation signals in Kepler photometric light curves: time-frequency analysis based on wavelet techniques, autocorrelation functions, and the composite spectrum. We use the learning abilities of random forest classifiers to make decisions during two crucial steps of the analysis. First, given a set of input parameters, we classify the considered Kepler targets into rotating MS stars, non-rotating MS stars, red giants, binaries, and pulsators. We then use a second classifier, applied only to the rotating MS targets, to select the best data analysis treatment.}, author = {Breton, S. N. and Bugnet, Lisa Annabelle and Santos, A. R. G. and Le Saux, A. and Mathur, S. and Palle, P. L. and Garcia, R.
A.}, booktitle = {arXiv}, keywords = {asteroseismology, rotation, solar-like stars, kepler, machine learning, random forest}, title = {{Determining surface rotation periods of solar-like stars observed by the Kepler mission using machine learning techniques}}, doi = {10.48550/arXiv.1906.09609}, year = {2019}, } @unpublished{11630, abstract = {The second mission of NASA’s Kepler satellite, K2, has collected hundreds of thousands of light curves for stars close to the ecliptic plane. This new sample could increase the number of known pulsating stars and thus improve our understanding of those stars. So far, only a few of these stars have been properly classified and published. In this work, we present a method to automatically classify K2 pulsating stars using a machine learning technique called random forest. The objective is to sort the stars into four classes: red giants (RG), main-sequence solar-like stars (SL), classical pulsators (PULS), and Other. To do this, we use the effective temperatures and the luminosities of the stars as well as the FliPer features, which measure the amount of power contained in the power spectral density. The classifier currently retrieves the correct classification for more than 80% of the stars.}, author = {Le Saux, A. and Bugnet, Lisa Annabelle and Mathur, S. and Breton, S. N. and Garcia, R. A.}, booktitle = {arXiv}, keywords = {asteroseismology, methods: data analysis, techniques: machine learning, stars: oscillations}, title = {{Automatic classification of K2 pulsating stars using machine learning techniques}}, doi = {10.48550/arXiv.1906.09611}, year = {2019}, } @inproceedings{11826, abstract = {The diameter, radius, and eccentricities are natural graph parameters. While these problems have been studied extensively, there are no known dynamic algorithms for them beyond the ones that follow from trivial recomputation after each update or from solving dynamic All-Pairs Shortest Paths (APSP), which is very computationally intensive. This is the situation for dynamic approximation algorithms as well, even if only edge insertions or edge deletions need to be supported. This paper provides a comprehensive study of the dynamic approximation of Diameter, Radius and Eccentricities, providing both conditional lower bounds, and new algorithms whose bounds are optimal under popular hypotheses in fine-grained complexity. Some of the highlights include: - Under popular hardness hypotheses, there can be no significantly better fully dynamic approximation algorithms than recomputing the answer after each update, or maintaining full APSP. - Nearly optimal partially dynamic (incremental/decremental) algorithms can be achieved via efficient reductions to (incremental/decremental) maintenance of Single-Source Shortest Paths. For instance, a nearly (3/2+ε)-approximation to Diameter in directed or undirected n-vertex, m-edge graphs can be maintained decrementally in total time m^{1+o(1)}√n/ε^2.
This nearly matches the static 3/2-approximation algorithm for the problem, which is known to be conditionally optimal.}, author = {Ancona, Bertie and Henzinger, Monika H and Roditty, Liam and Williams, Virginia Vassilevska and Wein, Nicole}, booktitle = {46th International Colloquium on Automata, Languages, and Programming}, isbn = {978-3-95977-109-2}, issn = {1868-8969}, location = {Patras, Greece}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Algorithms and hardness for diameter in dynamic graphs}}, doi = {10.4230/LIPICS.ICALP.2019.13}, volume = {132}, year = {2019}, } @inproceedings{11850, abstract = {Modern networked systems are increasingly reconfigurable, enabling demand-aware infrastructures whose resources can be adjusted according to the workload they currently serve. Such dynamic adjustments can be exploited to improve network utilization and hence performance, by moving frequently interacting communication partners closer, e.g., collocating them in the same server or datacenter. However, dynamically changing the embedding of workloads is algorithmically challenging: communication patterns are often not known ahead of time, but must be learned. During the learning process, overheads related to unnecessary moves (i.e., re-embeddings) should be minimized. This paper studies a fundamental model which captures the tradeoff between the benefits and costs of dynamically collocating communication partners on ℓ servers, in an online manner. Our main contribution is a distributed online algorithm which is asymptotically almost optimal, i.e., it almost matches the lower bound (also derived in this paper) on the competitive ratio of any (distributed or centralized) online algorithm.}, author = {Henzinger, Monika H and Neumann, Stefan and Schmid, Stefan}, booktitle = {SIGMETRICS'19: International Conference on Measurement and Modeling of Computer Systems}, isbn = {978-1-4503-6678-6}, location = {Phoenix, AZ, United States}, pages = {43–44}, publisher = {Association for Computing Machinery}, title = {{Efficient distributed workload (re-)embedding}}, doi = {10.1145/3309697.3331503}, year = {2019}, } @inbook{11847, abstract = {This paper serves as a user guide to the Vienna graph clustering framework. We review our general memetic algorithm, VieClus, to tackle the graph clustering problem. Key components of our contribution are natural recombine operators that employ ensemble clusterings as well as multi-level techniques. Lastly, we combine these techniques with a scalable communication protocol, producing a system that is able to compute high-quality solutions in a short amount of time.
After describing the algorithms employed, we establish the connection between the graph clustering problem and protein–protein interaction networks, and we describe how the software can be used, which file formats are expected, and how it can be used to find functional groups in protein–protein interaction networks.}, author = {Biedermann, Sonja and Henzinger, Monika H and Schulz, Christian and Schuster, Bernhard}, booktitle = {Protein-Protein Interaction Networks}, editor = {Canzar, Stefan and Rojas Ringeling, Francisca}, isbn = {9781493998722}, issn = {1940-6029}, pages = {215–231}, publisher = {Springer Nature}, title = {{Vienna Graph Clustering}}, doi = {10.1007/978-1-4939-9873-9_16}, volume = {2074}, year = {2019}, } @inproceedings{11853, abstract = {We present a deterministic dynamic algorithm for maintaining a (1+ε)f-approximate minimum cost set cover with O(f log(Cn)/ε^2) amortized update time, when the input set system is undergoing element insertions and deletions. Here, n denotes the number of elements, each element appears in at most f sets, and the cost of each set lies in the range [1/C, 1]. Our result, together with that of Gupta~et~al.~[STOC'17], implies that there is a deterministic algorithm for this problem with O(f log(Cn)) amortized update time and O(min(log n, f))-approximation ratio, which nearly matches the polynomial-time hardness of approximation for minimum set cover in the static setting. Our update time is only an O(log(Cn)) factor away from a trivial lower bound. Prior to our work, the best approximation ratio guaranteed by deterministic algorithms was O(f^2), due to Bhattacharya~et~al.~[ICALP'15]. In contrast, the only result that guaranteed an O(f)-approximation was obtained very recently by Abboud~et~al.~[STOC'19], who designed a dynamic algorithm with a (1+ε)f-approximation ratio and O(f^2 log n/ε) amortized update time. Besides the extra O(f) factor in the update time compared to our and Gupta~et~al.'s results, the Abboud~et~al.~algorithm is randomized and works only when the adversary is oblivious and the sets are unweighted (each set has the same cost). We achieve our result via the primal-dual approach, by maintaining a fractional packing solution as a dual certificate. This approach was pursued previously by Bhattacharya~et~al.~and Gupta~et~al., but not in the recent paper by Abboud~et~al. Unlike previous primal-dual algorithms that try to satisfy some local constraints for individual sets at all times, our algorithm waits until the dual solution changes significantly globally, and fixes the solution only where the fix is needed.}, author = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon}, booktitle = {60th Annual Symposium on Foundations of Computer Science}, isbn = {978-1-7281-4953-0}, issn = {2575-8454}, location = {Baltimore, MD, United States}, pages = {406--423}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{A new deterministic algorithm for dynamic set cover}}, doi = {10.1109/focs.2019.00033}, year = {2019}, } @inproceedings{11851, abstract = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weighted sum of the cut edges. In this paper, we engineer the fastest known exact algorithm for the problem.
State-of-the-art algorithms like the algorithm of Padberg and Rinaldi or the algorithm of Nagamochi, Ono and Ibaraki identify edges that can be contracted to reduce the graph size such that at least one minimum cut is maintained in the contracted graph. Our algorithm achieves improvements in running time over these algorithms through a combination of techniques. First, we use a recently developed fast and parallel inexact minimum cut algorithm to obtain a better upper bound for the problem. Afterwards, we use reductions that depend on this bound to reduce the size of the graph much faster than previously possible. We use improved data structures to further lower the running time of our algorithm. Additionally, we parallelize the contraction routines of Nagamochi et al. Overall, we arrive at a system that significantly outperforms the fastest state-of-the-art solvers for the exact minimum cut problem.}, author = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian}, booktitle = {33rd International Parallel and Distributed Processing Symposium}, isbn = {978-1-7281-1247-3}, issn = {1530-2075}, location = {Rio de Janeiro, Brazil}, publisher = {Institute of Electrical and Electronics Engineers}, title = {{Shared-memory exact minimum cuts}}, doi = {10.1109/ipdps.2019.00013}, year = {2019}, } @inproceedings{11865, abstract = {We present the first sublinear-time algorithm for a distributed message-passing network to compute its edge connectivity λ exactly in the CONGEST model, as long as the network contains no parallel edges. Our algorithm takes Õ(n^{1−1/353}D^{1/353} + n^{1−1/706}) time to compute λ and a cut of cardinality λ with high probability, where n and D are the number of nodes and the diameter of the network, respectively, and Õ hides polylogarithmic factors. This running time is sublinear in n (i.e. Õ(n^{1−ε})) whenever D is. Previous sublinear-time distributed algorithms can solve this problem either (i) exactly only when λ = O(n^{1/8−ε}) [Thurimella PODC’95; Pritchard, Thurimella, ACM Trans. Algorithms’11; Nanongkai, Su, DISC’14] or (ii) approximately [Ghaffari, Kuhn, DISC’13; Nanongkai, Su, DISC’14]. To achieve this we develop and combine several new techniques. First, we design the first distributed algorithm that can compute a k-edge connectivity certificate for any k = O(n^{1−ε}) in time Õ(√(nk) + D). The previous sublinear-time algorithm can do so only when k = o(√n) [Thurimella PODC’95]. In fact, our algorithm can be turned into the first parallel algorithm with polylogarithmic depth and near-linear work. Previous near-linear work algorithms are essentially sequential and previous polylogarithmic-depth algorithms require Ω(mk) work in the worst case (e.g. [Karger, Motwani, STOC’93]). Second, we show that by combining the recent distributed expander decomposition technique of [Chang, Pettie, Zhang, SODA’19] with techniques from the sequential deterministic edge connectivity algorithm of [Kawarabayashi, Thorup, STOC’15], we can decompose the network into a sublinear number of clusters with small average diameter and without any mincut separating a cluster (except the “trivial” ones). This leads to a simplification of the Kawarabayashi-Thorup framework (except that we are randomized while they are deterministic). This might make this framework more useful in other models of computation.
Finally, by extending the tree packing technique from [Karger STOC’96], we can find the minimum cut in time proportional to the number of components. As a byproduct of this technique, we obtain an Õ(n)-time algorithm for computing exact minimum cut for weighted graphs.}, author = {Daga, Mohit and Henzinger, Monika H and Nanongkai, Danupon and Saranurak, Thatchaphol}, booktitle = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing}, isbn = {978-1-4503-6705-9}, issn = {0737-8017}, location = {Phoenix, AZ, United States}, pages = {343–354}, publisher = {Association for Computing Machinery}, title = {{Distributed edge connectivity in sublinear time}}, doi = {10.1145/3313276.3316346}, year = {2019}, } @inproceedings{11871, abstract = {Many dynamic graph algorithms have an amortized update time, rather than a stronger worst-case guarantee. But amortized data structures are not suitable for real-time systems, where each individual operation has to be executed quickly. For this reason, there exist many recent randomized results that aim to provide a guarantee stronger than amortized expected. The strongest possible guarantee for a randomized algorithm is that it is always correct (Las Vegas), and has high-probability worst-case update time, which gives a bound on the time for each individual operation that holds with high probability. In this paper we present the first polylogarithmic high-probability worst-case time bounds for the dynamic spanner and the dynamic maximal matching problem. 1. For dynamic spanner, the only known o(n) worst-case bounds were O(n^{3/4}) high-probability worst-case update time for maintaining a 3-spanner, and O(n^{5/9}) for maintaining a 5-spanner. We give an O(1)^k log^3(n) high-probability worst-case time bound for maintaining a (2k – 1)-spanner, which yields the first worst-case polylog update time for all constant k. (All the results above maintain the optimal tradeoff of stretch 2k – 1 and Õ(n^{1+1/k}) edges.) 2. For dynamic maximal matching, or dynamic 2-approximate maximum matching, no algorithm with an o(n) worst-case time bound was known and we present an algorithm with O(log^5(n)) high-probability worst-case time; similar worst-case bounds existed only for maintaining a matching that was (2 + ε)-approximate, and hence not maximal. Our results are achieved using a new approach for converting amortized guarantees to worst-case ones for randomized data structures by going through a third type of guarantee, which is a middle ground between the two above: an algorithm is said to have worst-case expected update time α if for every update σ, the expected time to process σ is at most α. Although stronger than amortized expected, the worst-case expected guarantee does not resolve the fundamental problem of amortization: a worst-case expected update time of O(1) still allows for the possibility that a 1/f(n) fraction of the updates requires Θ(f(n)) time to process, for arbitrarily high f(n). In this paper we present a black-box reduction that converts any data structure with worst-case expected update time into one with a high-probability worst-case update time: the query time remains the same, while the update time increases by a factor of O(log^2(n)). Thus we achieve our results in two steps: (1) First we show how to convert existing dynamic graph algorithms with amortized expected polylogarithmic running times into algorithms with worst-case expected polylogarithmic running times.
(2) Then we use our black-box reduction to achieve the polylogarithmic high-probability worst-case time bound. All our algorithms are Las Vegas-type algorithms.}, author = {Bernstein, Aaron and Forster, Sebastian and Henzinger, Monika H}, booktitle = {30th Annual ACM-SIAM Symposium on Discrete Algorithms}, location = {San Diego, CA, United States}, pages = {1899--1918}, publisher = {Society for Industrial and Applied Mathematics}, title = {{A deamortization approach for dynamic spanner and dynamic maximal matching}}, doi = {10.1137/1.9781611975482.115}, year = {2019}, }