@inproceedings{780, abstract = {Population protocols are networks of finite-state agents, interacting randomly, and updating their states using simple rules. Despite their extreme simplicity, these systems have been shown to cooperatively perform complex computational tasks, such as simulating register machines to compute standard arithmetic functions. The election of a unique leader agent is a key requirement in such computational constructions. Yet, the fastest currently known population protocol for electing a leader only has linear convergence time, and it has recently been shown that no population protocol using a constant number of states per node may overcome this linear bound. In this paper, we give the first population protocol for leader election with polylogarithmic convergence time, using polylogarithmic memory states per node. The protocol structure is quite simple: each node has an associated value, and is either a leader (still in contention) or a minion (following some leader). A leader keeps incrementing its value and “defeats” other leaders in one-to-one interactions, and will drop from contention and become a minion if it meets a leader with higher value. Importantly, a leader also drops out if it meets a minion with higher absolute value. While these rules are quite simple, the proof that this algorithm achieves polylogarithmic convergence time is non-trivial. In particular, the argument combines careful use of concentration inequalities with anti-concentration bounds, showing that the leaders’ values become spread apart as the execution progresses, which in turn implies that straggling leaders get quickly eliminated. We complement our analysis with empirical results, showing that our protocol converges extremely fast, even for large network sizes.}, author = {Alistarh, Dan-Adrian and Gelashvili, Rati}, pages = {479 -- 491}, publisher = {Springer}, title = {{Polylogarithmic-time leader election in population protocols}}, doi = {10.1007/978-3-662-47666-6_38}, volume = {9135}, year = {2015}, }
@inproceedings{781, abstract = {Population protocols, roughly defined as systems consisting of large numbers of simple identical agents, interacting at random and updating their state following simple rules, are an important research topic at the intersection of distributed computing and biology. One of the fundamental tasks that a population protocol may solve is majority: each node starts in one of two states; the goal is for all nodes to reach a correct consensus on which of the two states was initially the majority. Despite considerable research effort, known protocols for this problem are either exact but slow (taking linear parallel time to converge), or fast but approximate (with non-zero probability of error). In this paper, we show that this trade-off between precision and speed is not inherent. We present a new protocol called Average and Conquer (AVC) that solves majority exactly in expected parallel convergence time O(log n/(sε) + log n log s), where n is the number of nodes, εn is the initial node advantage of the majority state, and s = Ω(log n log log n) is the number of states the protocol employs. This shows that the majority problem can be solved exactly in time polylogarithmic in n, provided that the memory per node is s = Ω(1/ε + log n log 1/ε).
On the negative side, we establish a lower bound of Ω(1/ε) on the expected parallel convergence time for the case of four memory states per node, and a lower bound of Ω(log n) parallel time for protocols using any number of memory states per node.}, author = {Alistarh, Dan-Adrian and Gelashvili, Rati and Vojnović, Milan}, pages = {47 -- 56}, publisher = {ACM}, title = {{Fast and exact majority in population protocols}}, doi = {10.1145/2767386.2767429}, volume = {2015-July}, year = {2015}, }
@inproceedings{782, abstract = {In this work, we consider the following random process, motivated by the analysis of lock-free concurrent algorithms under high memory contention. In each round, a new scheduling step is allocated to one of n threads, according to a distribution p = (p1, p2, ..., pn), where thread i is scheduled with probability pi. When some thread first reaches a set threshold of executed steps, it registers a win, completing its current operation, and resets its step count to 1. At the same time, threads whose step count was close to the threshold also get reset because of the win, but to 0 steps, being penalized for almost winning. We are interested in two questions: how often does some thread complete an operation (system latency), and how often does a specific thread complete an operation (individual latency)? We provide asymptotically tight bounds for the system and individual latency of this general concurrency pattern, for arbitrary scheduling distributions p. Surprisingly, a simple characterization exists: in expectation, the system will complete a new operation every Θ(1/‖p‖₂) steps, while thread i will complete a new operation every Θ(1/(‖p‖₂ pi)) steps. The proof is interesting in its own right, as it requires a careful analysis of how the higher norms of the vector p influence the thread step counts and latencies in this random process. Our result offers a simple connection between the scheduling distribution and the average performance of concurrent algorithms, which has several applications.}, author = {Alistarh, Dan-Adrian and Sauerwald, Thomas and Vojnović, Milan}, pages = {251 -- 260}, publisher = {ACM}, title = {{Lock-Free algorithms under stochastic schedulers}}, doi = {10.1145/2767386.2767430}, volume = {2015-July}, year = {2015}, }
@inproceedings{783, abstract = {The problem of electing a leader from among n contenders is one of the fundamental questions in distributed computing. In its simplest formulation, the task is as follows: given n processors, all participants must eventually return a win or lose indication, such that a single contender may win. Despite a considerable amount of work on leader election, the following question is still open: can we elect a leader in an asynchronous fault-prone system faster than just running a Θ(log n)-time tournament, against a strong adaptive adversary? In this paper, we answer this question in the affirmative, improving on a decades-old upper bound. We introduce two new algorithmic ideas to reduce the time complexity of electing a leader to O(log∗ n), using O(n²) point-to-point messages. A non-trivial application of our algorithm is a new upper bound for the tight renaming problem, assigning n items to the n participants in expected O(log² n) time and O(n²) messages.
We complement our results with a lower bound of Ω(n²) messages for solving these two problems, closing the question of their message complexity.}, author = {Alistarh, Dan-Adrian and Gelashvili, Rati and Vladu, Adrian}, pages = {365 -- 374}, publisher = {ACM}, title = {{How to elect a leader faster than a tournament}}, doi = {10.1145/2767386.2767420}, volume = {2015-July}, year = {2015}, }
@inproceedings{784, abstract = {We demonstrate an optical switch design that can scale up to a thousand ports with high per-port bandwidth (25 Gbps+) and low switching latency (40 ns). Our design uses a broadcast and select architecture, based on a passive star coupler and fast tunable transceivers. In addition, we employ time division multiplexing to achieve very low switching latency. Our demo shows the feasibility of the switch data plane using a small testbed, comprising two transmitters and a receiver, connected through a star coupler.}, author = {Alistarh, Dan-Adrian and Ballani, Hitesh and Costa, Paolo and Funnell, Adam and Benjamin, Joshua and Watts, Philip and Thomsen, Benn}, isbn = {978-1-4503-3542-3}, location = {London, United Kingdom}, pages = {367 -- 368}, publisher = {ACM}, title = {{A high-radix, low-latency optical switch for data centers}}, doi = {10.1145/2785956.2790035}, year = {2015}, }
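
Note: the random process described in entry 782 is specified closely enough in its abstract to be simulated directly. The following minimal Python sketch can be used to eyeball system and individual completion rates under a given scheduling distribution p. The concrete threshold, the width of the "close to the threshold" reset window, and the round count are illustrative assumptions, not values taken from the paper.

# Hedged sketch of the scheduling process from entry 782 (assumed parameters).
import random

def simulate(p, threshold=8, reset_window=2, rounds=100_000, seed=0):
    """Return (total operations completed, per-thread operation counts).

    p            -- scheduling distribution (p[i] = probability thread i runs)
    threshold    -- step count at which a thread wins (assumed value)
    reset_window -- threads within this many steps of the threshold are
                    reset to 0 when another thread wins (assumed value)
    """
    rng = random.Random(seed)
    n = len(p)
    steps = [0] * n            # current step count of each thread
    wins = [0] * n             # operations completed by each thread
    for _ in range(rounds):
        # allocate this round's step to one thread according to p
        i = rng.choices(range(n), weights=p)[0]
        steps[i] += 1
        if steps[i] >= threshold:           # thread i wins its current operation
            wins[i] += 1
            for j in range(n):
                if j != i and steps[j] >= threshold - reset_window:
                    steps[j] = 0            # near-winners are penalized
            steps[i] = 1                    # winner restarts at one step
    return sum(wins), wins

if __name__ == "__main__":
    n = 64
    uniform = [1.0 / n] * n
    total, per_thread = simulate(uniform)
    print("system ops:", total, " ops of thread 0:", per_thread[0])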