@article{6354,
  abstract = {Blood platelets are critical for hemostasis and thrombosis, but also play diverse roles during immune responses. We have recently reported that platelets migrate at sites of infection in vitro and in vivo. Importantly, platelets use their ability to migrate to collect and bundle fibrin(ogen)-bound bacteria, accomplishing efficient intravascular bacterial trapping. Here, we describe a method that allows analyzing platelet migration in vitro, focusing on their ability to collect and trap bacteria under flow.},
  author = {Fan, Shuxia and Lorenz, Michael and Massberg, Steffen and Gärtner, Florian R},
  issn = {2331-8325},
  journal = {Bio-Protocol},
  keywords = {Platelets, Cell migration, Bacteria, Shear flow, Fibrinogen, E. coli},
  number = {18},
  publisher = {Bio-Protocol},
  title = {{Platelet migration and bacterial trapping assay under flow}},
  doi = {10.21769/bioprotoc.3018},
  volume = {8},
  year = {2018},
}

@misc{6459,
  author = {Petritsch, Barbara},
  keywords = {Open Access, Publication Analysis},
  location = {Graz, Austria},
  publisher = {IST Austria},
  title = {{Open Access at IST Austria 2009-2017}},
  doi = {10.5281/zenodo.1410279},
  year = {2018},
}

@article{690,
  abstract = {We consider spectral properties and the edge universality of sparse random matrices, the class of random matrices that includes the adjacency matrices of the Erdős–Rényi graph model G(N, p). We prove a local law for the eigenvalue density up to the spectral edges. Under a suitable condition on the sparsity, we also prove that the rescaled extremal eigenvalues exhibit GOE Tracy–Widom fluctuations if a deterministic shift of the spectral edge due to the sparsity is included. For the adjacency matrix of the Erdős–Rényi graph this establishes the Tracy–Widom fluctuations of the second largest eigenvalue when p is much larger than $N^{-2/3}$, with a deterministic shift of order $(Np)^{-1}$.},
  author = {Lee, Ji Oon and Schnelli, Kevin},
  journal = {Probability Theory and Related Fields},
  number = {1-2},
  publisher = {Springer},
  title = {{Local law and Tracy–Widom limit for sparse random matrices}},
  doi = {10.1007/s00440-017-0787-8},
  volume = {171},
  year = {2018},
}

@article{703,
  abstract = {We consider the NP-hard problem of MAP-inference for undirected discrete graphical models. We propose a polynomial-time and practically efficient algorithm for finding a part of its optimal solution. Specifically, our algorithm marks some labels of the considered graphical model either as (i) optimal, meaning that they belong to all optimal solutions of the inference problem, or (ii) non-optimal, if they provably do not belong to any optimal solution. With access to an exact solver of a linear programming relaxation to the MAP-inference problem, our algorithm marks the maximal possible (in a specified sense) number of labels. We also present a version of the algorithm which has access only to a suboptimal dual solver and can still ensure the (non-)optimality of the marked labels, although the overall number of marked labels may decrease. We propose an efficient implementation, which runs in time comparable to a single run of a suboptimal dual solver.
Our method scales well and shows state-of-the-art results on computational benchmarks from machine learning and computer vision.},
  author = {Shekhovtsov, Alexander and Swoboda, Paul and Savchynskyy, Bogdan},
  issn = {0162-8828},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  number = {7},
  pages = {1668--1682},
  publisher = {IEEE},
  title = {{Maximum persistency via iterative relaxed inference with graphical models}},
  doi = {10.1109/TPAMI.2017.2730884},
  volume = {40},
  year = {2018},
}

@inproceedings{7116,
  abstract = {Training deep learning models has received tremendous research interest recently. In particular, there has been intensive research on reducing the communication cost of training when using multiple computational devices, through reducing the precision of the underlying data representation. Naturally, such methods induce system trade-offs: lowering communication precision could decrease communication overheads and improve scalability, but, on the other hand, it can also reduce the accuracy of training. In this paper, we study this trade-off space and ask: Can low-precision communication consistently improve the end-to-end performance of training modern neural networks, with no accuracy loss? From the performance point of view, the answer to this question may appear deceptively easy: compressing communication through low precision should help when the ratio between communication and computation is high. However, this answer is less straightforward when we try to generalize this principle across various neural network architectures (e.g., AlexNet vs. ResNet), numbers of GPUs (e.g., 2 vs. 8 GPUs), machine configurations (e.g., EC2 instances vs. NVIDIA DGX-1), communication primitives (e.g., MPI vs. NCCL), and even different GPU architectures (e.g., Kepler vs. Pascal). Currently, it is not clear how a realistic realization of all these factors maps to the speedup provided by low-precision communication. In this paper, we conduct an empirical study to answer this question and report the insights.},
  author = {Grubic, Demjan and Tam, Leo and Alistarh, Dan-Adrian and Zhang, Ce},
  booktitle = {Proceedings of the 21st International Conference on Extending Database Technology},
  isbn = {9783893180783},
  issn = {2367-2005},
  location = {Vienna, Austria},
  pages = {145--156},
  publisher = {OpenProceedings},
  title = {{Synchronous multi-GPU training for deep learning with low-precision communications: An empirical study}},
  doi = {10.5441/002/EDBT.2018.14},
  year = {2018},
}