@misc{6426, abstract = {Synchronous programs are easy to specify because the side effects of an operation are finished by the time the invocation of the operation returns to the caller. Asynchronous programs, on the other hand, are difficult to specify because there are side effects due to pending computation scheduled as a result of the invocation of an operation. They are also difficult to verify because of the large number of possible interleavings of concurrent asynchronous computation threads. We show that specifications and correctness proofs for asynchronous programs can be structured by introducing the fiction, for proof purposes, that intermediate, non-quiescent states of asynchronous operations can be ignored. Then, the task of specification becomes relatively simple and the task of verification can be naturally decomposed into smaller sub-tasks. The sub-tasks iteratively summarize, guided by the structure of an asynchronous program, the atomic effect of non-atomic operations and the synchronous effect of asynchronous operations. This structuring of specifications and proofs corresponds to the introduction of multiple layers of stepwise refinement for asynchronous programs. We present the first proof rule, called synchronization, to reduce asynchronous invocations on a lower layer to synchronous invocations on a higher layer. We implemented our proof method in CIVL and evaluated it on a collection of benchmark programs.}, author = {Henzinger, Thomas A and Kragl, Bernhard and Qadeer, Shaz}, issn = {2664-1690}, pages = {28}, publisher = {IST Austria}, title = {{Synchronizing the asynchronous}}, doi = {10.15479/AT:IST-2018-853-v2-2}, year = {2017}, } @article{643, abstract = {It has been reported that nicotinamide overload induces oxidative stress associated with insulin resistance, the key feature of type 2 diabetes mellitus (T2DM). This study aimed to investigate the effects of B vitamins in T2DM. Glucose tolerance tests (GTT) were carried out in adult Sprague-Dawley rats treated with or without cumulative doses of B vitamins. Insulin tolerance tests (ITT) were also carried out in adult Sprague-Dawley rats treated with or without cumulative doses of Vitamin B3. We found that cumulative Vitamin B1 and Vitamin B3 administration significantly increased the plasma H2O2 levels associated with high insulin levels. Only Vitamin B3 reduced muscular and hepatic glycogen contents. Cumulative administration of nicotinic acid, another form of Vitamin B3, also significantly increased plasma insulin level and H2O2 generation. Moreover, cumulative administration of nicotinic acid or nicotinamide impaired glucose metabolism. This study suggested that excess Vitamin B1 and Vitamin B3 caused oxidative stress and insulin resistance.}, author = {Sun, Wuping and Zhai, Ming-Zhu and Zhou, Qian and Qian, Chengrui and Jiang, Changyu}, issn = {03044920}, journal = {Chinese Journal of Physiology}, number = {4}, pages = {207 -- 214}, publisher = {Chinese Physiological Society}, title = {{Effects of B vitamins overload on plasma insulin level and hydrogen peroxide generation in rats}}, doi = {10.4077/CJP.2017.BAF469}, volume = {60}, year = {2017}, } @article{642, abstract = {Cauchy problems with SPDEs on the whole space are localized to Cauchy problems on a ball of radius R. This localization reduces various kinds of spatial approximation schemes to finite dimensional problems. The error is shown to be exponentially small.
As an application, a numerical scheme is presented which combines the localization and the space and time discretization, and thus is fully implementable.}, author = {Gerencsér, Máté and Gyöngy, István}, issn = {00255718}, journal = {Mathematics of Computation}, number = {307}, pages = {2373 -- 2397}, publisher = {American Mathematical Society}, title = {{Localization errors in solving stochastic partial differential equations in the whole space}}, doi = {10.1090/mcom/3201}, volume = {86}, year = {2017}, } @inproceedings{645, abstract = {Markov decision processes (MDPs) are standard models for probabilistic systems with non-deterministic behaviours. Long-run average rewards provide a mathematically elegant formalism for expressing long-term performance. Value iteration (VI) is one of the simplest and most efficient algorithmic approaches to MDPs with other properties, such as reachability objectives. Unfortunately, a naive extension of VI does not work for MDPs with long-run average rewards, as there is no known stopping criterion. In this work our contributions are threefold. (1) We refute a conjecture related to stopping criteria for MDPs with long-run average rewards. (2) We present two practical algorithms for MDPs with long-run average rewards based on VI. First, we show that a combination of applying VI locally for each maximal end-component (MEC) and VI for reachability objectives can provide approximation guarantees. Second, extending the above approach with a simulation-guided on-demand variant of VI, we present an anytime algorithm that is able to deal with very large models. (3) Finally, we present experimental results showing that our methods significantly outperform the standard approaches on several benchmarks.}, author = {Ashok, Pranav and Chatterjee, Krishnendu and Daca, Przemyslaw and Kretinsky, Jan and Meggendorfer, Tobias}, editor = {Majumdar, Rupak and Kunčak, Viktor}, isbn = {978-331963386-2}, location = {Heidelberg, Germany}, pages = {201 -- 221}, publisher = {Springer}, title = {{Value iteration for long run average reward in Markov decision processes}}, doi = {10.1007/978-3-319-63387-9_10}, volume = {10426}, year = {2017}, } @article{644, abstract = {An instance of the valued constraint satisfaction problem (VCSP) is given by a finite set of variables, a finite domain of labels, and a sum of functions, each function depending on a subset of the variables. Each function can take finite values specifying costs of assignments of labels to its variables or the infinite value, which indicates an infeasible assignment. The goal is to find an assignment of labels to the variables that minimizes the sum. We study, assuming that P ≠ NP, how the complexity of this very general problem depends on the set of functions allowed in the instances, the so-called constraint language. The case when all allowed functions take values in {0,1} corresponds to ordinary CSPs, where one deals only with the feasibility issue, and there is no optimization. This case is the subject of the algebraic CSP dichotomy conjecture predicting for which constraint languages CSPs are tractable (i.e., solvable in polynomial time) and for which they are NP-hard. The case when all allowed functions take only finite values corresponds to a finite-valued CSP, where the feasibility aspect is trivial and one deals only with the optimization issue. The complexity of finite-valued CSPs was fully classified by Thapper and Živný.
An algebraic necessary condition for tractability of a general-valued CSP with a fixed constraint language was recently given by Kozik and Ochremiak. As our main result, we prove that if a constraint language satisfies this algebraic necessary condition, and the feasibility CSP (i.e., the problem of deciding whether a given instance has a feasible solution) corresponding to the VCSP with this language is tractable, then the VCSP is tractable. The algorithm is a simple combination of the assumed algorithm for the feasibility CSP and the standard LP relaxation. As a corollary, we obtain that a dichotomy for ordinary CSPs would imply a dichotomy for general-valued CSPs.}, author = {Kolmogorov, Vladimir and Krokhin, Andrei and Rolinek, Michal}, journal = {SIAM Journal on Computing}, number = {3}, pages = {1087 -- 1110}, publisher = {SIAM}, title = {{The complexity of general-valued CSPs}}, doi = {10.1137/16M1091836}, volume = {46}, year = {2017}, } @inproceedings{646, abstract = {We present a novel convex relaxation and a corresponding inference algorithm for the non-binary discrete tomography problem, that is, reconstructing discrete-valued images from few linear measurements. In contrast to state-of-the-art approaches that split the problem into a continuous reconstruction problem for the linear measurement constraints and a discrete labeling problem to enforce discrete-valued reconstructions, we propose a joint formulation that addresses both problems simultaneously, resulting in a tighter convex relaxation. For this purpose a constrained graphical model is set up and evaluated using a novel relaxation optimized by dual decomposition. We evaluate our approach experimentally and show superior solutions both mathematically (tighter relaxation) and experimentally in comparison to previously proposed relaxations.}, author = {Kuske, Jan and Swoboda, Paul and Petra, Stefanie}, editor = {Lauze, François and Dong, Yiqiu and Bjorholm Dahl, Anders}, isbn = {978-331958770-7}, location = {Kolding, Denmark}, pages = {235 -- 246}, publisher = {Springer}, title = {{A novel convex relaxation for non-binary discrete tomography}}, doi = {10.1007/978-3-319-58771-4_19}, volume = {10302}, year = {2017}, } @inproceedings{648, abstract = {Pseudoentropy has found a lot of important applications to cryptography and complexity theory. In this paper we focus on the foundational problem that has not been investigated so far, namely: by how much does pseudoentropy (the amount seen by computationally bounded attackers) differ from its information-theoretic counterpart (seen by unbounded observers), given certain limits on the attacker’s computational power? We provide the following answer for HILL pseudoentropy, which exhibits a threshold behavior around the size exponential in the entropy amount: – If the attacker size (s) and advantage (ε) satisfy s (formula presented), where k is the claimed amount of pseudoentropy, then the pseudoentropy boils down to the information-theoretic smooth entropy. – If s (formula presented), then pseudoentropy could be arbitrarily bigger than the information-theoretic smooth entropy. Besides answering the posed question, we show an elegant application of our result to complexity theory, namely that it implies the classical result on the existence of functions hard to approximate (due to Pippenger).
In our approach we utilize non-constructive techniques: the duality of linear programming and the probabilistic method.}, author = {Skórski, Maciej}, editor = {Jäger, Gerhard and Steila, Silvia}, isbn = {978-331955910-0}, location = {Bern, Switzerland}, pages = {600 -- 613}, publisher = {Springer}, title = {{On the complexity of breaking pseudoentropy}}, doi = {10.1007/978-3-319-55911-7_43}, volume = {10185}, year = {2017}, } @inbook{649, abstract = {We give a short overview on a recently developed notion of Ricci curvature for discrete spaces. This notion relies on geodesic convexity properties of the relative entropy along geodesics in the space of probability densities, for a metric which is similar to (but different from) the 2-Wasserstein metric. The theory can be considered as a discrete counterpart to the theory of Ricci curvature for geodesic measure spaces developed by Lott–Sturm–Villani.}, author = {Maas, Jan}, booktitle = {Modern Approaches to Discrete Curvature}, editor = {Najman, Laurent and Romon, Pascal}, isbn = {978-3-319-58001-2}, issn = {978-3-319-58002-9}, pages = {159 -- 174}, publisher = {Springer}, title = {{Entropic Ricci curvature for discrete spaces}}, doi = {10.1007/978-3-319-58002-9_5}, volume = {2184}, year = {2017}, } @inproceedings{650, abstract = {In this work we present a short and unified proof for the Strong and Weak Regularity Lemma, based on the cryptographic technique called low-complexity approximations. In short, both problems reduce to the task of constructively finding an approximation for a certain target function under a class of distinguishers (test functions), where distinguishers are combinations of simple rectangle-indicators. In our case these approximations can be learned by a simple iterative procedure, which yields a unified and simple proof, achieving, for any graph with density d and any approximation parameter, explicit bounds on the partition size. The novelty in our proof is: (a) a simple approach which yields both the strong and the weak variants, and (b) improvements when d = o(1). At an abstract level, our proof can be seen as a refinement and simplification of the “analytic” proof given by Lovász and Szegedy.}, author = {Skórski, Maciej}, editor = {Jäger, Gerhard and Steila, Silvia}, issn = {03029743}, location = {Bern, Switzerland}, pages = {586 -- 599}, publisher = {Springer}, title = {{A cryptographic view of regularity lemmas: Simpler unified proofs and refined bounds}}, doi = {10.1007/978-3-319-55911-7_42}, volume = {10185}, year = {2017}, } @inproceedings{6519, abstract = {Graph games with omega-regular winning conditions provide a mathematical framework to analyze a wide range of problems in the analysis of reactive systems and programs (such as the synthesis of reactive systems, program repair, and the verification of branching time properties). Parity conditions are canonical forms to specify omega-regular winning conditions. Graph games with parity conditions are equivalent to mu-calculus model checking, and thus constitute a very important algorithmic problem. Symbolic algorithms are of great significance because they provide scalable algorithms for the analysis of large finite-state systems, as well as algorithms for the analysis of infinite-state systems with finite quotient. A set-based symbolic algorithm uses the basic set operations and the one-step predecessor operators. We consider graph games with n vertices and parity conditions with c priorities (equivalently, a mu-calculus formula with c alternations of least and greatest fixed points).
While many explicit algorithms exist for graph games with parity conditions, for set-based symbolic algorithms there are only two algorithms (notice that we use space to refer to the number of sets stored by a symbolic algorithm): (a) the basic algorithm that requires O(n^c) symbolic operations and linear space; and (b) an improved algorithm that requires O(n^{c/2+1}) symbolic operations but also O(n^{c/2+1}) space (i.e., exponential space). In this work we present two set-based symbolic algorithms for parity games: (a) our first algorithm requires O(n^{c/2+1}) symbolic operations and only requires linear space; and (b) building on our first algorithm, we present an algorithm that requires O(n^{c/3+1}) symbolic operations and only linear space. We also present the first linear space set-based symbolic algorithm for parity games that requires at most a sub-exponential number of symbolic operations.}, author = {Chatterjee, Krishnendu and Dvorák, Wolfgang and Henzinger, Monika H and Loitzenbauer, Veronika}, location = {Stockholm, Sweden}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Improved set-based symbolic algorithms for parity games}}, doi = {10.4230/LIPICS.CSL.2017.18}, volume = {82}, year = {2017}, } @inproceedings{6517, abstract = {A (possibly degenerate) drawing of a graph G in the plane is approximable by an embedding if it can be turned into an embedding by an arbitrarily small perturbation. We show that testing whether a drawing of a planar graph G in the plane is approximable by an embedding can be carried out in polynomial time if a desired embedding of G belongs to a fixed isotopy class, i.e., the rotation system (or equivalently the faces) of the embedding of G and the choice of outer face are fixed. In other words, we show that c-planarity with embedded pipes is tractable for graphs with fixed embeddings. To the best of our knowledge an analogous result was previously known essentially only when G is a cycle.}, author = {Fulek, Radoslav}, location = {Phuket, Thailand}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik}, title = {{Embedding graphs into embedded graphs}}, doi = {10.4230/LIPICS.ISAAC.2017.34}, volume = {92}, year = {2017}, } @inproceedings{652, abstract = {We present an approach that enables robots to self-organize their sensorimotor behavior from scratch, without providing specific information about either the robot or its environment. This is achieved by a simple neural control law that increases the consistency between external sensor dynamics and internal neural dynamics of the utterly simple controller. In this way, the embodiment and the agent-environment coupling are the only source of individual development. We show how an anthropomorphic tendon-driven arm-shoulder system develops different behaviors depending on that coupling. For instance: Given a bottle half-filled with water, the arm starts to shake it, driven by the physical response of the water. When attaching a brush, the arm can be manipulated into wiping a table, and when connected to a revolvable wheel it finds out how to rotate it. Thus, the robot may be said to discover the affordances of the world. When allowing two (simulated) humanoid robots to interact physically, they engage in joint behavior development leading to, for instance, spontaneous cooperation. More social effects are observed if the robots can visually perceive each other.
Although, as an observer, it is tempting to attribute apparent intentionality, nothing of the kind is built in. In conclusion, we argue that emergent behavior may be much less rooted in explicit intentions, internal motivations, or specific reward systems than is commonly believed.}, author = {Der, Ralf and Martius, Georg S}, isbn = {978-150905069-7}, location = {Cergy-Pontoise, France}, publisher = {IEEE}, title = {{Dynamical self-consistency leads to behavioral development and emergent social interactions in robots}}, doi = {10.1109/DEVLRN.2016.7846789}, year = {2017}, } @article{651, abstract = {Superhydrophobic surfaces reduce the frictional drag between water and solid materials, but this effect is often temporary. The realization of sustained drag reduction has applications for water vehicles and pipeline flows.}, author = {Hof, Björn}, issn = {00280836}, journal = {Nature}, number = {7636}, pages = {161 -- 162}, publisher = {Nature Publishing Group}, title = {{Fluid dynamics: Water flows out of touch}}, doi = {10.1038/541161a}, volume = {541}, year = {2017}, } @article{653, abstract = {The extent of heterogeneity among driver gene mutations present in naturally occurring metastases - that is, treatment-naive metastatic disease - is largely unknown. To address this issue, we carried out 60× whole-genome sequencing of 26 metastases from four patients with pancreatic cancer. We found that identical mutations in known driver genes were present in every metastatic lesion for each patient studied. Passenger gene mutations, which do not have known or predicted functional consequences, accounted for all intratumoral heterogeneity. Even with respect to these passenger mutations, our analysis suggests that the genetic similarity among the founding cells of metastases was higher than that expected for any two cells randomly taken from a normal tissue. The uniformity of known driver gene mutations among metastases in the same patient has critical and encouraging implications for the success of future targeted therapies in advanced-stage disease.}, author = {Makohon Moore, Alvin and Zhang, Ming and Reiter, Johannes and Božić, Ivana and Allen, Benjamin and Kundu, Deepanjan and Chatterjee, Krishnendu and Wong, Fay and Jiao, Yuchen and Kohutek, Zachary and Hong, Jungeui and Attiyeh, Marc and Javier, Breanna and Wood, Laura and Hruban, Ralph and Nowak, Martin and Papadopoulos, Nickolas and Kinzler, Kenneth and Vogelstein, Bert and Iacobuzio Donahue, Christine}, issn = {10614036}, journal = {Nature Genetics}, number = {3}, pages = {358 -- 366}, publisher = {Nature Publishing Group}, title = {{Limited heterogeneity of known driver gene mutations among the metastases of individual patients with pancreatic cancer}}, doi = {10.1038/ng.3764}, volume = {49}, year = {2017}, } @inproceedings{6527, abstract = {A memory-hard function (MHF) f_n with parameter n can be computed in sequential time and space n. Simultaneously, a high amortized parallel area-time complexity (aAT) is incurred per evaluation. In practice, MHFs are used to limit the rate at which an adversary (using a custom computational device) can evaluate a security-sensitive function that still occasionally needs to be evaluated by honest users (using an off-the-shelf general purpose device). The most prevalent examples of such sensitive functions are Key Derivation Functions (KDFs) and password hashing algorithms, where rate limits help mitigate off-line dictionary attacks.
As the honest users' inputs to these functions are often (low-entropy) passwords, special attention is given to a class of side-channel resistant MHFs called iMHFs. Essentially all iMHFs can be viewed as some mode of operation (making n calls to some round function) given by a directed acyclic graph (DAG) with very low indegree. Recently, a combinatorial property of a DAG has been identified (called "depth-robustness") which results in good provable security for an iMHF based on that DAG. Depth-robust DAGs have also proven useful in other cryptographic applications. Unfortunately, until now, all known very depth-robust DAGs are impractically complicated and little is known about their exact (i.e. non-asymptotic) depth-robustness both in theory and in practice. In this work we build and analyze (both formally and empirically) several exceedingly simple and efficient-to-navigate practical DAGs for use in iMHFs and other applications. For each DAG we: (1) prove that its depth-robustness is asymptotically maximal; (2) prove bounds on its exact depth-robustness that are at least 3 orders of magnitude better than known bounds for other practical iMHFs; and (3) implement and empirically evaluate its depth-robustness and aAT against a variety of state-of-the-art (and several new) depth-reduction and low-aAT attacks. We find that, against all attacks, the new DAGs significantly outperform Argon2i, the most widely deployed iMHF in practice. Along the way we also improve the best known empirical attacks on the aAT of Argon2i by implementing and testing several heuristic versions of a (hitherto purely theoretical) depth-reduction attack. Finally, we demonstrate the practicality of our constructions by modifying the Argon2i code base to use one of the new high-aAT DAGs. Experimental benchmarks on a standard off-the-shelf CPU show that the new modifications do not adversely affect the impressive throughput of Argon2i (despite seemingly enjoying significantly higher aAT).}, author = {Alwen, Joel F and Blocki, Jeremiah and Harsha, Ben}, booktitle = {Proceedings of the 2017 ACM SIGSAC Conference on Computer and Communications Security}, isbn = {9781450349468}, location = {Dallas, TX, USA}, pages = {1001 -- 1017}, publisher = {ACM Press}, title = {{Practical graphs for optimal side-channel resistant memory-hard functions}}, doi = {10.1145/3133956.3134031}, year = {2017}, } @article{654, abstract = {In November 2016, developmental biologists, synthetic biologists and engineers gathered in Paris for a meeting called ‘Engineering the embryo’. The participants shared an interest in exploring how synthetic systems can reveal new principles of embryonic development, and how the in vitro manipulation and modeling of development using stem cells can be used to integrate ideas and expertise from physics, developmental biology and tissue engineering.
As we review here, the conference pinpointed some of the challenges arising at the intersection of these fields, along with great enthusiasm for finding new approaches and collaborations.}, author = {Kicheva, Anna and Rivron, Nicolas}, issn = {09501991}, journal = {Development}, number = {5}, pages = {733 -- 736}, publisher = {Company of Biologists}, title = {{Creating to understand – developmental biology meets engineering in Paris}}, doi = {10.1242/dev.144915}, volume = {144}, year = {2017}, } @inproceedings{6526, abstract = {This paper studies the complexity of estimating Rényi divergences of discrete distributions: p observed from samples and the baseline distribution q known a priori. Extending the results of Acharya et al. (SODA'15) on estimating Rényi entropy, we present improved estimation techniques together with upper and lower bounds on the sample complexity. We show that, contrary to estimating Rényi entropy, where a sublinear (in the alphabet size) number of samples suffices, the sample complexity depends heavily on events that occur with small probability under q, and is unbounded in general (no matter what estimation technique is used). For any divergence of integer order greater than 1, we provide upper and lower bounds on the number of samples dependent on the probabilities of p and q (the lower bounds hold for non-integer orders as well). We conclude that the worst-case sample complexity is polynomial in the alphabet size if and only if the probabilities of q are non-negligible. This gives theoretical insights into heuristics used in the applied literature to handle numerical instability, which occurs for small probabilities of q. Our result shows that they should be handled with care not only because of numerical issues, but also because of a blow-up in the sample complexity.}, author = {Skórski, Maciej}, booktitle = {2017 IEEE International Symposium on Information Theory (ISIT)}, isbn = {9781509040964}, location = {Aachen, Germany}, publisher = {IEEE}, title = {{On the complexity of estimating Rényi divergences}}, doi = {10.1109/isit.2017.8006529}, year = {2017}, } @article{655, abstract = {The bacterial flagellum is a self-assembling nanomachine. The external flagellar filament, several times longer than a bacterial cell body, is made of a few tens of thousands of subunits of a single protein: flagellin. A fundamental problem concerns the molecular mechanism of how the flagellum grows outside the cell, where no discernible energy source is available. Here, we monitored the dynamic assembly of individual flagella using in situ labelling and real-time immunostaining of elongating flagellar filaments. We report that the rate of flagellum growth, initially ~1,700 amino acids per second, decreases with length and that the previously proposed chain mechanism does not contribute to the filament elongation dynamics. Inhibition of the proton motive force-dependent export apparatus revealed a major contribution of substrate injection in driving filament elongation.
The combination of experimental and mathematical evidence demonstrates that a simple injection-diffusion mechanism controls bacterial flagella growth outside the cell.}, author = {Renault, Thibaud and Abraham, Anthony and Bergmiller, Tobias and Paradis, Guillaume and Rainville, Simon and Charpentier, Emmanuelle and Guet, Calin C and Tu, Yuhai and Namba, Keiichi and Keener, James and Minamino, Tohru and Erhardt, Marc}, issn = {2050084X}, journal = {eLife}, publisher = {eLife Sciences Publications}, title = {{Bacterial flagella grow through an injection-diffusion mechanism}}, doi = {10.7554/eLife.23136}, volume = {6}, year = {2017}, } @article{657, abstract = {Plant organs are typically organized into three main tissue layers. The middle ground tissue layer comprises the majority of the plant body and serves a wide range of functions, including photosynthesis, selective nutrient uptake and storage, and gravity sensing. Ground tissue patterning and maintenance in Arabidopsis are controlled by a well-established gene network revolving around the key regulator SHORT-ROOT (SHR). In contrast, it is completely unknown how ground tissue identity is first specified from totipotent precursor cells in the embryo. The plant signaling molecule auxin, acting through AUXIN RESPONSE FACTOR (ARF) transcription factors, is critical for embryo patterning. The auxin effector ARF5/MONOPTEROS (MP) acts both cell-autonomously and non-cell-autonomously to control embryonic vascular tissue formation and root initiation, respectively. Here we show that auxin response and ARF activity cell-autonomously control the asymmetric division of the first ground tissue cells. By identifying embryonic target genes, we show that MP transcriptionally initiates the ground tissue lineage and acts upstream of the regulatory network that controls ground tissue patterning and maintenance. Strikingly, whereas the SHR network depends on MP, this MP function is, at least in part, SHR independent. Our study therefore identifies auxin response as a regulator of ground tissue specification in the embryonic root, and reveals that ground tissue initiation and maintenance use different regulators and mechanisms. Moreover, our data provide a framework for the simultaneous formation of multiple cell types by the same transcriptional regulator.}, author = {Möller, Barbara and Ten Hove, Colette and Xiang, Daoquan and Williams, Nerys and López, Lorena and Yoshida, Saiko and Smit, Margot and Datla, Raju and Weijers, Dolf}, issn = {00278424}, journal = {PNAS}, number = {12}, pages = {E2533 -- E2539}, publisher = {National Academy of Sciences}, title = {{Auxin response cell autonomously controls ground tissue initiation in the early Arabidopsis embryo}}, doi = {10.1073/pnas.1616493114}, volume = {114}, year = {2017}, } @article{656, abstract = {Human neurons transplanted into a mouse model for Alzheimer’s disease show human-specific vulnerability to β-amyloid plaques and may help to identify new therapeutic targets.}, author = {Novarino, Gaia}, issn = {19466234}, journal = {Science Translational Medicine}, number = {381}, publisher = {American Association for the Advancement of Science}, title = {{Modeling Alzheimer's disease in mice with human neurons}}, doi = {10.1126/scitranslmed.aam9867}, volume = {9}, year = {2017}, }