@inproceedings{3250, abstract = {The Learning Parity with Noise (LPN) problem has recently found many applications in cryptography as the hardness assumption underlying the constructions of "provably secure" cryptographic schemes like encryption or authentication protocols. Being provably secure means that the scheme comes with a proof showing that the existence of an efficient adversary against the scheme implies that the underlying hardness assumption is wrong. LPN-based schemes are appealing for theoretical and practical reasons. On the theoretical side, LPN-based schemes offer a very strong security guarantee. The LPN problem is equivalent to the problem of decoding random linear codes, a problem that has been extensively studied in the last half century. The fastest known algorithms run in exponential time, and, unlike most number-theoretic problems used in cryptography, the LPN problem does not succumb to known quantum algorithms. On the practical side, LPN-based schemes are often extremely simple and efficient in terms of code size as well as time and space requirements. This makes them prime candidates for lightweight devices like RFID tags, which are too weak to implement standard cryptographic primitives like the AES block cipher. This talk will be a gentle introduction to provable security using simple LPN-based schemes as examples. Starting from pseudorandom generators and symmetric-key encryption, we move on to secret-key authentication protocols and, if time admits, touch on recent constructions of public-key identification, commitments and zero-knowledge proofs.}, author = {Pietrzak, Krzysztof Z}, location = {Špindlerův Mlýn, Czech Republic}, pages = {99 -- 114}, publisher = {Springer}, title = {{Cryptography from learning parity with noise}}, doi = {10.1007/978-3-642-27660-6_9}, volume = {7147}, year = {2012}, } @article{3256, abstract = {We use a distortion to define the dual complex of a cubical subdivision of ℝ^n as an n-dimensional subcomplex of the nerve of the set of n-cubes. Motivated by the topological analysis of high-dimensional digital image data, we consider such subdivisions defined by generalizations of quad- and oct-trees to n dimensions. Assuming the subdivision is balanced, we show that mapping each vertex to the center of the corresponding n-cube gives a geometric realization of the dual complex in ℝ^n.}, author = {Edelsbrunner, Herbert and Kerber, Michael}, journal = {Discrete & Computational Geometry}, number = {2}, pages = {393 -- 414}, publisher = {Springer}, title = {{Dual complexes of cubical subdivisions of ℝ^n}}, doi = {10.1007/s00454-011-9382-4}, volume = {47}, year = {2012}, } @article{3254, abstract = {The theory of graph games with ω-regular winning conditions is the foundation for modeling and synthesizing reactive processes. In the case of stochastic reactive processes, the corresponding stochastic graph games have three players, two of them (System and Environment) behaving adversarially, and the third (Uncertainty) behaving probabilistically. We consider two problems for stochastic graph games: the qualitative problem asks for the set of states from which a player can win with probability 1 (almost-sure winning); and the quantitative problem asks for the maximal probability of winning (optimal winning) from each state. We consider ω-regular winning conditions formalized as Müller winning conditions.
We present optimal memory bounds for pure (deterministic) almost-sure winning and optimal winning strategies in stochastic graph games with Müller winning conditions. We also study the complexity of stochastic Müller games and show that both the qualitative and quantitative analysis problems are PSPACE-complete. Our results are relevant to the synthesis of stochastic reactive processes.}, author = {Chatterjee, Krishnendu}, journal = {Information and Computation}, pages = {29 -- 48}, publisher = {Elsevier}, title = {{The complexity of stochastic Müller games}}, doi = {10.1016/j.ic.2011.11.004}, volume = {211}, year = {2012}, } @inproceedings{3253, abstract = {We describe a framework for reasoning about programs with lists carrying integer numerical data. We use abstract domains to describe and manipulate complex constraints on configurations of these programs, mixing constraints on the shape of the heap, on the sizes of the lists, on the multisets of data stored in these lists, and on the data at their different positions. Moreover, we provide powerful techniques for automatic validation of Hoare triples and invariant checking, as well as for automatic synthesis of invariants and procedure summaries using modular inter-procedural analysis. The approach has been implemented in a tool called Celia and evaluated successfully on a large benchmark of programs.}, author = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela}, location = {Philadelphia, PA, USA}, pages = {1 -- 22}, publisher = {Springer}, title = {{Abstract domains for automated reasoning about list manipulating programs with infinite data}}, doi = {10.1007/978-3-642-27940-9_1}, volume = {7148}, year = {2012}, } @inproceedings{3265, abstract = {We propose a mid-level statistical model for image segmentation that composes multiple figure-ground hypotheses (FG), obtained by applying constraints at different locations and scales, into larger interpretations (tilings) of the entire image. Inference is cast as optimization over sets of maximal cliques sampled from a graph connecting all non-overlapping figure-ground segment hypotheses. Potential functions over cliques combine unary, Gestalt-based figure qualities, and pairwise compatibilities among spatially neighboring segments, constrained by T-junctions and the boundary interface statistics of real scenes. Learning the model parameters is based on maximum likelihood, alternating between sampling image tilings and optimizing their potential function parameters. State-of-the-art results are reported on the Berkeley and Stanford segmentation datasets, as well as on VOC2009, where a 28% improvement was achieved.}, author = {Ion, Adrian and Carreira, Joao and Sminchisescu, Cristian}, location = {Barcelona, Spain}, publisher = {IEEE}, title = {{Image segmentation by figure-ground composition into maximal cliques}}, doi = {10.1109/ICCV.2011.6126486}, year = {2012}, } @inproceedings{3282, abstract = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions.
Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number-theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma) alone does not imply security if the adversary can additionally make verification queries (uf-cmva). We give an efficient generic transformation from any uf-cma secure MAC which is "message-hiding" into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11: by using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF.}, author = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel}, location = {Cambridge, UK}, pages = {355 -- 374}, publisher = {Springer}, title = {{Message authentication, revisited}}, doi = {10.1007/978-3-642-29011-4_22}, volume = {7237}, year = {2012}, } @inproceedings{3280, abstract = {The (decisional) learning with errors problem (LWE) asks to distinguish "noisy" inner products of a secret vector with random vectors from uniform. The learning parities with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography. In this paper we introduce a (seemingly) much stronger adaptive assumption, called "subspace LWE" (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial). This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA-secure encryption is even secure against a very powerful class of related-key attacks.}, author = {Pietrzak, Krzysztof Z}, location = {Taormina, Sicily, Italy}, pages = {548 -- 563}, publisher = {Springer}, title = {{Subspace LWE}}, doi = {10.1007/978-3-642-28914-9_31}, volume = {7194}, year = {2012}, } @inproceedings{3281, abstract = {We consider the problem of amplifying the "lossiness" of functions. We say that an oracle circuit C*: {0,1}^m → {0,1}^* amplifies relative lossiness from ℓ/n to L/m if for every function f: {0,1}^n → {0,1}^n it holds that (1) if f is injective then so is C^f, and (2) if f has image size of at most 2^{n-ℓ}, then C^f has image size at most 2^{m-L}. The question is whether such C* exists for L/m ≫ ℓ/n.
This problem arises naturally in the context of cryptographic "lossy functions," where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C^f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(log n)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.}, author = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil}, location = {Taormina, Sicily, Italy}, pages = {458 -- 475}, publisher = {Springer}, title = {{Lossy functions do not amplify well}}, doi = {10.1007/978-3-642-28914-9_26}, volume = {7194}, year = {2012}, } @inproceedings{3284, abstract = {We study the complexity of valued constraint satisfaction problems (VCSP). A problem from VCSP is characterised by a constraint language, a fixed set of cost functions over a finite domain. An instance of the problem is specified by a sum of cost functions from the language and the goal is to minimise the sum. Under the unique games conjecture, the approximability of finite-valued VCSPs is well understood; see Raghavendra [FOCS’08]. However, there is no characterisation of finite-valued VCSPs, let alone general-valued VCSPs, that can be solved exactly in polynomial time, which would give insights from a combinatorial optimisation perspective. We consider the case of languages containing all possible unary cost functions. In the case of languages consisting of only {0, ∞}-valued cost functions (i.e. relations), such languages have been called conservative and studied by Bulatov [LICS’03] and recently by Barto [LICS’11]. Since we study valued languages, we call a language conservative if it contains all finite-valued unary cost functions. The computational complexity of conservative valued languages has been studied by Cohen et al. [AIJ’06] for languages over Boolean domains, by Deineko et al. [JACM’08] for {0,1}-valued languages (a.k.a. Max-CSP), and by Takhanov [STACS’10] for {0,∞}-valued languages containing all finite-valued unary cost functions (a.k.a. Min-Cost-Hom). We prove a Schaefer-like dichotomy theorem for conservative valued languages: if all cost functions in the language satisfy a certain condition (specified by a complementary combination of STP and MJN multimorphisms), then any instance can be solved in polynomial time (via a new algorithm developed in this paper); otherwise the language is NP-hard. This is the first complete complexity classification of general-valued constraint languages over non-Boolean domains. It is a common phenomenon that complexity classifications of problems over non-Boolean domains are significantly harder than in the Boolean case. The polynomial-time algorithm we present for the tractable cases is a generalisation of the submodular minimisation problem and a result of Cohen et al. [TCS’08]. Our results generalise previous results by Takhanov [STACS’10] and (a subset of results) by Cohen et al. [AIJ’06] and Deineko et al. [JACM’08]. Moreover, our results do not rely on any computer-assisted search as in Deineko et al.
[JACM’08], and provide a powerful tool for proving hardness of finite-valued and general-valued languages.}, author = {Kolmogorov, Vladimir and Živný, Stanislav}, pages = {750 -- 759}, publisher = {SIAM}, title = {{The complexity of conservative valued CSPs}}, year = {2012}, } @article{330, abstract = {A procedure for the continuous production of Cu2ZnSnS4 (CZTS) nanoparticles with controlled composition is presented. CZTS nanoparticles were prepared through the reaction of the metals' amino complexes with elemental sulfur in a continuous-flow reactor at moderate temperatures (300-330 °C). High-resolution transmission electron microscopy and X-ray diffraction analysis showed the nanocrystals to have a crystallographic structure compatible with that of kesterite. Chemical characterization of the materials showed the presence of the four elements in each individual nanocrystal. Composition control was achieved by adjusting the solution flow rate through the reactor and by the proper choice of the nominal precursor concentration within the flowing solution. Single-particle analysis revealed a composition distribution within each sample, which was optimized at the highest synthesis temperatures used.}, author = {Shavel, Alexey and Cadavid, Doris and Ibáñez, Maria and Carrete, Alex and Cabot, Andreu}, journal = {Journal of the American Chemical Society}, number = {3}, pages = {1438 -- 1441}, publisher = {ACS}, title = {{Continuous production of Cu2ZnSnS4 nanocrystals in a flow reactor}}, doi = {10.1021/ja209688a}, volume = {134}, year = {2012}, } @article{3317, abstract = {The physical distance between presynaptic Ca2+ channels and the Ca2+ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca2+ channels that are coupled to Ca2+ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca2+ channels, Ca2+ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.}, author = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M}, journal = {Nature Reviews Neuroscience}, number = {1}, pages = {7 -- 21}, publisher = {Nature Publishing Group}, title = {{Nanodomain coupling between Ca(2+) channels and sensors of exocytosis at fast mammalian synapses}}, doi = {10.1038/nrn3125}, volume = {13}, year = {2012}, } @article{3314, abstract = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper-level game is a discounted or mean-payoff game and the lower-level game is an (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time.
It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternative strategy improvement algorithm to compute the value.}, author = {Chatterjee, Krishnendu and Majumdar, Ritankar}, journal = {International Journal of Foundations of Computer Science}, number = {3}, pages = {609 -- 625}, publisher = {World Scientific Publishing}, title = {{Discounting and averaging in games across time scales}}, doi = {10.1142/S0129054112400308}, volume = {23}, year = {2012}, } @article{3115, abstract = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance ε in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If so, we also seek a preferably simple-looking solution P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. A variant of the algorithm, which we have implemented using the cgal library, is based on rational arithmetic and answers the same deconstruction problem up to an uncertainty parameter δ; its running time additionally depends on δ. If the input shape is found to be approximable, this algorithm also computes an approximate solution for the problem. It also allows us to solve parameter-optimization problems induced by the offset-deconstruction problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution P with at most one more vertex than a vertex-minimal one.}, author = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza}, journal = {Discrete & Computational Geometry}, number = {4}, pages = {964 -- 989}, publisher = {Springer}, title = {{Deconstructing approximate offsets}}, doi = {10.1007/s00454-012-9441-5}, volume = {48}, year = {2012}, } @article{3331, abstract = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in R^2. We prove that, for a polynomial of degree n with integer coefficients bounded by 2^ρ, the topology of the induced curve can be computed with Õ(·) bit operations (Õ indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n^2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.}, author = {Kerber, Michael and Sagraloff, Michael}, journal = {Journal of Symbolic Computation}, number = {3}, pages = {239 -- 258}, publisher = {Elsevier}, title = {{A worst case bound for topology computation of algebraic curves}}, doi = {10.1016/j.jsc.2011.11.001}, volume = {47}, year = {2012}, } @article{346, abstract = {Arrays of vertically aligned ZnO:Cl/TiO2 and ZnO:Cl/ZnxTiOy/TiO2 core–shell nanowires (NWs) were prepared by means of the combination of two solution-growth processes. First, single-crystal ZnO NWs with controlled n-type doping were grown on conducting substrates by a low-cost, high-yield and seed-free electrochemical route.
These NWs were covered by a titanium oxide shell of tunable thickness through successive adsorption-hydrolysis-condensation steps. Using this atomic-layer growth procedure, titania shells with controlled thickness and the anatase TiO2 phase were obtained after sintering at 450 °C. Higher sintering temperatures resulted in the formation of ZnO:Cl/ZnxTiOy/TiO2 core–shell NWs by the interdiffusion of Zn and Ti ions at the ZnO–TiO2 interface. The performance of ZnO:Cl/TiO2 and ZnO:Cl/ZnxTiOy/TiO2 core–shell NWs towards photoelectrochemical (PEC) water splitting was investigated as a function of the titania shell thickness. Furthermore, the performance of such core–shell NWs as photoelectrodes in dye-sensitized solar cells was also characterized. The TiO2 presence at the ZnO:Cl surface promoted a two-fold increase in the produced photocurrent densities, demonstrating their potential for PEC and optoelectronic applications. Electrochemical impedance spectroscopy was used to corroborate the lower resistance for charge transfer between the NWs and the electrolyte in the presence of the TiO2 shell.}, author = {Fan, Jiandong and Zamani, Reza and Fábrega, Cristina and Shavel, Alexey and Flox, Cristina and Ibáñez, Maria and Andreu, Teresa and López, Antonio and Arbiol, Jordi and Morante, Joan and Cabot, Andreu}, journal = {Journal of Physics D: Applied Physics}, number = {41}, publisher = {IOP Publishing Ltd.}, title = {{Solution-growth and optoelectronic performance of ZnO:Cl/TiO2 and ZnO:Cl/ZnxTiOy/TiO2 core–shell nanowires with tunable shell thickness}}, doi = {10.1088/0022-3727/45/41/415301}, volume = {45}, year = {2012}, } @article{3168, abstract = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a mathematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the combinatorial complexity by quotienting the reachable set of molecular species into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently, we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper, we prove that this quotienting yields a sufficient condition for weak lumpability (that is to say that the quotient system is still Markovian for a given set of initial distributions) and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system (which means that the conditional probability of being in a given state in the original system knowing that we are in its equivalence class is an invariant of the system).
We illustrate the framework on a case study of the epidermal growth factor (EGF)/insulin receptor crosstalk.}, author = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana}, journal = {Theoretical Computer Science}, pages = {137 -- 164}, publisher = {Elsevier}, title = {{Lumpability abstractions of rule-based systems}}, doi = {10.1016/j.tcs.2011.12.059}, volume = {431}, year = {2012}, } @article{377, abstract = {The potential to control the composition and crystal phase at the nanometer scale enables the production of nanocrystalline materials with enhanced functionalities and new applications. In the present work, we detail a novel colloidal synthesis route to prepare nanoparticles of the ternary semiconductor Cu2GeSe3 (CGSe) with nanometer-scale control over their crystal phases. We also demonstrate the structural effect on the thermoelectric properties of bottom-up-prepared CGSe nanomaterials. By careful adjustment of the nucleation and growth temperatures, pure orthorhombic CGSe nanoparticles with cationic order or polytypic CGSe nanoparticles with disordered cation positions can be produced. In this second type of nanoparticle, a high density of twins can be created to periodically change the atomic plane stacking, forming a hexagonal wurtzite CGSe phase. The high yield of the synthetic routes reported here allows the production of single-phase and multiphase CGSe nanoparticles on the gram scale, which permits characterization of the thermoelectric properties of these materials. Reduced thermal conductivities and a related 2.5-fold increase in the thermoelectric figure of merit for multiphase nanomaterials compared to pure-phase CGSe are systematically obtained. These results are discussed in terms of the density and efficiency of phonon scattering centers in both types of materials.}, author = {Ibáñez, Maria and Zamani, Reza and Li, Wenhua and Cadavid, Doris and Gorse, Stéphane and Katchoi, Nebll and Shavel, Alexey and López, Antonio and Morante, Joan and Arbiol, Jordi and Cabot, Andreu}, journal = {Chemistry of Materials}, number = {23}, pages = {4615 -- 4622}, publisher = {American Chemical Society}, title = {{Crystallographic control at the nanoscale to enhance functionality: Polytypic Cu2GeSe3 nanoparticles as thermoelectric materials}}, doi = {10.1021/cm303252q}, volume = {24}, year = {2012}, } @article{3846, abstract = {We summarize classical and recent results about two-player games played on graphs with ω-regular objectives. These games have applications in the verification and synthesis of reactive systems. Important distinctions are whether a graph game is turn-based or concurrent; deterministic or stochastic; zero-sum or not. We cluster known results and open problems according to these classifications.}, author = {Chatterjee, Krishnendu and Henzinger, Thomas A}, journal = {Journal of Computer and System Sciences}, number = {2}, pages = {394 -- 413}, publisher = {Elsevier}, title = {{A survey of stochastic ω-regular games}}, doi = {10.1016/j.jcss.2011.05.002}, volume = {78}, year = {2012}, } @article{387, abstract = {In this Letter we present a detailed study of the density of states near defects in Bi2Se3. In particular, we present data on the commonly found triangular defects in this system. While we do not find any measurable quasiparticle scattering interference effects, we do find localized resonances, which can be well fitted by theory once the potential is taken to be extended to properly account for the observed defects.
The data together with the fits confirm that while the local density of states around the Dirac point of the electronic spectrum at the surface is significantly disrupted near the impurity by the creation of a low-energy resonance state, the Dirac point is not locally destroyed. We discuss our results in terms of the expected protected surface state of topological insulators.}, author = {Alpichshev, Zhanybek and Biswas, Rudro and Balatsky, Alexander and Analytis, James and Chu, Jiunhaw and Fisher, Ian and Kapitulnik, Aharon}, journal = {Physical Review Letters}, number = {20}, publisher = {American Physical Society}, title = {{STM imaging of impurity resonances on Bi2Se3}}, doi = {10.1103/PhysRevLett.108.206402}, volume = {108}, year = {2012}, } @article{3110, abstract = {The directional transport of the phytohormone auxin depends on the phosphorylation status and polar localization of PIN-FORMED (PIN) auxin efflux proteins. While PINOID (PID) kinase is directly involved in the phosphorylation of PIN proteins, the phosphatase holoenzyme complexes that dephosphorylate PIN proteins remain elusive. Here, we demonstrate that mutations simultaneously disrupting the function of Arabidopsis thaliana FyPP1 (for Phytochrome-associated serine/threonine protein phosphatase1) and FyPP3, two homologous genes encoding the catalytic subunits of protein phosphatase6 (PP6), cause elevated accumulation of phosphorylated PIN proteins, correlating with a basal-to-apical shift in subcellular PIN localization. The changes in PIN polarity result in increased root basipetal auxin transport and severe defects, including shorter roots, fewer lateral roots, defective columella cells, root meristem collapse, abnormal cotyledons (small, cup-shaped, or fused cotyledons), and altered leaf venation. Our molecular, biochemical, and genetic data support the notion that FyPP1/3, SAL (for SAPS DOMAIN-LIKE), and PP2AA proteins (RCN1 [for ROOTS CURL IN NAPHTHYLPHTHALAMIC ACID1] or PP2AA1, PP2AA2, and PP2AA3) physically interact to form a novel PP6-type heterotrimeric holoenzyme complex. We also show that FyPP1/3, SAL, and PP2AA interact with a subset of PIN proteins and that for SAL the strength of the interaction depends on the PIN phosphorylation status. Thus, an Arabidopsis PP6-type phosphatase holoenzyme acts antagonistically with PID to direct auxin transport polarity and plant development by directly regulating PIN phosphorylation.}, author = {Dai, Mingqiu and Zhang, Chen and Kania, Urszula and Chen, Fang and Xue, Qin and McCray, Tyra and Li, Gang and Qin, Genji and Wakeley, Michelle and Terzaghi, William and Wan, Jianmin and Zhao, Yunde and Xu, Jian and Friml, Jirí and Deng, Xing W and Wang, Haiyang}, journal = {Plant Cell}, number = {6}, pages = {2497 -- 2514}, publisher = {American Society of Plant Biologists}, title = {{A PP6-type phosphatase holoenzyme directly regulates PIN phosphorylation and auxin efflux in Arabidopsis}}, doi = {10.1105/tpc.112.098905}, volume = {24}, year = {2012}, }