@article{12919,
  abstract = {We report the visible-light photocatalytic cleavage of trityl thioethers or ethers under pH-neutral conditions. The method results in the formation of the respective symmetrical disulfides and alcohols in moderate to excellent yield. The protocol only requires the addition of a suitable photocatalyst and light, rendering it orthogonal to several functionalities, including acid-labile protective groups. The same conditions can be used to directly convert trityl-protected thiols into unsymmetrical disulfides or selenosulfides, and to cleave trityl resins in solid-phase organic synthesis.},
  author = {Murakami, Sho and Brudy, Cosima and Bachmann, Moritz and Takemoto, Yoshiji and Pieber, Bartholomäus},
  issn = {1437-210X},
  journal = {Synthesis},
  keywords = {Organic Chemistry, Catalysis},
  number = {09},
  pages = {1367--1374},
  publisher = {Georg Thieme Verlag},
  title = {{Photocatalytic cleavage of trityl-protected thiols and alcohols}},
  doi = {10.1055/a-1979-5933},
  volume = {55},
  year = {2023},
}

@inproceedings{13048,
  abstract = {In this paper we introduce a pruning of the medial axis called the (λ,α)-medial axis (ax_λ^α). We prove that the (λ,α)-medial axis of a set K is stable in a Gromov-Hausdorff sense under weak assumptions. More formally, we prove that if K and K′ are close in the Hausdorff distance (d_H), then the (λ,α)-medial axes of K and K′ are close as metric spaces; that is, the Gromov-Hausdorff distance (d_GH) between the two is 1/4-Hölder in the sense that d_GH(ax_λ^α(K), ax_λ^α(K′)) ≲ d_H(K,K′)^{1/4}. The Hausdorff distance between the two medial axes is also bounded, by d_H(ax_λ^α(K), ax_λ^α(K′)) ≲ d_H(K,K′)^{1/2}. These quantified stability results provide guarantees for practical computations of medial axes from approximations. Moreover, they provide key ingredients for studying the computability of the medial axis in the context of computable analysis.},
  author = {Lieutier, André and Wintraecken, Mathijs},
  booktitle = {Proceedings of the 55th Annual ACM Symposium on Theory of Computing},
  isbn = {9781450399135},
  location = {Orlando, FL, United States},
  pages = {1768--1776},
  publisher = {Association for Computing Machinery},
  title = {{Hausdorff and Gromov-Hausdorff stable subsets of the medial axis}},
  doi = {10.1145/3564246.3585113},
  year = {2023},
}

@inproceedings{13053,
  abstract = {Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware.
  The code for reproducing the results is available at this https URL.},
  author = {Peste, Elena-Alexandra and Vladu, Adrian and Kurtic, Eldar and Lampert, Christoph and Alistarh, Dan-Adrian},
  booktitle = {11th International Conference on Learning Representations},
  location = {Kigali, Rwanda},
  title = {{CrAM: A Compression-Aware Minimizer}},
  year = {2023},
}

@inproceedings{13143,
  abstract = {GIMPS and PrimeGrid are large-scale distributed projects dedicated to searching for giant prime numbers, usually of special forms like Mersenne and Proth primes. The numbers in the current search space are millions of digits long, and the participating volunteers need to run resource-consuming primality tests. Once a candidate prime N has been found, the only way for another party to independently verify the primality of N used to be to repeat the expensive primality test. To avoid the need to recompute each primality test, these projects have recently adopted certifying mechanisms that enable efficient verification of the performed tests. However, the mechanisms presently in place only detect benign errors, and there is no guarantee against adversarial behavior: a malicious volunteer can mislead the project into rejecting a giant prime as being non-prime. In this paper, we propose a practical, cryptographically sound mechanism for certifying the non-primality of Proth numbers. That is, a volunteer can, in parallel to running the primality test for N, generate an efficiently verifiable proof at little extra cost certifying that N is not prime. The interactive protocol has statistical soundness and can be made non-interactive using the Fiat-Shamir heuristic. Our approach is based on a cryptographic primitive called a Proof of Exponentiation (PoE) which, for a group G, certifies that a tuple (x, y, T) ∈ G² × ℕ satisfies x^{2^T} = y (Pietrzak, ITCS 2019 and Wesolowski, J. Cryptol. 2020). In particular, we show how to adapt Pietrzak's PoE at a moderate additional cost to make it a cryptographically sound certificate of non-primality.},
  author = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Pietrzak, Krzysztof Z},
  booktitle = {Public-Key Cryptography - PKC 2023},
  isbn = {9783031313677},
  issn = {1611-3349},
  location = {Atlanta, GA, United States},
  pages = {530--553},
  publisher = {Springer Nature},
  title = {{Certifying giant nonprimes}},
  doi = {10.1007/978-3-031-31368-4_19},
  volume = {13940},
  year = {2023},
}

@inproceedings{13142,
  abstract = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold. Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case the certificates are invariants and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales.
  For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.},
  author = {Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle = {Tools and Algorithms for the Construction and Analysis of Systems},
  isbn = {9783031308222},
  issn = {1611-3349},
  location = {Paris, France},
  pages = {3--25},
  publisher = {Springer Nature},
  title = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}},
  doi = {10.1007/978-3-031-30823-9_1},
  volume = {13993},
  year = {2023},
}