@inproceedings{7808,
  author    = {Giacobbe, Mirco and Henzinger, Thomas A. and Lechner, Mathias},
  title     = {How Many Bits Does It Take to Quantize Your Neural Network?},
  booktitle = {International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  series    = {Lecture Notes in Computer Science},
  volume    = {12079},
  pages     = {79--97},
  publisher = {Springer Nature},
  location  = {Dublin, Ireland},
  isbn      = {9783030452360},
  issn      = {1611-3349},
  doi       = {10.1007/978-3-030-45237-7_5},
  year      = {2020},
  abstract  = {Quantization converts neural networks into low-bit fixed-point computations which can be carried out by efficient integer-only hardware, and is standard practice for the deployment of neural networks on real-time embedded devices. However, like their real-numbered counterpart, quantized networks are not immune to malicious misclassification caused by adversarial attacks. We investigate how quantization affects a network's robustness to adversarial attacks, which is a formal verification question. We show that neither robustness nor non-robustness are monotonic with changing the number of bits for the representation and, also, neither are preserved by quantization from a real-numbered network. For this reason, we introduce a verification method for quantized neural networks which, using SMT solving over bit-vectors, accounts for their exact, bit-precise semantics. We built a tool and analyzed the effect of quantization on a classifier for the MNIST dataset. We demonstrate that, compared to our method, existing methods for the analysis of real-numbered networks often derive false conclusions about their quantizations, both when determining robustness and when detecting attacks, and that existing methods for quantized networks often miss attacks. Furthermore, we applied our method beyond robustness, showing how the number of bits in quantization enlarges the gender bias of a predictor for students' grades.},
}
@inproceedings{8194,
  author    = {Baranowski, Marek and He, Shaobo and Lechner, Mathias and Nguyen, Thanh Son and Rakamari{\'c}, Zvonimir},
  title     = {An {SMT} Theory of Fixed-Point Arithmetic},
  booktitle = {Automated Reasoning},
  volume    = {12166},
  pages     = {13--31},
  publisher = {Springer Nature},
  location  = {Paris, France},
  isbn      = {9783030510732},
  issn      = {1611-3349},
  doi       = {10.1007/978-3-030-51074-9_2},
  year      = {2020},
  abstract  = {Fixed-point arithmetic is a popular alternative to floating-point arithmetic on embedded systems. Existing work on the verification of fixed-point programs relies on custom formalizations of fixed-point arithmetic, which makes it hard to compare the described techniques or reuse the implementations. In this paper, we address this issue by proposing and formalizing an SMT theory of fixed-point arithmetic. We present an intuitive yet comprehensive syntax of the fixed-point theory, and provide formal semantics for it based on rational arithmetic. We also describe two decision procedures for this theory: one based on the theory of bit-vectors and the other on the theory of reals. We implement the two decision procedures, and evaluate our implementations using existing mature SMT solvers on a benchmark suite we created. Finally, we perform a case study of using the theory we propose to verify properties of quantized neural networks.},
}
@inproceedings{6985,
  author    = {Hasani, Ramin and Amini, Alexander and Lechner, Mathias and Naser, Felix and Grosu, Radu and Rus, Daniela},
  title     = {Response Characterization for Auditing Cell Dynamics in Long Short-Term Memory Networks},
  booktitle = {Proceedings of the International Joint Conference on Neural Networks},
  publisher = {IEEE},
  location  = {Budapest, Hungary},
  isbn      = {9781728119854},
  doi       = {10.1109/ijcnn.2019.8851954},
  year      = {2019},
  abstract  = {In this paper, we introduce a novel method to interpret recurrent neural networks (RNNs), particularly long short-term memory networks (LSTMs) at the cellular level. We propose a systematic pipeline for interpreting individual hidden state dynamics within the network using response characterization methods. The ranked contribution of individual cells to the network's output is computed by analyzing a set of interpretable metrics of their decoupled step and sinusoidal responses. As a result, our method is able to uniquely identify neurons with insightful dynamics, quantify relationships between dynamical properties and test accuracy through ablation analysis, and interpret the impact of network capacity on a network's dynamical distribution. Finally, we demonstrate the generalizability and scalability of our method by evaluating a series of different benchmark sequential datasets.},
}