[{"_id":"11362","keyword":["neural networks","verification","machine learning"],"status":"public","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by-nd/4.0/legalcode","image":"/image/cc_by_nd.png","name":"Creative Commons Attribution-NoDerivatives 4.0 International (CC BY-ND 4.0)","short":"CC BY-ND (4.0)"},"type":"dissertation","ddc":["004"],"date_updated":"2023-08-17T06:58:38Z","supervisor":[{"first_name":"Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","last_name":"Henzinger","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A"}],"department":[{"_id":"GradSch"},{"_id":"ToHe"}],"file_date_updated":"2022-05-17T15:19:39Z","oa_version":"Published Version","abstract":[{"lang":"eng","text":"Deep learning has enabled breakthroughs in challenging computing problems and has emerged as the standard problem-solving tool for computer vision and natural language processing tasks.\r\nOne exception to this trend is safety-critical tasks where robustness and resilience requirements contradict the black-box nature of neural networks. \r\nTo deploy deep learning methods for these tasks, it is vital to provide guarantees on neural network agents' safety and robustness criteria. \r\nThis can be achieved by developing formal verification methods to verify the safety and robustness properties of neural networks.\r\n\r\nOur goal is to design, develop and assess safety verification methods for neural networks to improve their reliability and trustworthiness in real-world applications.\r\nThis thesis establishes techniques for the verification of compressed and adversarially trained models as well as the design of novel neural networks for verifiably safe decision-making.\r\n\r\nFirst, we establish the problem of verifying quantized neural networks. 
Quantization is a technique that trades numerical precision for the computational efficiency of running a neural network and is widely adopted in industry.\r\nWe show that neglecting the reduced precision when verifying a neural network can lead to wrong conclusions about the robustness and safety of the network, highlighting that novel techniques for quantized network verification are necessary. We introduce several bit-exact verification methods explicitly designed for quantized neural networks and experimentally confirm on realistic networks that the network's robustness and other formal properties are affected by the quantization.\r\n\r\nFurthermore, we perform a case study providing evidence that adversarial training, a standard technique for making neural networks more robust, has detrimental effects on the network's performance. This robustness-accuracy tradeoff has been studied before regarding the accuracy obtained on classification datasets where each data point is independent of all other data points. On the other hand, we investigate the tradeoff empirically in robot learning settings where both a high accuracy and a high robustness are desirable.\r\nOur results suggest that the negative side-effects of adversarial training outweigh its robustness benefits in practice.\r\n\r\nFinally, we consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with systems over the infinite time horizon. 
Bayesian neural networks are probabilistic models for learning uncertainties in the data and are therefore often used on robotic and healthcare applications where data is inherently stochastic.\r\nWe introduce a method for recalibrating Bayesian neural networks so that they yield probability distributions over safe decisions only.\r\nOur method learns a safety certificate that guarantees safety over the infinite time horizon to determine which decisions are safe in every possible state of the system.\r\nWe demonstrate the effectiveness of our approach on a series of reinforcement learning benchmarks."}],"month":"05","alternative_title":["ISTA Thesis"],"language":[{"iso":"eng"}],"file":[{"file_name":"src.zip","date_created":"2022-05-13T12:33:26Z","file_size":13210143,"date_updated":"2022-05-13T12:49:00Z","creator":"mlechner","file_id":"11378","checksum":"8eefa9c7c10ca7e1a2ccdd731962a645","content_type":"application/zip","relation":"source_file","access_level":"closed"},{"file_name":"thesis_main-a2.pdf","date_created":"2022-05-16T08:02:28Z","creator":"mlechner","file_size":2732536,"date_updated":"2022-05-17T15:19:39Z","file_id":"11382","checksum":"1b9e1e5a9a83ed9d89dad2f5133dc026","relation":"main_file","access_level":"open_access","content_type":"application/pdf"}],"publication_status":"published","degree_awarded":"PhD","publication_identifier":{"isbn":["978-3-99078-017-6"]},"ec_funded":1,"license":"https://creativecommons.org/licenses/by-nd/4.0/","related_material":{"record":[{"relation":"part_of_dissertation","id":"10665","status":"public"},{"relation":"part_of_dissertation","id":"10667","status":"public"},{"id":"11366","status":"public","relation":"part_of_dissertation"},{"status":"public","id":"7808","relation":"part_of_dissertation"},{"id":"10666","status":"public","relation":"part_of_dissertation"}]},"project":[{"_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"Z211","name":"The Wittgenstein 
Prize"},{"call_identifier":"H2020","_id":"62781420-2b32-11ec-9570-8d9b63373d4d","grant_number":"101020093","name":"Vigilant Algorithmic Monitoring of Software"}],"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","citation":{"ieee":"M. Lechner, “Learning verifiable representations,” Institute of Science and Technology Austria, 2022.","short":"M. Lechner, Learning Verifiable Representations, Institute of Science and Technology Austria, 2022.","apa":"Lechner, M. (2022). Learning verifiable representations. Institute of Science and Technology Austria. https://doi.org/10.15479/at:ista:11362","ama":"Lechner M. Learning verifiable representations. 2022. doi:10.15479/at:ista:11362","mla":"Lechner, Mathias. Learning Verifiable Representations. Institute of Science and Technology Austria, 2022, doi:10.15479/at:ista:11362.","ista":"Lechner M. 2022. Learning verifiable representations. Institute of Science and Technology Austria.","chicago":"Lechner, Mathias. “Learning Verifiable Representations.” Institute of Science and Technology Austria, 2022. 
https://doi.org/10.15479/at:ista:11362."},"title":"Learning verifiable representations","article_processing_charge":"No","author":[{"full_name":"Lechner, Mathias","last_name":"Lechner","first_name":"Mathias","id":"3DC22916-F248-11E8-B48F-1D18A9856A87"}],"oa":1,"publisher":"Institute of Science and Technology Austria","day":"12","year":"2022","has_accepted_license":"1","date_created":"2022-05-12T07:14:01Z","date_published":"2022-05-12T00:00:00Z","doi":"10.15479/at:ista:11362","page":"124"},{"project":[{"name":"ISTplus - Postdoctoral Fellowships","grant_number":"754411","_id":"260C2330-B435-11E9-9278-68D0E5697425","call_identifier":"H2020"},{"_id":"25F42A32-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"The Wittgenstein Prize","grant_number":"Z211"}],"author":[{"full_name":"Lukina, Anna","last_name":"Lukina","first_name":"Anna","id":"CBA4D1A8-0FE8-11E9-BDE6-07BFE5697425"},{"last_name":"Schilling","orcid":"0000-0003-3658-1065","full_name":"Schilling, Christian","id":"3A2F4DCE-F248-11E8-B48F-1D18A9856A87","first_name":"Christian"},{"first_name":"Thomas A","id":"40876CD8-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-2985-7724","full_name":"Henzinger, Thomas A","last_name":"Henzinger"}],"article_processing_charge":"No","external_id":{"isi":["000719383800003"],"arxiv":["2009.06429"]},"title":"Into the unknown: active monitoring of neural networks","citation":{"chicago":"Lukina, Anna, Christian Schilling, and Thomas A Henzinger. “Into the Unknown: Active Monitoring of Neural Networks.” In 21st International Conference on Runtime Verification, 12974:42–61. Cham: Springer Nature, 2021. https://doi.org/10.1007/978-3-030-88494-9_3.","ista":"Lukina A, Schilling C, Henzinger TA. 2021. Into the unknown: active monitoring of neural networks. 21st International Conference on Runtime Verification. RV: Runtime Verification, LNCS, vol. 12974, 42–61.","mla":"Lukina, Anna, et al. 
“Into the Unknown: Active Monitoring of Neural Networks.” 21st International Conference on Runtime Verification, vol. 12974, Springer Nature, 2021, pp. 42–61, doi:10.1007/978-3-030-88494-9_3.","ieee":"A. Lukina, C. Schilling, and T. A. Henzinger, “Into the unknown: active monitoring of neural networks,” in 21st International Conference on Runtime Verification, Virtual, 2021, vol. 12974, pp. 42–61.","short":"A. Lukina, C. Schilling, T.A. Henzinger, in:, 21st International Conference on Runtime Verification, Springer Nature, Cham, 2021, pp. 42–61.","ama":"Lukina A, Schilling C, Henzinger TA. Into the unknown: active monitoring of neural networks. In: 21st International Conference on Runtime Verification. Vol 12974. Cham: Springer Nature; 2021:42-61. doi:10.1007/978-3-030-88494-9_3","apa":"Lukina, A., Schilling, C., & Henzinger, T. A. (2021). Into the unknown: active monitoring of neural networks. In 21st International Conference on Runtime Verification (Vol. 12974, pp. 42–61). Cham: Springer Nature. https://doi.org/10.1007/978-3-030-88494-9_3"},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","quality_controlled":"1","publisher":"Springer Nature","oa":1,"acknowledgement":"We thank Christoph Lampert and Alex Greengold for fruitful discussions. This research was supported in part by the Simons Institute for the Theory of Computing, the Austrian Science Fund (FWF) under grant Z211-N23 (Wittgenstein Award), and the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 
754411.","page":"42-61","doi":"10.1007/978-3-030-88494-9_3","date_published":"2021-10-06T00:00:00Z","date_created":"2021-10-31T23:01:31Z","isi":1,"year":"2021","day":"06","publication":"21st International Conference on Runtime Verification","type":"conference","conference":{"start_date":"2021-10-11","location":"Virtual","end_date":"2021-10-14","name":"RV: Runtime Verification"},"status":"public","keyword":["monitoring","neural networks","novelty detection"],"_id":"10206","department":[{"_id":"ToHe"}],"date_updated":"2024-01-30T12:06:56Z","scopus_import":"1","alternative_title":["LNCS"],"main_file_link":[{"url":"https://arxiv.org/abs/2009.06429","open_access":"1"}],"month":"10","place":"Cham","abstract":[{"text":"Neural-network classifiers achieve high accuracy when predicting the class of an input that they were trained to identify. Maintaining this accuracy in dynamic environments, where inputs frequently fall outside the fixed set of initially known classes, remains a challenge. The typical approach is to detect inputs from novel classes and retrain the classifier on an augmented dataset. However, not only the classifier but also the detection mechanism needs to adapt in order to distinguish between newly learned and yet unknown input classes. To address this challenge, we introduce an algorithmic framework for active monitoring of a neural network. A monitor wrapped in our framework operates in parallel with the neural network and interacts with a human user via a series of interpretable labeling queries for incremental adaptation. In addition, we propose an adaptive quantitative monitor to improve precision. 
An experimental evaluation on a diverse set of benchmarks with varying numbers of classes confirms the benefits of our active monitoring framework in dynamic scenarios.","lang":"eng"}],"oa_version":"Preprint","volume":"12974 ","related_material":{"record":[{"id":"13234","status":"public","relation":"extended_version"}]},"ec_funded":1,"publication_identifier":{"eisbn":["978-3-030-88494-9"],"eissn":["1611-3349"],"isbn":["9-783-0308-8493-2"],"issn":["0302-9743"]},"publication_status":"published","language":[{"iso":"eng"}]},{"file":[{"creator":"system","file_size":1142543971,"date_updated":"2020-07-14T12:47:07Z","file_name":"IST-2018-98-v1+1_BBalls_area2_tile2_20x20.mat","date_created":"2018-12-12T13:02:24Z","relation":"main_file","access_level":"open_access","content_type":"application/octet-stream","checksum":"6808748837b9afbbbabc2a356ca2b88a","file_id":"5590"},{"file_size":702336,"date_updated":"2020-07-14T12:47:07Z","creator":"system","file_name":"IST-2018-98-v1+2_ExperimentStructure.pdf","date_created":"2018-12-12T13:02:25Z","content_type":"application/pdf","relation":"main_file","access_level":"open_access","checksum":"d6d6cd07743038fe3a12352983fcf9dd","file_id":"5591"},{"creator":"system","file_size":432,"date_updated":"2020-07-14T12:47:07Z","file_name":"IST-2018-98-v1+3_GoodLocations_area2_20x20.mat","date_created":"2018-12-12T13:02:26Z","relation":"main_file","access_level":"open_access","content_type":"application/octet-stream","file_id":"5592","checksum":"0c9cfb4dab35bb3dc25a04395600b1c8"},{"file_size":986,"date_updated":"2020-07-14T12:47:07Z","creator":"system","file_name":"IST-2018-98-v1+4_README.txt","date_created":"2018-12-12T13:02:26Z","content_type":"text/plain","relation":"main_file","access_level":"open_access","file_id":"5593","checksum":"2a83b011012e21e934b4596285b1a183"}],"day":"29","has_accepted_license":"1","year":"2018","datarep_id":"98","related_material":{"record":[{"relation":"used_in_publication","status":"public","id":"292"}]},"doi":"10.154
79/AT:ISTA:98","date_published":"2018-03-29T00:00:00Z","date_created":"2018-12-12T12:31:39Z","oa_version":"Published Version","abstract":[{"lang":"eng","text":"This package contains data for the publication \"Nonlinear decoding of a complex movie from the mammalian retina\" by Deny S. et al, PLOS Comput Biol (2018). \r\n\r\nThe data consists of\r\n(i) 91 spike sorted, isolated rat retinal ganglion cells that pass stability and quality criteria, recorded on the multi-electrode array, in response to the presentation of the complex movie with many randomly moving dark discs. The responses are represented as 648000 x 91 binary matrix, where the first index indicates the timebin of duration 12.5 ms, and the second index the neural identity. The matrix entry is 0/1 if the neuron didn't/did spike in the particular time bin.\r\n(ii) README file and a graphical illustration of the structure of the experiment, specifying how the 648000 timebins are split into epochs where 1, 2, 4, or 10 discs were displayed, and which stimulus segments are exact repeats or unique ball trajectories.\r\n(iii) a 648000 x 400 matrix of luminance traces for each of the 20 x 20 positions (\"sites\") in the movie frame, with time that is locked to the recorded raster. The luminance traces are produced as described in the manuscript by filtering the raw disc movie with a small gaussian spatial kernel. "}],"month":"03","publisher":"Institute of Science and Technology Austria","oa":1,"ddc":["570"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ieee":"S. Deny, O. Marre, V. Botella-Soler, G. S. Martius, and G. Tkačik, “Nonlinear decoding of a complex movie from the mammalian retina.” Institute of Science and Technology Austria, 2018.","short":"S. Deny, O. Marre, V. Botella-Soler, G.S. Martius, G. Tkačik, (2018).","apa":"Deny, S., Marre, O., Botella-Soler, V., Martius, G. S., & Tkačik, G. (2018). Nonlinear decoding of a complex movie from the mammalian retina. 
Institute of Science and Technology Austria. https://doi.org/10.15479/AT:ISTA:98","ama":"Deny S, Marre O, Botella-Soler V, Martius GS, Tkačik G. Nonlinear decoding of a complex movie from the mammalian retina. 2018. doi:10.15479/AT:ISTA:98","mla":"Deny, Stephane, et al. Nonlinear Decoding of a Complex Movie from the Mammalian Retina. Institute of Science and Technology Austria, 2018, doi:10.15479/AT:ISTA:98.","ista":"Deny S, Marre O, Botella-Soler V, Martius GS, Tkačik G. 2018. Nonlinear decoding of a complex movie from the mammalian retina, Institute of Science and Technology Austria, 10.15479/AT:ISTA:98.","chicago":"Deny, Stephane, Olivier Marre, Vicente Botella-Soler, Georg S Martius, and Gašper Tkačik. “Nonlinear Decoding of a Complex Movie from the Mammalian Retina.” Institute of Science and Technology Austria, 2018. https://doi.org/10.15479/AT:ISTA:98."},"date_updated":"2024-02-21T13:45:26Z","file_date_updated":"2020-07-14T12:47:07Z","title":"Nonlinear decoding of a complex movie from the mammalian retina","department":[{"_id":"ChLa"},{"_id":"GaTk"}],"author":[{"first_name":"Stephane","last_name":"Deny","full_name":"Deny, Stephane"},{"last_name":"Marre","full_name":"Marre, Olivier","first_name":"Olivier"},{"last_name":"Botella-Soler","full_name":"Botella-Soler, Vicente","first_name":"Vicente"},{"first_name":"Georg S","id":"3A276B68-F248-11E8-B48F-1D18A9856A87","full_name":"Martius, Georg S","last_name":"Martius"},{"id":"3D494DCA-F248-11E8-B48F-1D18A9856A87","first_name":"Gasper","last_name":"Tkacik","orcid":"0000-0002-6699-1455","full_name":"Tkacik, Gasper"}],"article_processing_charge":"No","_id":"5584","status":"public","project":[{"_id":"254D1A94-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"P 25651-N26","name":"Sensitivity to higher-order statistics in natural scenes"}],"keyword":["retina","decoding","regression","neural networks","complex 
stimulus"],"type":"research_data","tmp":{"image":"/images/cc_0.png","legal_code_url":"https://creativecommons.org/publicdomain/zero/1.0/legalcode","name":"Creative Commons Public Domain Dedication (CC0 1.0)","short":"CC0 (1.0)"}}]