{"citation":{"apa":"Fellner, A. (2015). Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. Institute of Science and Technology Austria. https://doi.org/10.15479/AT:ISTA:28","short":"A. Fellner, (2015).","mla":"Fellner, Andreas. Experimental Part of CAV 2015 Publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. Institute of Science and Technology Austria, 2015, doi:10.15479/AT:ISTA:28.","ista":"Fellner A. 2015. Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes, Institute of Science and Technology Austria, 10.15479/AT:ISTA:28.","chicago":"Fellner, Andreas. “Experimental Part of CAV 2015 Publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes.” Institute of Science and Technology Austria, 2015. https://doi.org/10.15479/AT:ISTA:28.","ama":"Fellner A. Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes. 2015. doi:10.15479/AT:ISTA:28","ieee":"A. Fellner, “Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes.” Institute of Science and Technology Austria, 2015."},"publist_id":"5564","status":"public","related_material":{"record":[{"status":"public","relation":"popular_science","id":"1603"}]},"ddc":["004"],"has_accepted_license":"1","ec_funded":1,"keyword":["Markov Decision Process","Decision Tree","Probabilistic Verification","Counterexample Explanation"],"date_created":"2018-12-12T12:31:29Z","year":"2015","oa_version":"Published Version","date_updated":"2024-02-21T13:52:07Z","file_date_updated":"2020-07-14T12:47:00Z","contributor":[{"last_name":"Kretinsky","id":"44CEF464-F248-11E8-B48F-1D18A9856A87","first_name":"Jan"}],"date_published":"2015-08-13T00:00:00Z","license":"https://creativecommons.org/publicdomain/zero/1.0/","project":[{"call_identifier":"FP7","_id":"2581B60A-B435-11E9-9278-68D0E5697425","name":"Quantitative Graph Games: Theory and Applications","grant_number":"279307"},{"name":"Rigorous Systems Engineering","_id":"25832EC2-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"S 11407_N23"}],"type":"research_data","publisher":"Institute of Science and Technology Austria","datarep_id":"28","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","month":"08","oa":1,"tmp":{"short":"CC0 (1.0)","legal_code_url":"https://creativecommons.org/publicdomain/zero/1.0/legalcode","image":"/images/cc_0.png","name":"Creative Commons Public Domain Dedication (CC0 1.0)"},"file":[{"date_updated":"2020-07-14T12:47:00Z","date_created":"2018-12-12T13:02:31Z","access_level":"open_access","checksum":"b8bcb43c0893023cda66c1b69c16ac62","file_size":49557109,"file_id":"5597","relation":"main_file","creator":"system","content_type":"application/zip","file_name":"IST-2015-28-v1+2_Fellner_DataRep.zip"}],"abstract":[{"lang":"eng","text":"This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes.\r\nWe extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees.\r\nThe archive contains a java executable version of the extended tool (prism_dectree.jar) together with a few examples of the PRISM benchmark library.\r\nTo execute the program, please have a look at the README.txt, which provides instructions and further information on the archive.\r\nThe archive contains scripts that (if run often enough) reproduces the data presented in the publication."}],"doi":"10.15479/AT:ISTA:28","_id":"5549","author":[{"first_name":"Andreas","full_name":"Fellner, Andreas","id":"42BABFB4-F248-11E8-B48F-1D18A9856A87","last_name":"Fellner"}],"day":"13","department":[{"_id":"KrCh"},{"_id":"ToHe"}],"article_processing_charge":"No","title":"Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes"}