@inproceedings{bbdf3a7ff61247b5a0cfccbd8b8ec002,
title = "Rationale Discovery and Explainable AI",
abstract = "The justification of an algorithm's outcomes is important in many domains, and in particular in the law. However, previous research has shown that machine learning systems can make the right decisions for the wrong reasons: despite high accuracies, not all of the conditions that define the domain of the training data are learned. In this study, we investigate what the system does learn, using state-of-the-art explainable AI techniques. With the use of SHAP and LIME, we are able to show which features impact the decision-making process and how that impact changes with different distributions of the training data. However, our results also show that even high accuracy and good relevant feature detection are no guarantee of a sound rationale. Hence, these state-of-the-art explainable AI techniques cannot be used to fully expose unsound rationales, further advocating the need for a separate method for rationale evaluation.",
keywords = "Data, Explainable AI, Knowledge, Machine Learning",
author = "Cor Steging and Silja Renooij and Bart Verheij",
note = "Funding Information: This research was funded by the Hybrid Intelligence Center, a 10-year programme funded by the Dutch Ministry of Education, Culture and Science through the Netherlands Organisation for Scientific Research, https://hybrid-intelligence-centre.nl. Publisher Copyright: {\textcopyright} 2021 The authors and IOS Press. 34th International Conference on Legal Knowledge and Information Systems, JURIX 2021; Conference date: 08-12-2021 through 10-12-2021",
year = "2021",
month = dec,
day = "2",
doi = "10.3233/FAIA210341",
language = "English",
series = "Frontiers in Artificial Intelligence and Applications",
publisher = "IOS Press",
pages = "225--234",
editor = "Erich Schweighofer",
booktitle = "Legal Knowledge and Information Systems - JURIX 2021",
}
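
Note: the abstract describes probing what a trained classifier has actually learned using SHAP and LIME. The sketch below (plain text after the entry, ignored by BibTeX) illustrates that general workflow; it is not the paper's code, and the synthetic data, model, and feature names are placeholders chosen for illustration.

    # Illustrative sketch, assuming a standard scikit-learn classifier and the
    # public shap and lime libraries; not the authors' implementation.
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import train_test_split
    import shap
    from lime.lime_tabular import LimeTabularExplainer

    rng = np.random.default_rng(0)
    X = rng.normal(size=(1000, 4))
    # Hypothetical ground-truth rationale: only features 0 and 1 matter.
    y = ((X[:, 0] > 0) & (X[:, 1] > 0)).astype(int)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    model = RandomForestClassifier(random_state=0).fit(X_train, y_train)

    # SHAP: per-feature attributions over the test set, giving a global view
    # of which features drive the model's decisions.
    explainer = shap.TreeExplainer(model)
    shap_values = explainer.shap_values(X_test)

    # LIME: a local explanation for a single instance.
    lime_explainer = LimeTabularExplainer(
        X_train, feature_names=["f0", "f1", "f2", "f3"],
        class_names=["neg", "pos"], discretize_continuous=True)
    exp = lime_explainer.explain_instance(X_test[0], model.predict_proba)
    print(exp.as_list())

Comparing the attributions against the known generative rule (here, features 0 and 1) mirrors the paper's question of whether relevant-feature detection by SHAP and LIME suffices to certify a sound rationale.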