@techreport{TR-IC-PFG-23-65,
  author      = {Reis, Leonardo Almeida and Raimundo, Marcos Medeiros},
  title       = {Explainability in Credit Scoring Neural Networks},
  institution = {Institute of Computing, University of Campinas},
  type        = {Technical Report},
  number      = {IC-PFG-23-65},
  month       = dec,
  year        = {2023},
  note        = {In English, 10 pages.},
  abstract    = {Credit scoring is a crucial element in the economic sector,
    relying on an individual's trustworthiness in honoring financial
    commitments. Institutions providing credit increasingly leverage
    Artificial Intelligence (AI) for widespread credit assessments. However,
    the integration of Machine Learning (ML) models raises ethical concerns,
    particularly regarding biases inherent in AI models. This document
    presents explainability methods based on feature importance and
    emphasizes the need for transparency in data utilization. The research
    aims to enhance model explainability and proposes techniques for a
    clearer understanding of model operations. \par The study employs
    feature importance techniques, such as Permutation Importance, in order
    to study which features impact the model the most. To address data
    generation anomalies created by the previous method, novelty detection
    algorithms, including Isolation Forest and Density Forest, are
    introduced. Additionally, we explore Counterfactual Explanations as a
    method to explain ML model outcomes and how to change a specific input
    to retrieve a desired prediction.},
}