
@article{jcs.2025.066312,
  author   = {Sufficient, Hossana Maghiri and Mohammed, Abdulazeez Murtala and Danjuma, Bashir},
  title    = {Ethical Implications of {AI}-Driven Ethical Hacking: A Systematic Review and Governance Framework},
  journal  = {Journal of Cyber Security},
  volume   = {7},
  number   = {1},
  pages    = {239--253},
  year     = {2025},
  issn     = {2579-0064},
  doi      = {10.32604/jcs.2025.066312},
  url      = {http://www.techscience.com/JCS/v7n1/62970},
  abstract = {The rapid integration of artificial intelligence (AI) into ethical hacking practices has transformed vulnerability discovery and threat mitigation; however, it raises pressing ethical questions regarding responsibility, justice, and privacy. This paper presents a PRISMA-guided systematic review of twelve peer-reviewed studies published between 2015 and March 2024, supplemented by Braun and Clarke's thematic analysis, to map four core challenges: (1) autonomy and human oversight, (2) algorithmic bias and mitigation strategies, (3) data privacy preservation mechanisms, and (4) limitations of General Data Protection Regulation (GDPR) and the European Union's AI Act in addressing AI-specific risks, alongside the imperative to balance automation with expert judgment. While artificial intelligence has greatly enhanced efficiency and reduced hazard detection, its actual lack of transparency and dependence on past data may exacerbate inequality in its approach, adversely affecting under-resourced sectors such as rural healthcare systems and small enterprises. For example, a 2024 University of Illinois Urbana-Champaign study demonstrated that generative pre-trained transformer 4 (GPT-4) agents could autonomously exploit 87\% of one-day vulnerabilities in a small-business web application, illustrating how AI-driven attacks can rapidly overwhelm under-resourced enterprises without dedicated security teams. To promote equity and accountability, we advocate embedding bias-aware data curation toolkits (e.g., IBM AI Fairness 360, Google What-If Tool, Microsoft Fairlearn, Aequitas) at the data-ingestion stage and adopting adaptive governance models with continuous impact assessments and human-in-the-loop checkpoints. Our findings inform a pragmatic framework for harmonizing regulatory, technical, and organizational controls, and we outline a research agenda focused on adaptive oversight, privacy-enhancing policies, and multidisciplinary collaboration to guide responsible deployment of AI in cybersecurity.},
}



