@inproceedings{e89b0f0f1b5e41109239ee8d81e3f0a3,
  title     = {Patterns of {LLM} Weaponization: A Comparative Analysis of Exploitation Incidents Across Commercial {AI} Systems},
  abstract  = {This comparative study examines patterns of Large Language Model (LLM) weaponization through systematic analysis of four major exploitation incidents spanning 2023-2025. While existing research focuses on isolated incidents or theoretical vulnerabilities, this study provides the first comprehensive comparative framework analyzing exploitation patterns across state-sponsored cyber-espionage (Anthropic Claude incident), academic security research (GPT-4 autonomous privilege escalation), social engineering platforms (SpearBot phishing framework), and underground criminal commoditization (WormGPT/FraudGPT ecosystem). Through comparative analysis across eight dimensions—adversary sophistication, target selection, exploitation techniques, autonomy levels, detection evasion, attribution challenges, defensive gaps, and capability democratization—this research identifies critical cross-case patterns informing defensive prioritization. Findings reveal three universal exploitation mechanisms transcending adversary types: autonomous goal decomposition via chain-of-thought reasoning (present in all four cases), dynamic tool invocation and code generation (3/4 cases), and adaptive social engineering (4/4 cases). Analysis demonstrates progressive capability democratization: state-level sophistication (Claude: 80-90\% autonomy) transitioning to academic accessibility (GPT-4: 33-83\% success rates), specialized criminal tooling (SpearBot: generative-critique architecture), and mass commoditization (WormGPT: \$200-1700/year subscriptions). Comparative findings identify four cross-cutting defensive imperatives applicable regardless of adversary type: multi-turn conversational context monitoring, behavioral fingerprinting distinguishing legitimate from malicious complex workflows, federated threat intelligence enabling rapid cross-organizational learning, and capability-based access controls proportional to LLM reasoning sophistication.},
  keywords  = {large language models, comparative analysis, cyber exploitation patterns, llm weaponization, autonomous agents, capability democratization, underground ai, defensive frameworks},
  author    = {Antoniou, George},
  booktitle = {International Conference on Academic Studies in Science, Engineering and Technology (ICASET 2025)},
  note      = {SDG Alignment: • SDG 9 – Industry, Innovation, and Infrastructure This research advances understanding of secure and resilient digital infrastructure by analyzing how large language models are exploited across commercial AI systems. • SDG 16 – Peace, Justice, and Strong Institutions The study addresses AI-driven cyber exploitation, including state-sponsored and criminal misuse, supporting the protection of digital trust and institutional resilience. • SDG 17 – Partnerships for the Goals The work emphasizes the need for federated threat intelligence and cross-sector collaboration to address AI-enabled cyber threats globally. Presentation 1 Patterns of LLM Weaponization: A Comparative Analysis of Exploitation Incidents Across Commercial AI Systems International Conference on Academic Studies in Science, Engineering and Technology (ICASET 2025) Presented December 15, 2025 SDG 9 – Industry, Innovation, and Infrastructure Targets 9.1 \& 9.5 • “Develop quality, reliable, sustainable and resilient infrastructure, including regional and transborder infrastructure.” • “Enhance scientific research and upgrade the technological capabilities of industrial sectors.” How the work aligns: This research contributes to the resilience of digital and AI-driven infrastructure by identifying systemic vulnerabilities in commercial large language models and analyzing exploitation patterns across multiple adversary types. By proposing capability-based defensive frameworks and security-by-design principles, the work supports safer innovation and the responsible deployment of advanced AI technologies. SDG 16 – Peace, Justice, and Strong Institutions Targets 16.6 \& 16.10 • “Develop effective, accountable and transparent institutions at all levels.” • “Ensure public access to information and protect fundamental freedoms.” How the work aligns: The study addresses emerging threats posed by AI-enabled cyber-espionage, large-scale fraud, and automated social engineering that undermine institutional trust and governance. By examining real-world exploitation incidents and emphasizing transparency, threat intelligence sharing, and accountability in AI systems, the work supports stronger digital institutions and information integrity. SDG 17 – Partnerships for the Goals Targets 17.16 \& 17.17 • “Enhance the Global Partnership for Sustainable Development.” • “Encourage effective public, public-private and civil society partnerships.” How the work aligns: The research highlights the necessity of cross-sector collaboration, federated threat intelligence, and international cooperation to counter AI-enabled cyber threats. It explicitly calls for shared responsibility among academia, industry, policymakers, and security practitioners to address global AI risks ; International Conference on Academic Studies in Science, Engineering and Technology (ICASET), ICASET25 ; Conference date: 13-12-2025 Through 15-12-2025},
  year      = {2025},
  month     = dec,
  day       = {15},
  language  = {American English},
  url       = {https://www.arste.org/icaset/2025/},
}