
@article{cmes.2025.072261,
  author   = {Park, Jeongsu and Min, Moohong},
  title    = {Robustness and Performance Comparison of Generative {AI} Time Series Anomaly Detection under Noise},
  journal  = {Computer Modeling in Engineering \& Sciences},
  volume   = {145},
  number   = {3},
  pages    = {3913--3948},
  year     = {2025},
  issn     = {1526-1506},
  doi      = {10.32604/cmes.2025.072261},
  url      = {http://www.techscience.com/CMES/v145n3/64971},
  abstract = {Time series anomaly detection is critical in domains such as manufacturing, finance, and cybersecurity. Recent generative AI models, particularly Transformer- and Autoencoder-based architectures, show strong accuracy but their robustness under noisy conditions is less understood. This study evaluates three representative models---AnomalyTransformer, TranAD, and USAD---on the Server Machine Dataset (SMD) and cross-domain benchmarks including the Soil Moisture Active Passive (SMAP) dataset, the Mars Science Laboratory (MSL) dataset, and the Secure Water Treatment (SWaT) testbed. Seven noise settings (five canonical, two mixed) at multiple intensities are tested under fixed clean-data training, with variations in window, stride, and thresholding. Results reveal distinct robustness profiles: AnomalyTransformer maintains recall but loses precision under abrupt noise, TranAD balances sensitivity yet is vulnerable to structured anomalies, and USAD resists Gaussian perturbations but collapses under block anomalies. Quantitatively, F1 drops 60\%--70\% on noisy SMD, with severe collapse in SWaT (F1 $\leq$ 0.10, Drop up to 84\%) but relative stability on SMAP/MSL (Drop within $\pm$10\%). Overall, generative models exhibit complementary robustness patterns, highlighting noise-type dependent vulnerabilities and providing practical guidance for robust deployment.},
}



