
@article{cmc.2025.061661,
  % BibTeX requires " and " between names; the original comma-separated list
  % would be parsed as one malformed name. Names entered in unambiguous
  % "Last, First" form so any style can abbreviate/sort correctly.
  author   = {Chen, Zhen-Yu and Liu, Feng-Chi and Wang, Xin and Lee, Cheng-Hsiung and Lin, Ching-Sheng},
  title    = {Efficient Parameterization for Knowledge Graph Embedding Using Hierarchical Attention Network},
  journal  = {Computers, Materials \& Continua},
  year     = {2025},
  volume   = {82},
  number   = {3},
  pages    = {4287--4300},
  issn     = {1546-2226},
  % Bare DOI (no resolver prefix) is the canonical storage form; URL kept as a
  % secondary locator to the publisher page.
  doi      = {10.32604/cmc.2025.061661},
  url      = {http://www.techscience.com/cmc/v82n3/59943},
  abstract = {In the domain of knowledge graph embedding, conventional approaches typically transform entities and relations into continuous vector spaces. However, parameter efficiency becomes increasingly crucial when dealing with large-scale knowledge graphs that contain vast numbers of entities and relations. In particular, resource-intensive embeddings often lead to increased computational costs, and may limit scalability and adaptability in practical environments, such as in low-resource settings or real-world applications. This paper explores an approach to knowledge graph representation learning that leverages small, reserved entities and relation sets for parameter-efficient embedding. We introduce a hierarchical attention network designed to refine and maximize the representational quality of embeddings by selectively focusing on these reserved sets, thereby reducing model complexity. Empirical assessments validate that our model achieves high performance on the benchmark dataset with fewer parameters and smaller embedding dimensions. The ablation studies further highlight the impact and contribution of each component in the proposed hierarchical attention structure.},
}



