
@Article{cmc.2024.050018,
AUTHOR = {Jian Luo and Bo Xu and Tardi Tjahjadi and Jian Yi},
TITLE = {A Novel 3D Gait Model for Subject Identification Robust against Carrying and Dressing Variations},
JOURNAL = {Computers, Materials \& Continua},
VOLUME = {80},
YEAR = {2024},
NUMBER = {1},
PAGES = {235--261},
URL = {http://www.techscience.com/cmc/v80n1/57368},
ISSN = {1546-2226},
ABSTRACT = {Subject identification via the subject’s gait is challenging due to variations in the subject’s carrying and dressing conditions in real-life scenes. This paper proposes a novel targeted 3-dimensional (3D) gait model (3D\textit{Gait}) represented by a set of interpretable 3D\textit{Gait} descriptors based on a 3D parametric body model. The 3D\textit{Gait} descriptors are utilised as invariant gait features in the 3D\textit{Gait} recognition method to address object carrying and dressing. The 3D\textit{Gait} recognition method involves 2-dimensional (2D) to 3D\textit{Gait} data learning based on 3D virtual samples, a semantic gait parameter estimation Long Short-Term Memory (LSTM) network (3D-SGPE-LSTM), a feature fusion deep model based on multi-set canonical correlation analysis, and a Softmax recognition network. First, a sensory experiment based on 3D body shape and pose deformation with 3D virtual dressing is used to fit 3D\textit{Gait} onto the given 2D gait images. Interpretable 3D semantic parameters control the 3D morphing and dressing involved. Similarity degree measurement determines the semantic descriptors of 2D gait images of subjects with various shapes, poses and styles. Second, using the 2D gait images as input and the subjects’ corresponding 3D semantic descriptors as output, an end-to-end 3D-SGPE-LSTM is constructed and trained. Third, body shape, pose and external gait factors (3D-\textit{eFactors}) are estimated using the 3D-SGPE-LSTM model to create a set of interpretable gait descriptors to represent the 3D\textit{Gait} model, i.e., a 3D intrinsic semantic shape descriptor (3D-\textit{Shape}), a 3D skeleton-based gait pose descriptor (3D-\textit{Pose}), and 3D dressing with other 3D-\textit{eFactors}. Finally, the 3D-\textit{Shape} and 3D-\textit{Pose} descriptors are coupled to a unified pattern space by learning prior knowledge from the 3D-\textit{eFactors}. Experiments on the CASIA B, CMU MoBo, TUM GAID and GPJATK databases show that 3D\textit{Gait} is robust against object carrying and dressing variations, especially under multi-cross variations.},
DOI = {10.32604/cmc.2024.050018}
}
