@Article{cmc.2023.028333,
  AUTHOR = {Saman Riaz and Ali Arshad and Shahab S. Band and Amir Mosavi},
  TITLE = {Deep Bimodal Fusion Approach for Apparent Personality Analysis},
  JOURNAL = {Computers, Materials \& Continua},
  VOLUME = {75},
  YEAR = {2023},
  NUMBER = {1},
  PAGES = {2301--2312},
  URL = {http://www.techscience.com/cmc/v75n1/51419},
  ISSN = {1546-2226},
  ABSTRACT = {Personality distinguishes individuals’ patterns of feeling, thinking, and behaving. Predicting personality from short video clips is an exciting research area in computer vision. Most existing research reports only preliminary results on extracting rich information from the visual and audio (sound) modalities. To overcome this deficiency, we propose the Deep Bimodal Fusion (DBF) approach to predict the five personality traits: agreeableness, extraversion, openness, conscientiousness, and neuroticism. In the proposed framework, the visual modality is handled by a modified convolutional neural network (CNN), more specifically the Descriptor Aggregator Model (DAN), to obtain discriminative visual representations. For the audio modality, the model efficiently extracts audio representations and feeds them into a long short-term memory (LSTM) network. Moreover, employing modality-specific neural networks allows the framework to predict the traits independently before combining them through weighted fusion to obtain the final prediction. The proposed approach attains a mean accuracy of 0.9183, averaged over the five personality traits, outperforming previously proposed frameworks.},
  DOI = {10.32604/cmc.2023.028333}
}