{ "id": "1805.10205", "version": "v1", "published": "2018-05-25T15:40:11.000Z", "updated": "2018-05-25T15:40:11.000Z", "title": "Multimodal Sentiment Analysis To Explore the Structure of Emotions", "authors": [ "Anthony Hu", "Seth Flaxman" ], "comment": "Accepted as a conference paper at KDD 2018", "doi": "10.1145/3219819.3219853", "categories": [ "stat.ML", "cs.LG", "stat.AP" ], "abstract": "We propose a novel approach to multimodal sentiment analysis using deep neural networks combining visual analysis and natural language processing. Our goal is different than the standard sentiment analysis goal of predicting whether a sentence expresses positive or negative sentiment; instead, we aim to infer the latent emotional state of the user. Thus, we focus on predicting the emotion word tags attached by users to their Tumblr posts, treating these as \"self-reported emotions.\" We demonstrate that our multimodal model combining both text and image features outperforms separate models based solely on either images or text. Our model's results are interpretable, automatically yielding sensible word lists associated with emotions. We explore the structure of emotions implied by our model and compare it to what has been posited in the psychology literature, and validate our model on a set of images that have been used in psychology studies. Finally, our work also provides a useful tool for the growing academic study of images - both photographs and memes - on social networks.", "revisions": [ { "version": "v1", "updated": "2018-05-25T15:40:11.000Z" } ], "analyses": { "keywords": [ "multimodal sentiment analysis", "networks combining visual analysis", "neural networks combining visual", "yielding sensible word lists", "image features outperforms separate models" ], "tags": [ "conference paper", "journal article" ], "publication": { "publisher": "ACM", "journal": "Commun. ACM" }, "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }