{ "id": "1711.03946", "version": "v1", "published": "2017-11-10T18:09:15.000Z", "updated": "2017-11-10T18:09:15.000Z", "title": "Bayesian Paragraph Vectors", "authors": [ "Geng Ji", "Robert Bamler", "Erik B. Sudderth", "Stephan Mandt" ], "comment": "Submitted to the NIPS 2017 workshop \"Advances in Approximate Bayesian Inference\"", "categories": [ "cs.CL", "cs.LG", "stat.ML" ], "abstract": "Word2vec (Mikolov et al., 2013) has proven to be successful in natural language processing by capturing the semantic relationships between different words. Built on top of single-word embeddings, paragraph vectors (Le and Mikolov, 2014) find fixed-length representations for pieces of text with arbitrary lengths, such as documents, paragraphs, and sentences. In this work, we propose a novel interpretation for neural-network-based paragraph vectors by developing an unsupervised generative model whose maximum likelihood solution corresponds to traditional paragraph vectors. This probabilistic formulation allows us to go beyond point estimates of parameters and to perform Bayesian posterior inference. We find that the entropy of paragraph vectors decreases with the length of documents, and that information about posterior uncertainty improves performance in supervised learning tasks such as sentiment analysis and paraphrase detection.", "revisions": [ { "version": "v1", "updated": "2017-11-10T18:09:15.000Z" } ], "analyses": { "keywords": [ "bayesian paragraph vectors", "perform bayesian posterior inference", "maximum likelihood solution corresponds", "traditional paragraph vectors", "paragraph vectors decreases" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }