{ "id": "1707.01408", "version": "v1", "published": "2017-07-05T14:15:06.000Z", "updated": "2017-07-05T14:15:06.000Z", "title": "Video Representation Learning and Latent Concept Mining for Large-scale Multi-label Video Classification", "authors": [ "Po-Yao Huang", "Ye Yuan", "Zhenzhong Lan", "Lu Jiang", "Alexander G. Hauptmann" ], "categories": [ "cs.CV" ], "abstract": "We report on CMU Informedia Lab's system used in Google's YouTube 8 Million Video Understanding Challenge. Our pipeline achieved 84.675% and 84.662% GAP on our evaluation split and the official test set. We attribute the good performance to three components: 1) Refined video representation learning with residual links and hypercolumns. 2) Latent concept mining which captures interactions among concepts. 3) Learning with temporal segmentation and weighted multi-model ensemble. We conduct experiments to validate and analyze the contribution of our models. We also share some unsuccessful trials when leveraging conventional approaches such as recurrent neural networks over a large-scale video dataset. All the codes to reproduce the results will be publicly available soon.", "revisions": [ { "version": "v1", "updated": "2017-07-05T14:15:06.000Z" } ], "analyses": { "keywords": [ "large-scale multi-label video classification", "video representation learning", "latent concept mining", "cmu informedia labs system", "recurrent neural networks" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }