{ "id": "1312.3429", "version": "v2", "published": "2013-12-12T10:03:47.000Z", "updated": "2013-12-16T16:11:52.000Z", "title": "Unsupervised learning of depth and motion", "authors": [ "Kishore Konda", "Roland Memisevic" ], "categories": [ "cs.CV", "cs.LG", "stat.ML" ], "abstract": "We present a model for the joint estimation of disparity and motion. The model is based on learning about the interrelations between images from multiple cameras, multiple frames in a video, or the combination of both. We show that learning depth and motion cues, as well as their combinations, from data is possible within a single type of architecture and a single type of learning algorithm, by using biologically inspired \"complex cell\" like units, which encode correlations between the pixels across image pairs. Our experimental results show that the learning of depth and motion makes it possible to achieve state-of-the-art performance in 3-D activity analysis, and to outperform existing hand-engineered 3-D motion features by a very large margin.", "revisions": [ { "version": "v2", "updated": "2013-12-16T16:11:52.000Z" } ], "analyses": { "keywords": [ "unsupervised learning", "single type", "achieve state-of-the-art performance", "multiple cameras", "multiple frames" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable", "adsabs": "2013arXiv1312.3429K" } } }