{ "id": "1906.06822", "version": "v1", "published": "2019-06-17T02:59:11.000Z", "updated": "2019-06-17T02:59:11.000Z", "title": "Spatio-Temporal Fusion Networks for Action Recognition", "authors": [ "Sangwoo Cho", "Hassan Foroosh" ], "journal": "Asian Conference on Computer Vision (2018) 347-364", "doi": "10.1007/978-3-030-20887-5_22", "categories": [ "cs.CV" ], "abstract": "The video based CNN works have focused on effective ways to fuse appearance and motion networks, but they typically lack utilizing temporal information over video frames. In this work, we present a novel spatio-temporal fusion network (STFN) that integrates temporal dynamics of appearance and motion information from entire videos. The captured temporal dynamic information is then aggregated for a better video level representation and learned via end-to-end training. The spatio-temporal fusion network consists of two sets of Residual Inception blocks that extract temporal dynamics and a fusion connection for appearance and motion features. The benefits of STFN are: (a) it captures local and global temporal dynamics of complementary data to learn video-wide information; and (b) it is applicable to any network for video classification to boost performance. We explore a variety of design choices for STFN and verify how the network performance is varied with the ablation studies. We perform experiments on two challenging human activity datasets, UCF101 and HMDB51, and achieve the state-of-the-art results with the best network.", "revisions": [ { "version": "v1", "updated": "2019-06-17T02:59:11.000Z" } ], "analyses": { "keywords": [ "action recognition", "lack utilizing temporal information", "novel spatio-temporal fusion network", "spatio-temporal fusion network consists", "better video level representation" ], "tags": [ "journal article" ], "publication": { "publisher": "Springer" }, "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }