{ "id": "2306.00640", "version": "v1", "published": "2023-06-01T13:06:44.000Z", "updated": "2023-06-01T13:06:44.000Z", "title": "Multi-Modal Deep Learning for Multi-Temporal Urban Mapping With a Partly Missing Optical Modality", "authors": [ "Sebastian Hafner", "Yifang Ban" ], "comment": "4 pages, 2 figures, accepted for publication in the IGARSS 2023 Proceedings", "categories": [ "cs.CV", "eess.IV" ], "abstract": "This paper proposes a novel multi-temporal urban mapping approach using multi-modal satellite data from the Sentinel-1 Synthetic Aperture Radar (SAR) and Sentinel-2 MultiSpectral Instrument (MSI) missions. In particular, it focuses on the problem of a partly missing optical modality due to clouds. The proposed model utilizes two networks to extract features from each modality separately. In addition, a reconstruction network is utilized to approximate the optical features based on the SAR data in case of a missing optical modality. Our experiments on a multi-temporal urban mapping dataset with Sentinel-1 SAR and Sentinel-2 MSI data demonstrate that the proposed method outperforms a multi-modal approach that uses zero values as a replacement for missing optical data, as well as a uni-modal SAR-based approach. Therefore, the proposed method is effective in exploiting multi-modal data, if available, but it also retains its effectiveness in case the optical modality is missing.", "revisions": [ { "version": "v1", "updated": "2023-06-01T13:06:44.000Z" } ], "analyses": { "keywords": [ "partly missing optical modality", "multi-modal deep learning", "novel multi-temporal urban mapping approach" ], "note": { "typesetting": "TeX", "pages": 4, "language": "en", "license": "arXiv", "status": "editable" } } }