{ "id": "1609.05396", "version": "v1", "published": "2016-09-17T21:46:21.000Z", "updated": "2016-09-17T21:46:21.000Z", "title": "A Deep Metric for Multimodal Registration", "authors": [ "Martin Simonovsky", "Benjamín Gutiérrez-Becker", "Diana Mateus", "Nassir Navab", "Nikos Komodakis" ], "comment": "Accepted to MICCAI 2016; extended version", "categories": [ "cs.CV", "cs.LG", "cs.NE" ], "abstract": "Multimodal registration is a challenging problem in medical imaging due the high variability of tissue appearance under different imaging modalities. The crucial component here is the choice of the right similarity measure. We make a step towards a general learning-based solution that can be adapted to specific situations and present a metric based on a convolutional neural network. Our network can be trained from scratch even from a few aligned image pairs. The metric is validated on intersubject deformable registration on a dataset different from the one used for training, demonstrating good generalization. In this task, we outperform mutual information by a significant margin.", "revisions": [ { "version": "v1", "updated": "2016-09-17T21:46:21.000Z" } ], "analyses": { "keywords": [ "multimodal registration", "deep metric", "convolutional neural network", "right similarity measure", "outperform mutual information" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }