{ "id": "2303.13825", "version": "v1", "published": "2023-03-24T06:19:19.000Z", "updated": "2023-03-24T06:19:19.000Z", "title": "HandNeRF: Neural Radiance Fields for Animatable Interacting Hands", "authors": [ "Zhiyang Guo", "Wengang Zhou", "Min Wang", "Li Li", "Houqiang Li" ], "comment": "CVPR 2023", "categories": [ "cs.CV" ], "abstract": "We propose a novel framework to reconstruct accurate appearance and geometry with neural radiance fields (NeRF) for interacting hands, enabling the rendering of photo-realistic images and videos for gesture animation from arbitrary views. Given multi-view images of a single hand or interacting hands, an off-the-shelf skeleton estimator is first employed to parameterize the hand poses. Then we design a pose-driven deformation field to establish correspondence from those different poses to a shared canonical space, where a pose-disentangled NeRF for one hand is optimized. Such unified modeling efficiently complements the geometry and texture cues in rarely-observed areas for both hands. Meanwhile, we further leverage the pose priors to generate pseudo depth maps as guidance for occlusion-aware density learning. Moreover, a neural feature distillation method is proposed to achieve cross-domain alignment for color optimization. We conduct extensive experiments to verify the merits of our proposed HandNeRF and report a series of state-of-the-art results both qualitatively and quantitatively on the large-scale InterHand2.6M dataset.", "revisions": [ { "version": "v1", "updated": "2023-03-24T06:19:19.000Z" } ], "analyses": { "keywords": [ "neural radiance fields", "animatable interacting hands", "generate pseudo depth maps", "neural feature distillation method", "achieve cross-domain alignment" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }