{ "id": "1807.07560", "version": "v1", "published": "2018-07-19T17:57:16.000Z", "updated": "2018-07-19T17:57:16.000Z", "title": "Compositional GAN: Learning Conditional Image Composition", "authors": [ "Samaneh Azadi", "Deepak Pathak", "Sayna Ebrahimi", "Trevor Darrell" ], "categories": [ "cs.CV", "cs.AI", "cs.LG", "stat.ML" ], "abstract": "Generative Adversarial Networks (GANs) can produce images of surprising complexity and realism, but are generally modeled to sample from a single latent source ignoring the explicit spatial interaction between multiple entities that could be present in a scene. Capturing such complex interactions between different objects in the world, including their relative scaling, spatial layout, occlusion, or viewpoint transformation is a challenging problem. In this work, we propose to model object composition in a GAN framework as a self-consistent composition-decomposition network. Our model is conditioned on the object images from their marginal distributions to generate a realistic image from their joint distribution by explicitly learning the possible interactions. We evaluate our model through qualitative experiments and user evaluations in both the scenarios when either paired or unpaired examples for the individual object images and the joint scenes are given during training. Our results reveal that the learned model captures potential interactions between the two object domains given as input to output new instances of composed scene at test time in a reasonable fashion.", "revisions": [ { "version": "v1", "updated": "2018-07-19T17:57:16.000Z" } ], "analyses": { "keywords": [ "learning conditional image composition", "compositional gan", "learned model captures potential interactions", "individual object images" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }