{ "id": "2312.15964", "version": "v1", "published": "2023-12-26T09:02:17.000Z", "updated": "2023-12-26T09:02:17.000Z", "title": "Semantic Guidance Tuning for Text-To-Image Diffusion Models", "authors": [ "Hyun Kang", "Dohae Lee", "Myungjin Shin", "In-Kwon Lee" ], "categories": [ "cs.CV", "cs.AI" ], "abstract": "Recent advancements in Text-to-Image (T2I) diffusion models have demonstrated impressive success in generating high-quality images with zero-shot generalization capabilities. Yet, current models struggle to closely adhere to prompt semantics, often misrepresenting or overlooking specific attributes. To address this, we propose a simple, training-free approach that modulates the guidance direction of diffusion models during inference. We first decompose the prompt semantics into a set of concepts, and monitor the guidance trajectory in relation to each concept. Our key observation is that deviations in model's adherence to prompt semantics are highly correlated with divergence of the guidance from one or more of these concepts. Based on this observation, we devise a technique to steer the guidance direction towards any concept from which the model diverges. Extensive experimentation validates that our method improves the semantic alignment of images generated by diffusion models in response to prompts. Project page is available at: https://korguy.github.io/", "revisions": [ { "version": "v1", "updated": "2023-12-26T09:02:17.000Z" } ], "analyses": { "keywords": [ "text-to-image diffusion models", "semantic guidance tuning", "prompt semantics", "guidance direction", "current models struggle" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }