@article{johnson2006semantic,
  author   = {Johnson, Matthew and Brostow, Gabriel and Shotton, Jamie and Arandjelovic, Ognen and Kwatra, Vivek and Cipolla, Roberto},
  title    = {Semantic Photo Synthesis},
  journal  = {Computer Graphics Forum},
  volume   = {25},
  number   = {3},
  year     = {2006},
  month    = {September},
  url      = {http://approjects.co.za/?big=en-us/research/publication/semantic-photo-synthesis/},
  abstract = {Composite images are synthesized from existing photographs by artists who make concept art, e.g., storyboards for movies or architectural planning. Current techniques allow an artist to fabricate such an image by digitally splicing parts of stock photographs. While these images serve mainly to “quickly” convey how a scene should look, their production is laborious. We propose a technique that allows a person to design a new photograph with substantially less effort. This paper presents a method that generates a composite image when a user types in nouns, such as “boat” and “sand.” The artist can optionally design an intended image by specifying other constraints. Our algorithm formulates the constraints as queries to search an automatically annotated image database. The desired photograph, not a collage, is then synthesized using graph-cut optimization, optionally allowing for further user interaction to edit or choose among alternative generated photos. An implementation of our approach, shown in the associated video, demonstrates our contributions of (1) a method for creating specific images with minimal human effort, and (2) a combined algorithm for automatically building an image library with semantic annotations from any photo collection.},
}