CubeGAN: Omnidirectional Image Synthesis Using Generative Adversarial Networks
C. May and D. Aliaga
Computer Graphics Forum - 2023
        
May, C., and D. Aliaga. “CubeGAN: Omnidirectional Image Synthesis Using Generative Adversarial Networks.” Computer Graphics Forum, vol. 42, no. 2, 2023, pp. 213–24, https://doi.org/10.1111/cgf.14755.
        @article{May_2023_Eurographics,
  author = {May, C. and Aliaga, D.},
  title = {CubeGAN: Omnidirectional Image Synthesis Using Generative Adversarial Networks},
  journal = {Computer Graphics Forum},
  volume = {42},
  number = {2},
  pages = {213--224},
  keywords = {Computing methodologies, Computer graphics, Rendering, Neural networks},
  doi = {10.1111/cgf.14755},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.14755},
  eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/cgf.14755},
  abstract = {We propose a framework to create projectively-correct and seam-free cube-map images using generative adversarial learning. Deep generation of cube-maps that contain the correct projection of the environment onto its faces is not straightforward as has been recognized in prior work. Our approach extends an existing framework, StyleGAN3, to produce cube-maps instead of planar images. In addition to reshaping the output, we include a cube-specific volumetric initialization component, a projective resampling component, and a modification of augmentation operations to the spherical domain. Our results demonstrate the network's generation capabilities trained on imagery from various 3D environments. Additionally, we show the power and quality of our GAN design in an inversion task, combined with navigation capabilities, to perform novel view synthesis.},
  year = {2023},
  pdf = {May-Eurographics-2023-Cubegan.pdf},
  image = {May-Eurographics-2023-Cubegan.jpg}
}
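
As a companion to the abstract, below is a minimal sketch (not the authors' code) in Python/NumPy of the standard cube-map geometry that operations such as projective resampling and spherical augmentation build on: mapping pixel centers on a cube face to unit directions on the sphere. The face-basis table and the function name are illustrative assumptions, not taken from the paper.

import numpy as np

# One possible (forward, right, up) basis per cube face; conventions differ
# between renderers, so treat this table as illustrative.
_FACE_AXES = {
    "+x": (np.array([ 1.0, 0.0, 0.0]), np.array([0.0, 0.0, -1.0]), np.array([0.0, 1.0, 0.0])),
    "-x": (np.array([-1.0, 0.0, 0.0]), np.array([0.0, 0.0,  1.0]), np.array([0.0, 1.0, 0.0])),
    "+y": (np.array([0.0,  1.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, -1.0])),
    "-y": (np.array([0.0, -1.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0,  1.0])),
    "+z": (np.array([0.0, 0.0,  1.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])),
    "-z": (np.array([0.0, 0.0, -1.0]), np.array([-1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])),
}

def face_pixel_to_direction(face, u, v):
    """Map arrays u, v in [-1, 1] on a cube face to unit 3D directions."""
    forward, right, up = _FACE_AXES[face]
    d = forward + u[..., None] * right + v[..., None] * up
    return d / np.linalg.norm(d, axis=-1, keepdims=True)

# Example: spherical directions for the pixel centers of an 8x8 "+z" face.
n = 8
centers = (np.arange(n) + 0.5) / n * 2.0 - 1.0   # pixel centers in [-1, 1]
uu, vv = np.meshgrid(centers, centers)
dirs = face_pixel_to_direction("+z", uu, vv)      # shape (8, 8, 3) unit vectors

With such a mapping in hand, resampling a cube-map under a spherical rotation or checking continuity across face seams reduces to rotating these directions and looking them up on the appropriate faces.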