@article{M6E14E557,
  author   = {Back, Moon-Ki and Yoon, Seung-Won and Lee, Sang-Baek and Lee, Kyu-Chul},
  title    = {Improving Fidelity of Synthesized Voices Generated by Using {GANs}},
  journal  = {KIPS Transactions on Software and Data Engineering},
  year     = {2021},
  issn     = {2287-5905},
  doi      = {10.3745/KTSDE.2021.10.1.9},
  keywords = {Generative Adversarial Networks, Fr{\'e}chet Inception Distance, Fidelity Improvement, Synthesized Voice},
  abstract = {Although Generative Adversarial Networks (GANs) have gained great popularity in computer vision and related fields, generating audio signals independently has yet to be presented. Unlike images, an audio signal is a sampled signal consisting of discrete samples, so it is not easy to learn the signals using CNN architectures, which is widely used in image generation tasks. In order to overcome this difficulty, GAN researchers proposed a strategy of applying time-frequency representations of audio to existing image-generating GANs. Following this strategy, we propose an improved method for increasing the fidelity of synthesized audio signals generated by using GANs. Our method is demonstrated on a public speech dataset, and evaluated by Fr{\'e}chet Inception Distance (FID). When employing our method, the FID showed 10.504, but 11.973 as for the existing state of the art method (lower FID indicates better fidelity).},
}