From fb1cec38f4f8987b43c315ad6eb27dcadd0a5d66 Mon Sep 17 00:00:00 2001 From: Dilshad Ali <47120933+Dilshad24@users.noreply.github.com> Date: Fri, 16 Aug 2024 17:33:07 +0530 Subject: [PATCH] Fix console output issue in `tts` function of `synthesizer` class Fix Console Printing Issue in synthesizer Class This update addresses an issue with the tts function in the synthesizer class where console output, including the array of split sentences, processing time, and real-time factor, was being printed during execution. This was causing unwanted console output when the synthesizer function is run in a thread. Changes Made: Added a boolean keyword argument `SuppresPrintStatements` (defaulting to False) to suppress console output generated by the tts function. Ensured that print statements are only triggered when explicitly required, preventing cluttered console output during threaded execution. This modification improves the usability of the synthesizer class when integrated into threaded applications and enhances the overall user experience by minimizing unnecessary console output. --- TTS/api.py | 7 ++++++- TTS/utils/synthesizer.py | 19 +++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/TTS/api.py b/TTS/api.py index 7abc188e74..0900db0525 100644 --- a/TTS/api.py +++ b/TTS/api.py @@ -88,7 +88,7 @@ def models(self): @property def is_multi_speaker(self): - if hasattr(self.synthesizer.tts_model, "speaker_manager") and self.synthesizer.tts_model.speaker_manager: return self.synthesizer.tts_model.speaker_manager.num_speakers > 1 return False @@ -243,6 +243,7 @@ def tts( emotion: str = None, speed: float = None, split_sentences: bool = True, + SuppresPrintStatements: bool = False, **kwargs, ): """Convert text to speech. @@ -267,6 +268,9 @@ def tts( Split text into sentences, synthesize them separately and concatenate the file audio. 
Setting it False uses more VRAM and possibly hit model specific text length or VRAM limits. Only applicable to the 🐸TTS models. Defaults to True. + SuppresPrintStatements (bool, optional): + Suppress all the print statements so that when running the function in a thread the print statements will not appear in the terminal. + Setting it to True will suppress the print statements. Defaults to False. kwargs (dict, optional): Additional arguments for the model. """ @@ -283,6 +287,7 @@ def tts( style_text=None, reference_speaker_name=None, split_sentences=split_sentences, + SuppresPrintStatements=SuppresPrintStatements, **kwargs, ) return wav diff --git a/TTS/utils/synthesizer.py b/TTS/utils/synthesizer.py index b98647c30c..2666f868a8 100644 --- a/TTS/utils/synthesizer.py +++ b/TTS/utils/synthesizer.py @@ -265,6 +265,7 @@ def tts( reference_wav=None, reference_speaker_name=None, split_sentences: bool = True, + SuppresPrintStatements: bool = False, **kwargs, ) -> List[int]: """🐸 TTS magic. Run all the models and generate speech. @@ -279,6 +280,7 @@ def tts( reference_wav ([type], optional): reference waveform for voice conversion. Defaults to None. reference_speaker_name ([type], optional): speaker id of reference waveform. Defaults to None. split_sentences (bool, optional): split the input text into sentences. Defaults to True. + SuppresPrintStatements (bool, optional): Suppress the print statements. Defaults to False. **kwargs: additional arguments to pass to the TTS model. 
Returns: List[int]: [description] @@ -294,9 +296,11 @@ def tts( if text: sens = [text] if split_sentences: - print(" > Text splitted to sentences.") + if not SuppresPrintStatements: + print(" > Text splitted to sentences.") sens = self.split_into_sentences(text) - print(sens) + if not SuppresPrintStatements: + print(sens) # handle multi-speaker if "voice_dir" in kwargs: @@ -420,7 +424,8 @@ def tts( self.vocoder_config["audio"]["sample_rate"] / self.tts_model.ap.sample_rate, ] if scale_factor[1] != 1: - print(" > interpolating tts model output.") + if not SuppresPrintStatements: + print(" > interpolating tts model output.") vocoder_input = interpolate_vocoder_input(scale_factor, vocoder_input) else: vocoder_input = torch.tensor(vocoder_input).unsqueeze(0) # pylint: disable=not-callable @@ -484,7 +489,8 @@ def tts( self.vocoder_config["audio"]["sample_rate"] / self.tts_model.ap.sample_rate, ] if scale_factor[1] != 1: - print(" > interpolating tts model output.") + if not SuppresPrintStatements: + print(" > interpolating tts model output.") vocoder_input = interpolate_vocoder_input(scale_factor, vocoder_input) else: vocoder_input = torch.tensor(vocoder_input).unsqueeze(0) # pylint: disable=not-callable @@ -500,6 +506,7 @@ def tts( # compute stats process_time = time.time() - start_time audio_time = len(wavs) / self.tts_config.audio["sample_rate"] - print(f" > Processing time: {process_time}") - print(f" > Real-time factor: {process_time / audio_time}") + if not SuppresPrintStatements: + print(f" > Processing time: {process_time}") + print(f" > Real-time factor: {process_time / audio_time}") return wavs