diff --git a/app.py b/app.py
index ee6449c..97a7564 100644
--- a/app.py
+++ b/app.py
@@ -1,13 +1,13 @@
 import os
 import sys
+
 import gradio as gr
 
-from tabs.welcome import welcome_tab
 from tabs.conversion.conversion import conversion_tab
 from tabs.conversion.edge_tts import edge_tts_tab
+from tabs.install.install_models import files_upload, url_download, zip_upload
 from tabs.processing.processing import processing_tab
-from tabs.install.install_models import url_download, zip_upload, files_upload
-
+from tabs.welcome import welcome_tab
 
 DEFAULT_PORT = 4000
 MAX_PORT_ATTEMPTS = 10
diff --git a/download_models.py b/download_models.py
index a22e072..a8850d5 100644
--- a/download_models.py
+++ b/download_models.py
@@ -1,4 +1,5 @@
 import os
+
 import requests
 
 PREDICTORS = "https://huggingface.co/Politrees/RVC_resources/resolve/main/predictors/"
diff --git a/rvc/infer/infer.py b/rvc/infer/infer.py
index 39241e9..ab9fb6a 100644
--- a/rvc/infer/infer.py
+++ b/rvc/infer/infer.py
@@ -1,10 +1,12 @@
-import torch
 from multiprocessing import cpu_count
+
+import torch
 from fairseq import checkpoint_utils
 from scipy.io import wavfile
 
 from rvc.lib.algorithm.synthesizers import Synthesizer
 from rvc.lib.my_utils import load_audio
+
 from .pipeline import VC
 
 
diff --git a/rvc/infer/pipeline.py b/rvc/infer/pipeline.py
index e7aa657..1a74bb8 100644
--- a/rvc/infer/pipeline.py
+++ b/rvc/infer/pipeline.py
@@ -1,11 +1,12 @@
-import os
 import gc
-import torch
-import torch.nn.functional as F
-import torchcrepe
+import os
+
 import faiss
 import librosa
 import numpy as np
+import torch
+import torch.nn.functional as F
+import torchcrepe
 from scipy import signal
 
 from rvc.lib.predictors.FCPE import FCPEF0Predictor
@@ -19,7 +20,9 @@ FILTER_ORDER = 5  # Порядок фильтра
 CUTOFF_FREQUENCY = 48  # Частота среза (в Гц)
 SAMPLE_RATE = 16000  # Частота дискретизации (в Гц)
 
-bh, ah = signal.butter(N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE)
+bh, ah = signal.butter(
+    N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE
+)
 
 input_audio_path2wav = {}
 
@@ -56,7 +59,8 @@ def change_rms(source_audio, source_rate, target_audio, target_rate, rate):
     rms2 = torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6)
 
     adjusted_audio = (
-        target_audio * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
+        target_audio
+        * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
     )
     return adjusted_audio
 
@@ -186,9 +190,13 @@ def get_f0(
             delta_t = np.round(
                 (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
             ).astype("int16")
-            replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
+            replace_f0 = np.interp(
+                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+            )
             shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
-            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+                :shape
+            ]
 
         f0bak = f0.copy()
         f0_mel = 1127 * np.log(1 + f0 / 700)
@@ -452,7 +460,9 @@ def pipeline(
                 audio, self.sample_rate, audio_opt, tgt_sr, volume_envelope
             )
         if resample_sr >= self.sample_rate and tgt_sr != resample_sr:
-            audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr)
+            audio_opt = librosa.resample(
+                audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
+            )
 
         audio_max = np.abs(audio_opt).max() / 0.99
         max_int16 = 32768
diff --git a/rvc/lib/algorithm/attentions.py b/rvc/lib/algorithm/attentions.py
index 63258a5..aa6970c 100644
--- a/rvc/lib/algorithm/attentions.py
+++ b/rvc/lib/algorithm/attentions.py
@@ -1,4 +1,5 @@
 import math
+
 import torch
 from torch import nn
 from torch.nn import functional as F
@@ -78,7 +79,9 @@ def attention(self, query, key, value, mask=None):
 
         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
         if self.window_size is not None:
-            assert t_s == t_t, "Relative attention is only available for self-attention."
+            assert (
+                t_s == t_t
+            ), "Relative attention is only available for self-attention."
             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
             rel_logits = self._matmul_with_relative_keys(
                 query / math.sqrt(self.k_channels), key_relative_embeddings
@@ -93,7 +96,9 @@ def attention(self, query, key, value, mask=None):
         if mask is not None:
             scores = scores.masked_fill(mask == 0, -1e4)
             if self.block_length is not None:
-                assert t_s == t_t, "Local attention is only available for self-attention."
+                assert (
+                    t_s == t_t
+                ), "Local attention is only available for self-attention."
                 block_mask = (
                     torch.ones_like(scores)
                     .triu(-self.block_length)
@@ -105,7 +110,9 @@ def attention(self, query, key, value, mask=None):
         output = torch.matmul(p_attn, value)
         if self.window_size is not None:
             relative_weights = self._absolute_position_to_relative_position(p_attn)
-            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+            value_relative_embeddings = self._get_relative_embeddings(
+                self.emb_rel_v, t_s
+            )
             output = output + self._matmul_with_relative_values(
                 relative_weights, value_relative_embeddings
             )
diff --git a/rvc/lib/algorithm/commons.py b/rvc/lib/algorithm/commons.py
index 6137a0f..3645fb8 100644
--- a/rvc/lib/algorithm/commons.py
+++ b/rvc/lib/algorithm/commons.py
@@ -1,7 +1,8 @@
 import math
+from typing import List, Optional
+
 import torch
 from torch.nn import functional as F
-from typing import List, Optional
 
 
 def init_weights(m, mean=0.0, std=0.01):
@@ -22,7 +23,9 @@ def convert_pad_shape(pad_shape):
 
 def kl_divergence(m_p, logs_p, m_q, logs_q):
     kl = (logs_q - logs_p) - 0.5
-    kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    kl += (
+        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    )
     return kl
 
 
diff --git a/rvc/lib/algorithm/discriminators.py b/rvc/lib/algorithm/discriminators.py
index a143547..947f1dd 100644
--- a/rvc/lib/algorithm/discriminators.py
+++ b/rvc/lib/algorithm/discriminators.py
@@ -6,7 +6,6 @@
 from .commons import get_padding
 from .residuals import LRELU_SLOPE
 
-
 PERIODS_V1 = [2, 3, 5, 7, 11, 17]
 PERIODS_V2 = [2, 3, 5, 7, 11, 17, 23, 37]
 IN_CHANNELS = [1, 32, 128, 512, 1024]
@@ -18,7 +17,10 @@ def __init__(self, use_spectral_norm=False):
         super(MultiPeriodDiscriminator, self).__init__()
         self.discriminators = nn.ModuleList(
             [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-            + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in PERIODS_V1]
+            + [
+                DiscriminatorP(p, use_spectral_norm=use_spectral_norm)
+                for p in PERIODS_V1
+            ]
         )
 
     def forward(self, y, y_hat):
@@ -39,7+41,10 @@ def __init__(self, use_spectral_norm=False):
         super(MultiPeriodDiscriminatorV2, self).__init__()
         self.discriminators = nn.ModuleList(
             [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-            + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in PERIODS_V2]
+            + [
+                DiscriminatorP(p, use_spectral_norm=use_spectral_norm)
+                for p in PERIODS_V2
+            ]
         )
 
     def forward(self, y, y_hat):
diff --git a/rvc/lib/algorithm/encoders.py b/rvc/lib/algorithm/encoders.py
index c301dd5..a095019 100644
--- a/rvc/lib/algorithm/encoders.py
+++ b/rvc/lib/algorithm/encoders.py
@@ -1,8 +1,9 @@
 import math
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn.utils.weight_norm import remove_weight_norm
-from typing import Optional
 
 from .attentions import FFN, MultiHeadAttention
 from .commons import sequence_mask
diff --git a/rvc/lib/algorithm/generators.py b/rvc/lib/algorithm/generators.py
index 3c7395e..99942e8 100644
--- a/rvc/lib/algorithm/generators.py
+++ b/rvc/lib/algorithm/generators.py
@@ -1,9 +1,10 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import init_weights
 from .residuals import LRELU_SLOPE, ResBlock1, ResBlock2
@@ -121,10 +122,14 @@ def forward(self, f0: torch.Tensor, upp: int):
         f0_buf[:, :, 0] = f0[:, :, 0]
         f0_buf[:, :, 1:] = (
             f0_buf[:, :, 0:1]
-            * torch.arange(2, self.harmonic_num + 2, device=f0.device)[None, None, :]
+            * torch.arange(2, self.harmonic_num + 2, device=f0.device)[
+                None, None, :
+            ]
         )
         rad_values = (f0_buf / float(self.sample_rate)) % 1
-        rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device)
+        rand_ini = torch.rand(
+            f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+        )
         rand_ini[:, 0] = 0
         rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
         tmp_over_one = torch.cumsum(rad_values, 1)
diff --git a/rvc/lib/algorithm/modules.py b/rvc/lib/algorithm/modules.py
index dd3ff31..ed4937d 100644
--- a/rvc/lib/algorithm/modules.py
+++ b/rvc/lib/algorithm/modules.py
@@ -1,7 +1,7 @@
 import torch
 from torch import nn
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import fused_add_tanh_sigmoid_multiply
 
diff --git a/rvc/lib/algorithm/nsf.py b/rvc/lib/algorithm/nsf.py
index 2f19f0f..709843c 100644
--- a/rvc/lib/algorithm/nsf.py
+++ b/rvc/lib/algorithm/nsf.py
@@ -1,10 +1,11 @@
 import math
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import init_weights
 from .generators import SineGen
@@ -59,7 +60,9 @@ def __init__(
         self.num_kernels = len(resblock_kernel_sizes)
         self.num_upsamples = len(upsample_rates)
         self.f0_upsamp = nn.Upsample(scale_factor=math.prod(upsample_rates))
-        self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0, is_half=is_half)
+        self.m_source = SourceModuleHnNSF(
+            sample_rate=sr, harmonic_num=0, is_half=is_half
+        )
 
         self.conv_pre = nn.Conv1d(
             initial_channel, upsample_initial_channel, 7, 1, padding=3
@@ -70,7 +73,8 @@ def __init__(
         self.noise_convs = nn.ModuleList()
 
         channels = [
-            upsample_initial_channel // (2 ** (i + 1)) for i in range(len(upsample_rates))
+            upsample_initial_channel // (2 ** (i + 1))
+            for i in range(len(upsample_rates))
         ]
         stride_f0s = [
             math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
diff --git a/rvc/lib/algorithm/residuals.py b/rvc/lib/algorithm/residuals.py
index 45d851b..236565f 100644
--- a/rvc/lib/algorithm/residuals.py
+++ b/rvc/lib/algorithm/residuals.py
@@ -1,14 +1,14 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import get_padding, init_weights
 from .modules import WaveNet
 
-
 LRELU_SLOPE = 0.1
 
 
diff --git a/rvc/lib/algorithm/synthesizers.py b/rvc/lib/algorithm/synthesizers.py
index 35aae21..83877bb 100644
--- a/rvc/lib/algorithm/synthesizers.py
+++ b/rvc/lib/algorithm/synthesizers.py
@@ -1,10 +1,11 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn.utils.weight_norm import remove_weight_norm
-from typing import Optional
 
-from .commons import slice_segments, rand_slice_segments
-from .encoders import TextEncoder, PosteriorEncoder
+from .commons import rand_slice_segments, slice_segments
+from .encoders import PosteriorEncoder, TextEncoder
 from .generators import Generator
 from .nsf import GeneratorNSF
 from .residuals import ResidualCouplingBlock
diff --git a/rvc/lib/predictors/FCPE.py b/rvc/lib/predictors/FCPE.py
index ca49736..bf26fe5 100644
--- a/rvc/lib/predictors/FCPE.py
+++ b/rvc/lib/predictors/FCPE.py
@@ -1,21 +1,20 @@
+import math
+import os
+from functools import partial
 from typing import Union
-import torch.nn.functional as F
+import librosa
 import numpy as np
+import soundfile as sf
 import torch
 import torch.nn as nn
-from torch.nn.utils.parametrizations import weight_norm
-from torchaudio.transforms import Resample
-import os
-import librosa
-import soundfile as sf
+import torch.nn.functional as F
 import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-import math
-from functools import partial
-
 from einops import rearrange, repeat
+from librosa.filters import mel as librosa_mel_fn
 from local_attention import LocalAttention
+from torch.nn.utils.parametrizations import weight_norm
+from torchaudio.transforms import Resample
 
 os.environ["LRU_CACHE_CAPACITY"] = "3"
 
 
@@ -47,7 +46,9 @@ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False
         return [], sample_rate or target_sr or 48000
     if target_sr is not None and sample_rate != target_sr:
         data = torch.from_numpy(
-            librosa.core.resample(data.numpy(), orig_sr=sample_rate, target_sr=target_sr)
+            librosa.core.resample(
+                data.numpy(), orig_sr=sample_rate, target_sr=target_sr
+            )
         )
         sample_rate = target_sr
 
@@ -187,7 +188,9 @@ def softmax_kernel(
     if is_query:
         data_dash = ratio * (
             torch.exp(
-                data_dash - diag_data - torch.max(data_dash, dim=-1, keepdim=True).values
+                data_dash
+                - diag_data
+                - torch.max(data_dash, dim=-1, keepdim=True).values
             )
             + eps
         )
@@ -355,12 +358,16 @@ def gaussian_orthogonal_random_matrix(
     block_list = []
 
     for _ in range(nb_full_blocks):
-        q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q, device=device)
+        q = orthogonal_matrix_chunk(
+            nb_columns, qr_uniform_q=qr_uniform_q, device=device
+        )
         block_list.append(q)
 
     remaining_rows = nb_rows - nb_full_blocks * nb_columns
     if remaining_rows > 0:
-        q = orthogonal_matrix_chunk(nb_columns, qr_uniform_q=qr_uniform_q, device=device)
+        q = orthogonal_matrix_chunk(
+            nb_columns, qr_uniform_q=qr_uniform_q, device=device
+        )
         block_list.append(q[:remaining_rows])
 
     final_matrix = torch.cat(block_list)
@@ -661,7 +668,9 @@ def forward(
     def cents_decoder(self, y, mask=True):
         B, N, _ = y.size()
         ci = self.cent_table[None, None, :].expand(B, N, -1)
-        rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(y, dim=-1, keepdim=True)
+        rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(
+            y, dim=-1, keepdim=True
+        )
         if mask:
             confident = torch.max(y, dim=-1, keepdim=True)[0]
             confident_mask = torch.ones_like(confident)
@@ -778,7 +787,9 @@ def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
         mel = self.extract_nvstft(audio_res, keyshift=keyshift, train=train)
 
         n_frames = int(audio.shape[1] // self.hop_size) + 1
-        mel = torch.cat((mel, mel[:, -1:, :]), 1) if n_frames > int(mel.shape[1]) else mel
+        mel = (
+            torch.cat((mel, mel[:, -1:, :]), 1) if n_frames > int(mel.shape[1]) else mel
+        )
         mel = mel[:, :n_frames, :] if n_frames < int(mel.shape[1]) else mel
         return mel
 
@@ -833,7 +844,9 @@ def repeat_expand(
 ):
     ndim = content.ndim
     content = (
-        content[None, None] if ndim == 1 else content[None] if ndim == 2 else content
+        content[None, None]
+        if ndim == 1
+        else content[None] if ndim == 2 else content
     )
     assert content.ndim == 3
     is_np = isinstance(content, np.ndarray)
diff --git a/rvc/lib/predictors/RMVPE.py b/rvc/lib/predictors/RMVPE.py
index 929f399..32f2c75 100644
--- a/rvc/lib/predictors/RMVPE.py
+++ b/rvc/lib/predictors/RMVPE.py
@@ -1,10 +1,10 @@
-import torch
 import numpy as np
+import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from librosa.filters import mel
+from librosa.util import normalize, pad_center, tiny
 from scipy.signal import get_window
-from librosa.util import pad_center, tiny, normalize
 
 
 def window_sumsquare(
@@ -176,7 +176,9 @@ def forward(self, x):
 
 
 class ResEncoderBlock(nn.Module):
-    def __init__(self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01):
+    def __init__(
+        self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
+    ):
         super(ResEncoderBlock, self).__init__()
         self.conv = nn.ModuleList(
             [
diff --git a/rvc/modules/download_source.py b/rvc/modules/download_source.py
index a1e108f..c786437 100644
--- a/rvc/modules/download_source.py
+++ b/rvc/modules/download_source.py
@@ -1,7 +1,8 @@
 import urllib.request
+
 import gdown
-import requests
 import gradio as gr
+import requests
 from mega import Mega
 
 
diff --git a/rvc/modules/model_manager.py b/rvc/modules/model_manager.py
index 3e1792e..a66dd21 100644
--- a/rvc/modules/model_manager.py
+++ b/rvc/modules/model_manager.py
@@ -1,7 +1,8 @@
 import os
-import sys
 import shutil
+import sys
 import zipfile
+
 import gradio as gr
 
 from rvc.modules.download_source import download_file
@@ -151,7 +152,9 @@ def upload_separate_files(pth_file, index_file, dir_name, progress=gr.Progress()
 # Основная функция для вызова из командной строки
 def main():
     if len(sys.argv) != 3:
-        print('\nИспользование:\npython3 -m rvc.modules.model_manager "url" "dir_name"\n')
+        print(
+            '\nИспользование:\npython3 -m rvc.modules.model_manager "url" "dir_name"\n'
+        )
         sys.exit(1)
 
     url = sys.argv[1]
diff --git a/rvc/scripts/audio_processing.py b/rvc/scripts/audio_processing.py
index 5198b3b..1fa27ed 100644
--- a/rvc/scripts/audio_processing.py
+++ b/rvc/scripts/audio_processing.py
@@ -1,22 +1,22 @@
 import os
+
+import gradio as gr
 import librosa
 import numpy as np
-import gradio as gr
 import soundfile as sf
 from pedalboard import (
-    Pedalboard,
-    Reverb,
+    Chorus,
     Compressor,
     HighpassFilter,
-    LowShelfFilter,
     HighShelfFilter,
+    LowShelfFilter,
     NoiseGate,
-    Chorus,
+    Pedalboard,
+    Reverb,
 )
 from pedalboard.io import AudioFile
 from pydub import AudioSegment
 
 
-
 OUTPUT_DIR = os.path.join(os.getcwd(), "output")
 
diff --git a/rvc/scripts/edge_tts_conversion.py b/rvc/scripts/edge_tts_conversion.py
index e480251..e423de1 100644
--- a/rvc/scripts/edge_tts_conversion.py
+++ b/rvc/scripts/edge_tts_conversion.py
@@ -1,15 +1,15 @@
+import asyncio
 import gc
 import os
-import torch
+
+import edge_tts
+import gradio as gr
 import librosa
 import numpy as np
-import gradio as gr
 import soundfile as sf
-import edge_tts
-import asyncio
-
-from rvc.infer.infer import Config, load_hubert, get_vc, rvc_infer
+import torch
 
+from rvc.infer.infer import Config, get_vc, load_hubert, rvc_infer
 
 RVC_MODELS_DIR = os.path.join(os.getcwd(), "models")
 HUBERT_MODEL_PATH = os.path.join(
diff --git a/rvc/scripts/voice_conversion.py b/rvc/scripts/voice_conversion.py
index 5e5eba3..c9df367 100644
--- a/rvc/scripts/voice_conversion.py
+++ b/rvc/scripts/voice_conversion.py
@@ -1,13 +1,13 @@
 import gc
 import os
-import torch
+
+import gradio as gr
 import librosa
 import numpy as np
-import gradio as gr
 import soundfile as sf
+import torch
 
-from rvc.infer.infer import Config, load_hubert, get_vc, rvc_infer
-
+from rvc.infer.infer import Config, get_vc, load_hubert, rvc_infer
 
 RVC_MODELS_DIR = os.path.join(os.getcwd(), "models")
 HUBERT_MODEL_PATH = os.path.join(
diff --git a/tabs/conversion/conversion.py b/tabs/conversion/conversion.py
index a1b59cf..5020e2b 100644
--- a/tabs/conversion/conversion.py
+++ b/tabs/conversion/conversion.py
@@ -1,16 +1,16 @@
 import os
+
 import gradio as gr
 
-from rvc.scripts.voice_conversion import voice_pipeline
 from rvc.modules.model_manager import get_folders, update_models_list
 from rvc.modules.ui_updates import (
     process_file_upload,
     show_hop_slider,
-    update_button_text,
-    swap_visibility,
     swap_buttons,
+    swap_visibility,
+    update_button_text,
 )
-
+from rvc.scripts.voice_conversion import voice_pipeline
 from tabs.install.install_huberts import install_hubert_tab
 
 rvc_models_dir = os.path.join(os.getcwd(), "models")
diff --git a/tabs/conversion/edge_tts.py b/tabs/conversion/edge_tts.py
index d918c54..efc8687 100644
--- a/tabs/conversion/edge_tts.py
+++ b/tabs/conversion/edge_tts.py
@@ -1,10 +1,10 @@
 import os
+
 import gradio as gr
 
-from rvc.scripts.edge_tts_conversion import edge_tts_pipeline
 from rvc.modules.model_manager import get_folders, update_models_list
 from rvc.modules.ui_updates import show_hop_slider
-
+from rvc.scripts.edge_tts_conversion import edge_tts_pipeline
 from tabs.install.install_huberts import install_hubert_tab
 
 rvc_models_dir = os.path.join(os.getcwd(), "models")
diff --git a/tabs/install/install_huberts.py b/tabs/install/install_huberts.py
index 6c24913..a317b6f 100644
--- a/tabs/install/install_huberts.py
+++ b/tabs/install/install_huberts.py
@@ -2,6 +2,7 @@
 import re
 import shutil
 import urllib.request
+
 import gradio as gr
 
 embedders_dir = os.path.join(os.getcwd(), "rvc", "models", "embedders")
diff --git a/tabs/install/install_models.py b/tabs/install/install_models.py
index 9e35757..8a74e9f 100644
--- a/tabs/install/install_models.py
+++ b/tabs/install/install_models.py
@@ -2,8 +2,8 @@
 
 from rvc.modules.model_manager import (
     download_from_url,
-    upload_zip_file,
     upload_separate_files,
+    upload_zip_file,
 )
 
 
@@ -66,9 +66,13 @@ def zip_upload():
                     info="Дайте вашей загружаемой модели уникальное имя, "
                     "отличное от других голосовых моделей.",
                 )
-                model_upload_button = gr.Button("Загрузить модель", variant="primary")
+                model_upload_button = gr.Button(
+                    "Загрузить модель", variant="primary"
+                )
 
-        local_upload_output_message = gr.Text(label="Сообщение вывода", interactive=False)
+        local_upload_output_message = gr.Text(
+            label="Сообщение вывода", interactive=False
+        )
         model_upload_button.click(
             upload_zip_file,
             inputs=[zip_file, local_model_name],
@@ -93,7 +97,9 @@ def files_upload():
                     info="Дайте вашей загружаемой модели уникальное имя, "
                     "отличное от других голосовых моделей.",
                 )
-                separate_upload_button = gr.Button("Загрузить модель", variant="primary")
+                separate_upload_button = gr.Button(
+                    "Загрузить модель", variant="primary"
+                )
 
         separate_upload_output_message = gr.Text(
             label="Сообщение вывода", interactive=False
diff --git a/tabs/processing/processing.py b/tabs/processing/processing.py
index b2e34ef..df5e20f 100644
--- a/tabs/processing/processing.py
+++ b/tabs/processing/processing.py
@@ -1,14 +1,14 @@
 import gradio as gr
 
-from rvc.scripts.audio_processing import process_audio
 from rvc.modules.ui_updates import (
     process_file_upload,
-    update_button_text_voc,
-    update_button_text_inst,
-    swap_visibility,
-    swap_buttons,
     show_effects,
+    swap_buttons,
+    swap_visibility,
+    update_button_text_inst,
+    update_button_text_voc,
 )
+from rvc.scripts.audio_processing import process_audio
 
 
 def processing_tab():