Automatic code formatting (#51)
Automatic code formatting with black
Bebra777228 authored Jan 4, 2025
2 parents c17cc05 + f1448ce commit 3957a28
Showing 25 changed files with 156 additions and 91 deletions.
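
The exact invocation is not recorded in the commit, so the following is an inference from the output: the wrapping below matches black's default 88-character line length, and the regrouped imports (standard library first, then third-party, then local, separated by blank lines) suggest an import sorter was run alongside it. A typical command sequence, assuming defaults, would be:

pip install black isort
isort --profile black .
black .
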
6 changes: 3 additions & 3 deletions app.py
@@ -1,13 +1,13 @@
 import os
 import sys
 
 import gradio as gr
 
+from tabs.welcome import welcome_tab
 from tabs.conversion.conversion import conversion_tab
 from tabs.conversion.edge_tts import edge_tts_tab
+from tabs.install.install_models import files_upload, url_download, zip_upload
 from tabs.processing.processing import processing_tab
-from tabs.install.install_models import url_download, zip_upload, files_upload
-
-from tabs.welcome import welcome_tab
+
 DEFAULT_PORT = 4000
 MAX_PORT_ATTEMPTS = 10
1 change: 1 addition & 0 deletions download_models.py
@@ -1,4 +1,5 @@
 import os
+
 import requests
 
 PREDICTORS = "https://huggingface.co/Politrees/RVC_resources/resolve/main/predictors/"
4 changes: 3 additions & 1 deletion rvc/infer/infer.py
@@ -1,10 +1,12 @@
-import torch
 from multiprocessing import cpu_count
+
+import torch
 from fairseq import checkpoint_utils
 from scipy.io import wavfile
 
 from rvc.lib.algorithm.synthesizers import Synthesizer
 from rvc.lib.my_utils import load_audio
 
 from .pipeline import VC
 
+
28 changes: 19 additions & 9 deletions rvc/infer/pipeline.py
@@ -1,11 +1,12 @@
-import os
 import gc
-import torch
-import torch.nn.functional as F
-import torchcrepe
+import os
+
 import faiss
 import librosa
 import numpy as np
+import torch
+import torch.nn.functional as F
+import torchcrepe
 from scipy import signal
 
 from rvc.lib.predictors.FCPE import FCPEF0Predictor
@@ -19,7 +20,9 @@
 FILTER_ORDER = 5  # Filter order
 CUTOFF_FREQUENCY = 48  # Cutoff frequency (Hz)
 SAMPLE_RATE = 16000  # Sample rate (Hz)
-bh, ah = signal.butter(N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE)
+bh, ah = signal.butter(
+    N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE
+)
 
 
 input_audio_path2wav = {}
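
An aside on usage: this hunk only re-wraps the call that builds the filter coefficients, and the place where bh and ah are applied lies outside the hunk. In RVC-style pipelines they typically drive a zero-phase high-pass over the input audio before f0 extraction; a minimal sketch of that presumed usage follows (the helper name highpass is illustrative, not from this commit):

import numpy as np
from scipy import signal

FILTER_ORDER = 5  # Filter order
CUTOFF_FREQUENCY = 48  # Cutoff frequency (Hz)
SAMPLE_RATE = 16000  # Sample rate (Hz)
bh, ah = signal.butter(
    N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE
)


def highpass(audio: np.ndarray) -> np.ndarray:
    # filtfilt runs the filter forward and backward, removing DC offset and
    # sub-48 Hz rumble without introducing phase delay.
    return signal.filtfilt(bh, ah, audio)
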
@@ -56,7 +59,8 @@ def change_rms(source_audio, source_rate, target_audio, target_rate, rate):
     rms2 = torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6)
 
     adjusted_audio = (
-        target_audio * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
+        target_audio
+        * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
     )
     return adjusted_audio
 
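For intuition: the scaling factor rms1 ** (1 - rate) * rms2 ** (rate - 1) interpolates between loudness envelopes. At rate = 1 both exponents are zero, so the factor is 1 and the target keeps its own envelope; at rate = 0 the factor becomes rms1 / rms2, which re-imposes the source's envelope on the target.
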
@@ -186,9 +190,13 @@ def get_f0(
             delta_t = np.round(
                 (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
             ).astype("int16")
-            replace_f0 = np.interp(list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1])
+            replace_f0 = np.interp(
+                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
+            )
             shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
-            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
+            f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
+                :shape
+            ]
 
         f0bak = f0.copy()
         f0_mel = 1127 * np.log(1 + f0 / 700)
@@ -452,7 +460,9 @@ def pipeline(
                 audio, self.sample_rate, audio_opt, tgt_sr, volume_envelope
             )
         if resample_sr >= self.sample_rate and tgt_sr != resample_sr:
-            audio_opt = librosa.resample(audio_opt, orig_sr=tgt_sr, target_sr=resample_sr)
+            audio_opt = librosa.resample(
+                audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
+            )
 
         audio_max = np.abs(audio_opt).max() / 0.99
         max_int16 = 32768
13 changes: 10 additions & 3 deletions rvc/lib/algorithm/attentions.py
@@ -1,4 +1,5 @@
 import math
+
 import torch
 from torch import nn
 from torch.nn import functional as F
@@ -78,7 +79,9 @@ def attention(self, query, key, value, mask=None):
 
         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
         if self.window_size is not None:
-            assert t_s == t_t, "Relative attention is only available for self-attention."
+            assert (
+                t_s == t_t
+            ), "Relative attention is only available for self-attention."
             key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
             rel_logits = self._matmul_with_relative_keys(
                 query / math.sqrt(self.k_channels), key_relative_embeddings
@@ -93,7 +96,9 @@ def attention(self, query, key, value, mask=None):
         if mask is not None:
             scores = scores.masked_fill(mask == 0, -1e4)
             if self.block_length is not None:
-                assert t_s == t_t, "Local attention is only available for self-attention."
+                assert (
+                    t_s == t_t
+                ), "Local attention is only available for self-attention."
                 block_mask = (
                     torch.ones_like(scores)
                     .triu(-self.block_length)
@@ -105,7 +110,9 @@ def attention(self, query, key, value, mask=None):
         output = torch.matmul(p_attn, value)
         if self.window_size is not None:
             relative_weights = self._absolute_position_to_relative_position(p_attn)
-            value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+            value_relative_embeddings = self._get_relative_embeddings(
+                self.emb_rel_v, t_s
+            )
             output = output + self._matmul_with_relative_values(
                 relative_weights, value_relative_embeddings
             )
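
Both asserts reformatted above guard the same invariant: relative position embeddings (window_size) and block-local masking (block_length) are indexed by the query/key offset, which is only well-defined when queries and keys come from the same sequence, i.e. in self-attention (t_s == t_t).
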
7 changes: 5 additions & 2 deletions rvc/lib/algorithm/commons.py
@@ -1,7 +1,8 @@
 import math
+from typing import List, Optional
+
 import torch
 from torch.nn import functional as F
-from typing import List, Optional
 
 
 def init_weights(m, mean=0.0, std=0.01):
@@ -22,7 +23,9 @@ def convert_pad_shape(pad_shape):
 
 def kl_divergence(m_p, logs_p, m_q, logs_q):
     kl = (logs_q - logs_p) - 0.5
-    kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    kl += (
+        0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
+    )
     return kl
 
 
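For reference, with logs_p = log σ_p and logs_q = log σ_q this is the standard closed form for the KL divergence between two diagonal Gaussians: KL(P ‖ Q) = log(σ_q / σ_p) + (σ_p² + (μ_p − μ_q)²) / (2σ_q²) − 1/2.
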
11 changes: 8 additions & 3 deletions rvc/lib/algorithm/discriminators.py
@@ -6,7 +6,6 @@
 from .commons import get_padding
 from .residuals import LRELU_SLOPE
 
-
 PERIODS_V1 = [2, 3, 5, 7, 11, 17]
 PERIODS_V2 = [2, 3, 5, 7, 11, 17, 23, 37]
 IN_CHANNELS = [1, 32, 128, 512, 1024]
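
The periods are primes, as in HiFi-GAN's multi-period discriminator, so each sub-discriminator sees the waveform reshaped at a periodicity that shares no factor with the others, minimizing overlap between their views; V2 simply extends the set with 23 and 37.
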
@@ -18,7 +17,10 @@ def __init__(self, use_spectral_norm=False):
         super(MultiPeriodDiscriminator, self).__init__()
         self.discriminators = nn.ModuleList(
             [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-            + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in PERIODS_V1]
+            + [
+                DiscriminatorP(p, use_spectral_norm=use_spectral_norm)
+                for p in PERIODS_V1
+            ]
         )
 
     def forward(self, y, y_hat):
@@ -39,7 +41,10 @@ def __init__(self, use_spectral_norm=False):
         super(MultiPeriodDiscriminatorV2, self).__init__()
         self.discriminators = nn.ModuleList(
             [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
-            + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in PERIODS_V2]
+            + [
+                DiscriminatorP(p, use_spectral_norm=use_spectral_norm)
+                for p in PERIODS_V2
+            ]
         )
 
     def forward(self, y, y_hat):
3 changes: 2 additions & 1 deletion rvc/lib/algorithm/encoders.py
@@ -1,8 +1,9 @@
 import math
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn.utils.weight_norm import remove_weight_norm
-from typing import Optional
 
 from .attentions import FFN, MultiHeadAttention
 from .commons import sequence_mask
13 changes: 9 additions & 4 deletions rvc/lib/algorithm/generators.py
@@ -1,9 +1,10 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import init_weights
 from .residuals import LRELU_SLOPE, ResBlock1, ResBlock2
@@ -121,10 +122,14 @@ def forward(self, f0: torch.Tensor, upp: int):
             f0_buf[:, :, 0] = f0[:, :, 0]
             f0_buf[:, :, 1:] = (
                 f0_buf[:, :, 0:1]
-                * torch.arange(2, self.harmonic_num + 2, device=f0.device)[None, None, :]
+                * torch.arange(2, self.harmonic_num + 2, device=f0.device)[
+                    None, None, :
+                ]
             )
             rad_values = (f0_buf / float(self.sample_rate)) % 1
-            rand_ini = torch.rand(f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device)
+            rand_ini = torch.rand(
+                f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
+            )
             rand_ini[:, 0] = 0
             rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
             tmp_over_one = torch.cumsum(rad_values, 1)
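
Reading the reformatted block: the last dimension of f0_buf indexes harmonics. Slot 0 carries the fundamental, and multiplying it by torch.arange(2, harmonic_num + 2) fills the remaining slots with its integer multiples. rad_values is then the per-sample phase increment (frequency divided by sample rate, wrapped to [0, 1)); every harmonic except the fundamental gets a random initial phase via rand_ini, and the torch.cumsum that follows integrates the increments into the running phase of the sine source.
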
2 changes: 1 addition & 1 deletion rvc/lib/algorithm/modules.py
@@ -1,7 +1,7 @@
 import torch
 from torch import nn
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import fused_add_tanh_sigmoid_multiply
 
12 changes: 8 additions & 4 deletions rvc/lib/algorithm/nsf.py
@@ -1,10 +1,11 @@
 import math
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import init_weights
 from .generators import SineGen
@@ -59,7 +60,9 @@ def __init__(
         self.num_kernels = len(resblock_kernel_sizes)
         self.num_upsamples = len(upsample_rates)
         self.f0_upsamp = nn.Upsample(scale_factor=math.prod(upsample_rates))
-        self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0, is_half=is_half)
+        self.m_source = SourceModuleHnNSF(
+            sample_rate=sr, harmonic_num=0, is_half=is_half
+        )
 
         self.conv_pre = nn.Conv1d(
             initial_channel, upsample_initial_channel, 7, 1, padding=3
@@ -70,7 +73,8 @@ def __init__(
         self.noise_convs = nn.ModuleList()
 
         channels = [
-            upsample_initial_channel // (2 ** (i + 1)) for i in range(len(upsample_rates))
+            upsample_initial_channel // (2 ** (i + 1))
+            for i in range(len(upsample_rates))
         ]
         stride_f0s = [
             math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
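
A worked example with hypothetical values (not from this diff): for upsample_initial_channel = 512 and upsample_rates = [10, 6, 2, 2], channels comes out as [256, 128, 64, 32] and stride_f0s as [24, 4, 2, 1]. Each stage halves the channel count again, while stride_f0s is the factor by which the f0-driven noise must still be downsampled to match that stage's temporal resolution.
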
6 changes: 3 additions & 3 deletions rvc/lib/algorithm/residuals.py
@@ -1,14 +1,14 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn import functional as F
-from torch.nn.utils.weight_norm import remove_weight_norm
 from torch.nn.utils.parametrizations import weight_norm
-from typing import Optional
+from torch.nn.utils.weight_norm import remove_weight_norm
 
 from .commons import get_padding, init_weights
 from .modules import WaveNet
 
 
 LRELU_SLOPE = 0.1
 
-
7 changes: 4 additions & 3 deletions rvc/lib/algorithm/synthesizers.py
@@ -1,10 +1,11 @@
+from typing import Optional
+
 import torch
 from torch import nn
 from torch.nn.utils.weight_norm import remove_weight_norm
-from typing import Optional
 
-from .commons import slice_segments, rand_slice_segments
-from .encoders import TextEncoder, PosteriorEncoder
+from .commons import rand_slice_segments, slice_segments
+from .encoders import PosteriorEncoder, TextEncoder
 from .generators import Generator
 from .nsf import GeneratorNSF
 from .residuals import ResidualCouplingBlock