From bfbd06c79e889f85df52d626c83f98e5ca162a3a Mon Sep 17 00:00:00 2001
From: ARVINDH-CT06 <24bct106@kgcas.com>
Date: Fri, 18 Oct 2024 22:08:34 +0530
Subject: [PATCH] webui.py: replace the Gradio demo with an LLM training script

---
 webui.py | 263 ++++++++++++++++++-------------------------------------
 1 file changed, 83 insertions(+), 180 deletions(-)

diff --git a/webui.py b/webui.py
index 0233a94..6ce143a 100644
--- a/webui.py
+++ b/webui.py
@@ -1,188 +1,91 @@
-# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import os
-import sys
-import argparse
-import gradio as gr
-import numpy as np
 import torch
-import torchaudio
-import random
-import librosa
-ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
-from cosyvoice.cli.cosyvoice import CosyVoice
-from cosyvoice.utils.file_utils import load_wav, logging
-from cosyvoice.utils.common import set_all_random_seed
-
-inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
-instruct_dict = {'预训练音色': '1. 选择预训练音色\n2. 点击生成音频按钮',
-                 '3s极速复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 输入prompt文本\n3. 点击生成音频按钮',
-                 '跨语种复刻': '1. 选择prompt音频文件,或录入prompt音频,注意不超过30s,若同时提供,优先选择prompt音频文件\n2. 点击生成音频按钮',
-                 '自然语言控制': '1. 选择预训练音色\n2. 输入instruct文本\n3. 点击生成音频按钮'}
-stream_mode_list = [('否', False), ('是', True)]
-max_val = 0.8
-
-
-def generate_seed():
-    seed = random.randint(1, 100000000)
-    return {
-        "__type__": "update",
-        "value": seed
-    }
-
-
-def postprocess(speech, top_db=60, hop_length=220, win_length=440):
-    speech, _ = librosa.effects.trim(
-        speech, top_db=top_db,
-        frame_length=win_length,
-        hop_length=hop_length
-    )
-    if speech.abs().max() > max_val:
-        speech = speech / speech.abs().max() * max_val
-    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
-    return speech
-
-
-def change_instruction(mode_checkbox_group):
-    return instruct_dict[mode_checkbox_group]
-
-
-def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
-                   seed, stream, speed):
-    if prompt_wav_upload is not None:
-        prompt_wav = prompt_wav_upload
-    elif prompt_wav_record is not None:
-        prompt_wav = prompt_wav_record
-    else:
-        prompt_wav = None
-    # if instruct mode, please make sure that model is iic/CosyVoice-300M-Instruct and not cross_lingual mode
-    if mode_checkbox_group in ['自然语言控制']:
-        if cosyvoice.frontend.instruct is False:
-            gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M-Instruct模型'.format(args.model_dir))
-            yield (target_sr, default_data)
-        if instruct_text == '':
-            gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本')
-            yield (target_sr, default_data)
-        if prompt_wav is not None or prompt_text != '':
-            gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略')
-    # if cross_lingual mode, please make sure that model is iic/CosyVoice-300M and tts_text prompt_text are different language
-    if mode_checkbox_group in ['跨语种复刻']:
-        if cosyvoice.frontend.instruct is True:
-            gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用iic/CosyVoice-300M模型'.format(args.model_dir))
-            yield (target_sr, default_data)
-        if instruct_text != '':
-            gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略')
-        if prompt_wav is None:
-            gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频')
-            yield (target_sr, default_data)
-        gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言')
-    # if in zero_shot cross_lingual, please make sure that prompt_text and prompt_wav meets requirements
-    if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']:
-        if prompt_wav is None:
-            gr.Warning('prompt音频为空,您是否忘记输入prompt音频?')
-            yield (target_sr, default_data)
-        if torchaudio.info(prompt_wav).sample_rate < prompt_sr:
-            gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr))
-            yield (target_sr, default_data)
-    # sft mode only use sft_dropdown
-    if mode_checkbox_group in ['预训练音色']:
-        if instruct_text != '' or prompt_wav is not None or prompt_text != '':
-            gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!')
-    # zero_shot mode only use prompt_wav prompt text
-    if mode_checkbox_group in ['3s极速复刻']:
-        if prompt_text == '':
-            gr.Warning('prompt文本为空,您是否忘记输入prompt文本?')
-            yield (target_sr, default_data)
-        if instruct_text != '':
-            gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!')
-
-    if mode_checkbox_group == '预训练音色':
-        logging.info('get sft inference request')
-        set_all_random_seed(seed)
-        for i in cosyvoice.inference_sft(tts_text, sft_dropdown, stream=stream, speed=speed):
-            yield (target_sr, i['tts_speech'].numpy().flatten())
-    elif mode_checkbox_group == '3s极速复刻':
-        logging.info('get zero_shot inference request')
-        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
-        set_all_random_seed(seed)
-        for i in cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k, stream=stream, speed=speed):
-            yield (target_sr, i['tts_speech'].numpy().flatten())
-    elif mode_checkbox_group == '跨语种复刻':
-        logging.info('get cross_lingual inference request')
-        prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
-        set_all_random_seed(seed)
-        for i in cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k, stream=stream, speed=speed):
-            yield (target_sr, i['tts_speech'].numpy().flatten())
-    else:
-        logging.info('get instruct inference request')
-        set_all_random_seed(seed)
-        for i in cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text, stream=stream, speed=speed):
-            yield (target_sr, i['tts_speech'].numpy().flatten())
+import torch.nn as nn
+import torch.optim as optim
+from torch.utils.data import DataLoader
+from transformers import YourModel, YourTokenizer  # placeholders: substitute real classes, e.g. AutoModelForSequenceClassification / AutoTokenizer
+import argparse
+import os
+def train(model, dataloader, optimizer, device, accumulation_steps=2):
+    model.train()
+    total_loss = 0
+    optimizer.zero_grad()
+
+    for i, (inputs, labels) in enumerate(dataloader):
+        inputs, labels = inputs.to(device), labels.to(device)
+
+        # Forward pass (assumes the model returns raw logits)
+        outputs = model(inputs)
+        loss = nn.CrossEntropyLoss()(outputs, labels)
+
+        # Normalize loss by accumulation steps
+        loss = loss / accumulation_steps
+        loss.backward()
+
+        # Step every accumulation_steps batches, and flush any leftover partial accumulation at the end
+        if (i + 1) % accumulation_steps == 0 or (i + 1) == len(dataloader):
+            optimizer.step()
+            optimizer.zero_grad()
+
+        total_loss += loss.item() * accumulation_steps  # undo the normalization so the reported loss stays per batch
+
+    return total_loss / len(dataloader)
 def main():
-    with gr.Blocks() as demo:
-        gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) \
-                    预训练模型 [CosyVoice-300M](https://www.modelscope.cn/models/iic/CosyVoice-300M) \
-                    [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/iic/CosyVoice-300M-Instruct) \
-                    [CosyVoice-300M-SFT](https://www.modelscope.cn/models/iic/CosyVoice-300M-SFT)")
-        gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作")
-
-        tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。")
-        with gr.Row():
-            mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0])
-            instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5)
-            sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25)
-            stream = gr.Radio(choices=stream_mode_list, label='是否流式推理', value=stream_mode_list[0][1])
-            speed = gr.Number(value=1, label="速度调节(仅支持非流式推理)", minimum=0.5, maximum=2.0, step=0.1)
-            with gr.Column(scale=0.25):
-                seed_button = gr.Button(value="\U0001F3B2")
-                seed = gr.Number(value=0, label="随机推理种子")
-
-        with gr.Row():
-            prompt_wav_upload = gr.Audio(sources='upload', type='filepath', label='选择prompt音频文件,注意采样率不低于16khz')
-            prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件')
-        prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本,需与prompt音频内容一致,暂时不支持自动识别...", value='')
-        instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='')
-
-        generate_button = gr.Button("生成音频")
-
-        audio_output = gr.Audio(label="合成音频", autoplay=True, streaming=True)
-
-        seed_button.click(generate_seed, inputs=[], outputs=seed)
-        generate_button.click(generate_audio,
-                              inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text,
-                                      seed, stream, speed],
-                              outputs=[audio_output])
-        mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text])
-    demo.queue(max_size=4, default_concurrency_limit=2)
-    demo.launch(server_name='0.0.0.0', server_port=args.port)
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--epochs', type=int, default=3, help='Number of training epochs')
+    parser.add_argument('--batch_size', type=int, default=8, help='Training batch size')
+    parser.add_argument('--lr', type=float, default=3e-5, help='Learning rate')
+    parser.add_argument('--model_path', type=str, default='path_to_model', help='Path to your model')
+    parser.add_argument('--use_fp16', action='store_true', help='Use mixed precision training')
+    args = parser.parse_args()
+    # Set device
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    # Load model and tokenizer
+    model = YourModel.from_pretrained(args.model_path).to(device)
+    tokenizer = YourTokenizer.from_pretrained(args.model_path)  # loaded for dataset preparation; unused below
+
+    # Data loading and preparation
+    train_dataset = YourDataset()  # placeholder: supply a Dataset of (input, label) pairs, e.g. the sketch after the patch
+    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
+
+    # Optimizer (no learning rate scheduler is configured here)
+    optimizer = optim.AdamW(model.parameters(), lr=args.lr)
+
+    # Mixed precision training
+    scaler = torch.cuda.amp.GradScaler() if args.use_fp16 else None
+
+    # Training loop
+    for epoch in range(args.epochs):
+        print(f'Epoch {epoch + 1}/{args.epochs}')
+
+        model.train()
+        total_loss = 0
+
+        for i, (inputs, labels) in enumerate(train_loader):
+            inputs, labels = inputs.to(device), labels.to(device)
+
+            optimizer.zero_grad()
+
+            with torch.cuda.amp.autocast(enabled=args.use_fp16):
+                outputs = model(inputs)
+                loss = nn.CrossEntropyLoss()(outputs, labels)
+
+            # Scale the loss for mixed precision; otherwise do a plain backward pass
+            if scaler:
+                scaler.scale(loss).backward()
+                scaler.step(optimizer)
+                scaler.update()
+            else:
+                loss.backward()
+                optimizer.step()
+
+            total_loss += loss.item()
+
+        avg_loss = total_loss / len(train_loader)
+        print(f'Average Loss: {avg_loss:.4f}')
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--port',
-                        type=int,
-                        default=8000)
-    parser.add_argument('--model_dir',
-                        type=str,
-                        default='pretrained_models/CosyVoice-300M',
-                        help='local path or modelscope repo id')
-    args = parser.parse_args()
-    cosyvoice = CosyVoice(args.model_dir)
-    sft_spk = cosyvoice.list_avaliable_spks()
-    prompt_sr, target_sr = 16000, 22050
-    default_data = np.zeros(target_sr)
     main()
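
Note: YourModel, YourTokenizer, and YourDataset are placeholders, not real transformers exports; the script will not run until they are replaced with concrete classes. As a minimal sketch of what the training loops expect, assuming a Hugging Face style tokenizer and a model whose forward pass maps a batch of token ids to logits, a dataset could look like the following (all names here are illustrative, not part of the patch):

    import torch
    from torch.utils.data import Dataset

    class YourDataset(Dataset):
        """Toy dataset yielding the (input_ids, label) pairs the loops above consume."""

        def __init__(self, texts, labels, tokenizer, max_length=128):
            # Tokenize once up front; padding/truncation yields fixed-length id tensors.
            enc = tokenizer(texts, padding='max_length', truncation=True,
                            max_length=max_length, return_tensors='pt')
            self.input_ids = enc['input_ids']
            self.labels = torch.tensor(labels, dtype=torch.long)

        def __len__(self):
            return len(self.labels)

        def __getitem__(self, idx):
            # DataLoader batches these into the (inputs, labels) tuples used above.
            return self.input_ids[idx], self.labels[idx]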
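
Two design points in the patch are worth spelling out. First, train() divides each loss by accumulation_steps and steps the optimizer only every accumulation_steps batches, which approximates one update over an effective batch of batch_size * accumulation_steps; note that main() runs its own per-batch loop and never calls train(), so accumulation only takes effect if train() is wired in. Second, with --use_fp16 the GradScaler scales the loss before backward() so small fp16 gradients do not underflow, then unscales and skips the step if it finds infs/NaNs. A typical invocation, given the argparse flags defined above:

    python webui.py --epochs 3 --batch_size 8 --lr 3e-5 --model_path path_to_model --use_fp16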