Added better optimizer choices and param support
jaretburkett committed Jul 24, 2023
1 parent 9a28199 commit e6fb022
Showing 5 changed files with 63 additions and 22 deletions.
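As context for the diffs that follow, here is a minimal sketch of what the new option looks like from the config side. The dict form and the specific values are illustrative assumptions; only the `optimizer` and `optimizer_params` keys themselves appear in this commit.

```python
# Hypothetical process-config fragment (Python dict form for illustration only;
# the toolkit's actual config file format is not shown in this commit).
train_config = {
    'optimizer': 'adamw',
    'lr': 1e-4,
    # New in this commit: extra keyword arguments forwarded verbatim to the
    # optimizer constructor via get_optimizer(..., optimizer_params=...).
    'optimizer_params': {
        'weight_decay': 1e-2,
        'betas': (0.9, 0.99),
    },
}
```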
5 changes: 5 additions & 0 deletions README.md
@@ -104,3 +104,8 @@ Just went in and out. It is much worse on smaller faces than shown here.

<img src="https://raw.githubusercontent.com/ostris/ai-toolkit/main/assets/VAE_test1.jpg" width="768" height="auto">

## TODO
- [ ] Add proper regs on sliders
- [ ] Add SDXL support (base model only for now)
- [ ] Add plain erasing
- [ ] Make Textual inversion network trainer (network that spits out TI embeddings)
20 changes: 10 additions & 10 deletions jobs/process/TrainSliderProcess.py
@@ -7,6 +7,7 @@

from toolkit.kohya_model_util import load_vae
from toolkit.lora_special import LoRASpecialNetwork
from toolkit.optimizer import get_optimizer
from toolkit.paths import REPOS_ROOT
import sys

@@ -41,6 +42,7 @@ def flush():
UNET_IN_CHANNELS = 4 # Stable Diffusion's in_channels is fixed at 4; the same holds for XL.
VAE_SCALE_FACTOR = 8 # 2 ** (len(vae.config.block_out_channels) - 1) = 8


class StableDiffusion:
def __init__(self, vae, tokenizer, text_encoder, unet, noise_scheduler):
self.vae = vae
@@ -98,6 +100,7 @@ def __init__(self, **kwargs):
self.train_unet = kwargs.get('train_unet', True)
self.train_text_encoder = kwargs.get('train_text_encoder', True)
self.noise_offset = kwargs.get('noise_offset', 0.0)
self.optimizer_params = kwargs.get('optimizer_params', {})


class ModelConfig:
@@ -377,17 +380,14 @@ def run(self):

self.network.prepare_grad_etc(text_encoder, unet)

optimizer_type = self.train_config.optimizer.lower()
# we call it something different than leco
if optimizer_type == "dadaptation":
optimizer_type = "dadaptadam"
optimizer_module = train_util.get_optimizer(optimizer_type)
optimizer = optimizer_module(
self.network.prepare_optimizer_params(
self.train_config.lr, self.train_config.lr, self.train_config.lr
),
lr=self.train_config.lr
params = self.network.prepare_optimizer_params(
text_encoder_lr=self.train_config.lr,
unet_lr=self.train_config.lr,
default_lr=self.train_config.lr
)
optimizer_type = self.train_config.optimizer.lower()
optimizer = get_optimizer(params, optimizer_type, learning_rate=self.train_config.lr,
optimizer_params=self.train_config.optimizer_params)
lr_scheduler = train_util.get_lr_scheduler(
self.train_config.lr_scheduler,
optimizer,
17 changes: 13 additions & 4 deletions jobs/process/TrainVAEProcess.py
@@ -50,7 +50,8 @@ def __init__(
lambda_gp=10,
start_step=0,
warmup_steps=1000,
process=None
process=None,
optimizer_params=None,
):
self.learning_rate = learning_rate
self.device = device
@@ -65,6 +66,10 @@ def __init__(
self.warmup_steps = warmup_steps
self.start_step = start_step
self.lambda_gp = lambda_gp

if optimizer_params is None:
optimizer_params = {}
self.optimizer_params = optimizer_params
self.print = self.process.print
print(f" Critic config: {self.__dict__}")

@@ -75,7 +80,8 @@ def setup(self):
self.model.train()
self.model.requires_grad_(True)
params = self.model.parameters()
self.optimizer = get_optimizer(params, self.optimizer_type, self.learning_rate)
self.optimizer = get_optimizer(params, self.optimizer_type, self.learning_rate,
optimizer_params=self.optimizer_params)
self.scheduler = torch.optim.lr_scheduler.ConstantLR(
self.optimizer,
total_iters=self.process.max_steps * self.num_critic_per_gen,
@@ -196,6 +202,7 @@ def __init__(self, process_id: int, job, config: OrderedDict):
self.tv_weight = self.get_conf('tv_weight', 1e0, as_type=float)
self.critic_weight = self.get_conf('critic_weight', 1, as_type=float)
self.pattern_weight = self.get_conf('pattern_weight', 1, as_type=float)
self.optimizer_params = self.get_conf('optimizer_params', {})

self.blocks_to_train = self.get_conf('blocks_to_train', ['all'])
self.torch_dtype = get_torch_dtype(self.dtype)
@@ -342,7 +349,8 @@ def get_tv_loss(self, pred, target):

def get_pattern_loss(self, pred, target):
if self._pattern_loss is None:
self._pattern_loss = PatternLoss(pattern_size=8, dtype=self.torch_dtype).to(self.device, dtype=self.torch_dtype)
self._pattern_loss = PatternLoss(pattern_size=8, dtype=self.torch_dtype).to(self.device,
dtype=self.torch_dtype)
loss = torch.mean(self._pattern_loss(pred, target))
return loss

@@ -504,7 +512,8 @@ def run(self):
if self.use_critic:
self.critic.setup()

optimizer = get_optimizer(params, self.optimizer_type, self.learning_rate)
optimizer = get_optimizer(params, self.optimizer_type, self.learning_rate,
optimizer_params=self.optimizer_params)

# setup scheduler
# todo allow other schedulers
6 changes: 3 additions & 3 deletions jobs/process/models/vgg19_critic.py
@@ -21,11 +21,11 @@ def __init__(self):
super(Vgg19Critic, self).__init__()
self.main = nn.Sequential(
# input (bs, 512, 32, 32)
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
nn.Conv2d(512, 1024, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU(0.2), # (bs, 512, 16, 16)
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
nn.Conv2d(1024, 1024, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU(0.2), # (bs, 512, 8, 8)
nn.Conv2d(512, 1, kernel_size=3, stride=2, padding=1),
nn.Conv2d(1024, 1024, kernel_size=3, stride=2, padding=1),
# (bs, 1, 4, 4)
MeanReduce(), # (bs, 1, 1, 1)
nn.Flatten(), # (bs, 1)
37 changes: 32 additions & 5 deletions toolkit/optimizer.py
@@ -4,18 +4,45 @@
def get_optimizer(
params,
optimizer_type='adam',
learning_rate=1e-6
learning_rate=1e-6,
optimizer_params=None
):
if optimizer_params is None:
optimizer_params = {}
lower_type = optimizer_type.lower()
if lower_type == 'dadaptation':
if lower_type.startswith("dadaptation"):
# dadaptation optimizers do not use a standard learning rate; 1.0 is the default value
import dadaptation
print("Using DAdaptAdam optimizer")
optimizer = dadaptation.DAdaptAdam(params, lr=1.0)
use_lr = learning_rate
if use_lr < 0.1:
# dadaptation expects an lr roughly in the 0.1 to 1.0 range; fall back to 1.0
use_lr = 1.0
if lower_type.endswith('lion'):
optimizer = dadaptation.DAdaptLion(params, lr=use_lr, **optimizer_params)
elif lower_type.endswith('adam'):
optimizer = dadaptation.DAdaptAdam(params, lr=use_lr, **optimizer_params)
elif lower_type == 'dadaptation':
# backwards compatibility
optimizer = dadaptation.DAdaptAdam(params, lr=use_lr, **optimizer_params)
# warn user that dadaptation is deprecated
print("WARNING: Dadaptation optimizer type has been changed to DadaptationAdam. Please update your config.")
elif lower_type.endswith("8bit"):
import bitsandbytes

if lower_type == "adam8bit":
return bitsandbytes.optim.Adam8bit(params, lr=learning_rate, **optimizer_params)
elif lower_type == "lion8bit":
return bitsandbytes.optim.Lion8bit(params, lr=learning_rate, **optimizer_params)
else:
raise ValueError(f'Unknown optimizer type {optimizer_type}')
elif lower_type == 'adam':
optimizer = torch.optim.Adam(params, lr=float(learning_rate))
optimizer = torch.optim.Adam(params, lr=float(learning_rate), **optimizer_params)
elif lower_type == 'adamw':
optimizer = torch.optim.AdamW(params, lr=float(learning_rate))
optimizer = torch.optim.AdamW(params, lr=float(learning_rate), **optimizer_params)
elif lower_type == 'lion':
from lion_pytorch import Lion
return Lion(params, lr=learning_rate, **optimizer_params)
else:
raise ValueError(f'Unknown optimizer type {optimizer_type}')
return optimizer
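For completeness, a hedged usage sketch of the updated `get_optimizer` signature; the stand-in module and the hyperparameter values are assumptions for illustration, not part of the commit.

```python
import torch.nn as nn

from toolkit.optimizer import get_optimizer

model = nn.Linear(8, 8)  # stand-in module for the example
optimizer = get_optimizer(
    model.parameters(),
    optimizer_type='adamw',
    learning_rate=1e-4,
    # Forwarded as **optimizer_params to torch.optim.AdamW.
    optimizer_params={'weight_decay': 1e-2},
)
```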
