Neva 340b configs (#379)
* nemotron4_340b pretrain config

Signed-off-by: HuiyingLi <[email protected]>

* nemotron4 340b peft config

Signed-off-by: HuiyingLi <[email protected]>

* fixes for vision encoder hidden size, template, and PEFT constant LR

Signed-off-by: HuiyingLi <[email protected]>

---------

Signed-off-by: HuiyingLi <[email protected]>
Co-authored-by: Yu Yao <[email protected]>
HuiyingLi and yaoyu-33 authored Jul 23, 2024
1 parent 867ab57 commit 3fc0d65
Showing 2 changed files with 450 additions and 0 deletions.
launcher_scripts/conf/peft/neva/nemotron4_340b_chat.yaml (230 additions, 0 deletions)
@@ -0,0 +1,230 @@
run:
  name: neva_nemotron4_340b_chat_peft
  time_limit: "1-00:00:00"
  dependency: "singleton"
  model_train_name: neva_nemotron4_340b_chat
  results_dir: ${base_results_dir}/${.model_train_name}/${.name}

trainer:
  devices: 8
  num_nodes: 8
  accelerator: gpu
  precision: bf16
  logger: False # logger provided by exp_manager
  enable_checkpointing: False
  use_distributed_sampler: False
  max_epochs: -1 # PTL default. In practice, max_steps will be reached first.
  max_steps: 5190 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches
  log_every_n_steps: 1
  val_check_interval: 100
  check_val_every_n_epoch: null
  limit_val_batches: 50
  limit_test_batches: 500
  accumulate_grad_batches: 1 # do not modify, grad acc is automatic for training megatron models
  gradient_clip_val: 1.0
  benchmark: False
  enable_model_summary: False # default PTL callback for this does not support model parallelism, instead we log manually

exp_manager:
  explicit_log_dir: ${peft.run.results_dir}/results
  exp_dir: null
  name: nemo_neva
  create_wandb_logger: False
  wandb_logger_kwargs:
    project: nemo_neva
    name: ${peft.run.name}
  resume_if_exists: True
  resume_ignore_no_checkpoint: True
  resume_from_checkpoint: ${peft.model.resume_from_checkpoint}
  create_checkpoint_callback: True
  checkpoint_callback_params:
    monitor: val_loss
    save_top_k: 10
    mode: min
    always_save_nemo: False # saves nemo file during validation, not implemented for model parallel
    save_nemo_on_train_end: True # not recommended when training large models on clusters with short time limits
    filename: 'neva--{val_loss:.2f}-{step}-{consumed_samples}'
    model_parallel_size: ${multiply:${peft.model.tensor_model_parallel_size}, ${peft.model.pipeline_model_parallel_size}}
  ema:
    enable: False
    decay: 0.9999
    validate_original_weights: False
    every_n_steps: 1
    cpu_offload: False

model:
  precision: ${peft.trainer.precision}

  # specify micro_batch_size, global_batch_size, and model parallelism
  # gradient accumulation will be done automatically based on data_parallel_size

  # Batch size guideline for different types of dataset
  micro_batch_size: 1 # limited by GPU memory
  global_batch_size: 128 # will use more micro batches to reach global batch size

  tensor_model_parallel_size: 8 # intra-layer model parallelism
  pipeline_model_parallel_size: 8 # inter-layer model parallelism
  virtual_pipeline_model_parallel_size: null # interleaved pipeline

  restore_from_path: ${base_results_dir}/${peft.run.model_train_name}/results/checkpoints/nemo_neva.nemo # used in fine-tuning

  # Multimodal configs
  mm_cfg:
    llm:
      from_pretrained: ${data_dir}/neva/checkpoints/340b-chat-extracted # path to nemo checkpoint
      freeze: True
      model_type: nv_gpt
    vision_encoder:
      from_pretrained: "google/siglip-so400m-patch14-384" # path or name
      from_hf: True
      patch_dim: 14
      crop_size:
      - 384
      - 384
      hidden_size: 1152 # could be found from model but tricky in code
      vision_select_layer: -2 # default to the last layer
      class_token_length: 0
      freeze: True
    pretrain_mm_mlp_adapter: null # path to pretrained mm adapter
    mm_mlp_adapter_type: linear
    use_im_start_end: False

  peft:
    peft_scheme: "lora"
    restore_from_path: null
    lora_tuning:
      adapter_dim: 128
      adapter_dropout: 0.05
      column_init_method: 'xavier' # IGNORED if linear_adapter is used, options: xavier, zero or normal
      row_init_method: 'zero' # IGNORED if linear_adapter is used, options: xavier, zero or normal
      layer_selection: null # selects in which layers to add lora adapters. e.g. [1,12] will add lora to layer 1 (lowest) and 12. null will apply adapters to all layers
      weight_tying: False
      position_embedding_strategy: null # used only when weight_tying is True

  # LLM configs
  # use GPTModel from megatron.core
  mcore_gpt: True

  # model architecture
  encoder_seq_length: 4096
  max_position_embeddings: ${.encoder_seq_length}
  position_embedding_type: rope
  num_layers: 96
  hidden_size: 18432
  ffn_hidden_size: 73728 # Transformer FFN hidden size. Usually 4 * hidden_size.
  num_attention_heads: 96
  init_method_std: 0.014 # Standard deviation of the zero mean normal distribution used for weight initialization.
  use_scaled_init_method: True # use scaled residuals initialization
  hidden_dropout: 0.0 # Dropout probability for hidden state transformer.
  attention_dropout: 0.0 # Dropout probability for attention
  ffn_dropout: 0.0 # Dropout probability in the feed-forward layer.
  kv_channels: null # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
  apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
  normalization: layernorm1p # Type of normalization layers
  layernorm_epsilon: 1e-5
  do_layer_norm_weight_decay: False # True means weight decay on all params
  make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency.
  pre_process: True # add embedding
  post_process: True # add pooler
  persist_layer_norm: True # Use of persistent fused layer norm kernel.
  bias: False # Whether to use bias terms in all weight matrices.
  activation: 'squared-relu' # Options ['gelu', 'geglu', 'swiglu', 'reglu', 'squared-relu', 'fast-geglu', 'fast-swiglu', 'fast-reglu']
  headscale: False # Whether to learn extra parameters that scale the output of each self-attention head.
  transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
  normalize_attention_scores: True # Whether to scale the output Q * K^T by 1 / sqrt(hidden_size_per_head). This arg is provided as a configuration option mostly for compatibility with models that have been weight-converted from HF. You almost always want to set this to True.
  rotary_percentage: 0.5 # If using position_embedding_type=rope, then the per head dim is multiplied by this.
  attention_type: 'multihead' # Attention type. Options ['multihead']
  share_embeddings_and_output_weights: False # Share embedding and output layer weights.
  overlap_p2p_comm: False # Overlap p2p communication with computes. This argument is valid only when `virtual_pipeline_model_parallel_size` is larger than 1
  batch_p2p_comm: True # Batch consecutive inter-peer send/recv operations. This argument is valid only when `virtual_pipeline_model_parallel_size` is larger than 1
  seq_len_interpolation_factor: null # RoPE interpolation factor for sequence length. This is used to build long-context models with RoPE, e.g. https://arxiv.org/abs/2306.15595.
  num_query_groups: 8 # Number of query groups for group query attention. If None, normal attention is used.
  use_flash_attention: True

  ## Activation Checkpointing
  activations_checkpoint_granularity: null # 'selective' or 'full'
  activations_checkpoint_method: null # 'uniform', 'block', not used with 'selective'
  activations_checkpoint_num_layers: null # not used with 'selective'
  num_micro_batches_with_partial_activation_checkpoints: null
  activations_checkpoint_layers_per_pipeline: null
  sequence_parallel: False

  # precision
  native_amp_init_scale: 4294967296 # 2 ** 32
  native_amp_growth_interval: 1000
  hysteresis: 2 # Gradient scale hysteresis
  fp32_residual_connection: False # Move residual connections to fp32
  fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16

  # model fusions
  masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with its mask.
  bias_dropout_add_fusion: False # Use a kernel that fuses the bias addition, dropout and residual connection addition.

  use_cpu_initialization: False # Init weights on the CPU (slow for large models)
  onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
  gradient_accumulation_fusion: False # Fuse weight gradient accumulation to GEMMs. Only used with pipeline parallelism.
  openai_gelu: False
  bias_activation_fusion: False
  megatron_legacy: False

  transformer_engine: True
  fp8: False # enables fp8 in TransformerLayer forward
  fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3
  fp8_hybrid: False # sets fp8_format = recipe.Format.HYBRID
  fp8_margin: 0 # scaling margin
  fp8_interval: 1 # scaling update interval
  fp8_amax_history_len: 1 # Number of steps for which amax history is recorded per tensor
  fp8_amax_compute_algo: most_recent # 'most_recent' or 'max'. Algorithm for computing amax from history
  use_emha: False # Use fused multi-head attention for large sequence-length. Note this is not yet supported. Please set to False.

  # Megatron O2-style half-precision
  megatron_amp_O2: True # Enable O2-level automatic mixed precision using main parameters
  async_grad_allreduce: False
  grad_allreduce_chunk_size_mb: 125
  grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce

  # miscellaneous
  seed: 1234
  resume_from_checkpoint: null # manually set the checkpoint file to load from
  apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this
  gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)

  tokenizer:
    library: 'sentencepiece'
    type: null
    model: ${data_dir}/neva/tokenizers/tokenizer_neva.model
    vocab_file: null
    merge_file: null
    delimiter: null # only used for tabular tokenizer
    sentencepiece_legacy: False # Legacy=True allows you to add special tokens to sentencepiece tokenizers.

  data:
    num_workers: 8
    dataloader_type: cyclic
    data_path: ${data_dir}/neva/datasets/LLaVA-Instruct-mixture/llava_v1_5_mix665k.json
    lazy_preprocess: True
    is_multimodal: True
    media_type: image
    sep_image_conv_front: False
    conv_template: nv_dpo
    image_folder: ${data_dir}/neva/datasets/LLaVA-Instruct-mixture/images
    image_aspect_ratio: 'square'

  # Nsys profiling options
  nsys_profile:
    enabled: False
    start_step: 10 # Global batch to start profiling
    end_step: 10 # Global batch to end profiling
    ranks: [ 0 ] # Global rank IDs to profile
    gen_shape: False # Generate model and kernel details including input shapes

  optim:
    name: fused_adam
    lr: 3e-5
    weight_decay: 0.
    betas:
    - 0.9
    - 0.95
    sched:
      name: WarmupHoldPolicy
      warmup_steps: 0
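
For context on the numbers above: with 8 nodes of 8 GPUs and tensor/pipeline parallelism of 8 x 8, all 64 GPUs are consumed by model parallelism, so the 128-sample global batch is reached purely through gradient accumulation. Below is a minimal sanity-check sketch of that arithmetic in Python, using only values taken from this config; the variable names are illustrative and are not launcher APIs.

# Derived training arithmetic for neva_nemotron4_340b_chat_peft (sketch).
devices, num_nodes = 8, 8                # trainer.devices, trainer.num_nodes
tp, pp = 8, 8                            # tensor/pipeline model parallel sizes
micro_batch_size, global_batch_size = 1, 128
max_steps = 5190

world_size = devices * num_nodes                          # 64 GPUs
model_parallel_size = tp * pp                             # 64, matches exp_manager's ${multiply:...}
data_parallel_size = world_size // model_parallel_size    # 1

# Gradient accumulation is derived automatically for Megatron-style training:
grad_acc_steps = global_batch_size // (micro_batch_size * data_parallel_size)  # 128

# consumed_samples = global_step * micro_batch_size * data_parallel_size * grad_acc_steps
samples_per_step = micro_batch_size * data_parallel_size * grad_acc_steps      # 128
total_samples = max_steps * samples_per_step              # 664,320

# Attention geometry (hidden_size=18432, num_attention_heads=96, rotary_percentage=0.5)
head_dim = 18432 // 96                                    # 192; kv_channels defaults to this when null
rotary_dim = int(head_dim * 0.5)                          # 96 rotary dims per head

print(world_size, data_parallel_size, grad_acc_steps, total_samples, head_dim, rotary_dim)

The 5190-step run therefore consumes about 664K samples, roughly one pass over the llava_v1_5_mix665k instruction-tuning mixture referenced in the data section.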
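
The LoRA settings also imply a rough budget for trainable parameters. The sketch below assumes adapters are attached only to the fused attention QKV projection, the usual default when no target-module list is given (this config sets none); the exact set of adapted weights depends on the NeMo version, so treat the total as an estimate rather than an exact count.

# Rough LoRA parameter estimate for the nemotron4_340b_chat PEFT config (sketch).
hidden_size = 18432
num_layers = 96
num_heads = 96
num_query_groups = 8          # grouped-query attention
adapter_dim = 128             # peft.lora_tuning.adapter_dim

head_dim = hidden_size // num_heads                       # 192
qkv_out = (num_heads + 2 * num_query_groups) * head_dim   # 21504: Q plus grouped K/V

# LoRA adds two low-rank matrices per adapted linear layer: (in x r) and (r x out).
params_per_layer = adapter_dim * (hidden_size + qkv_out)  # ~5.1M per layer
total_lora_params = params_per_layer * num_layers         # ~491M

print(f"~{total_lora_params / 1e6:.0f}M trainable LoRA parameters (estimate)")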
