
Commit

fix lint
HIT-cwh committed Jul 19, 2024
1 parent 27cf856 · commit 415ad36
Showing 6 changed files with 16 additions and 14 deletions.
@@ -36,7 +36,7 @@
 use_varlen_attn = False

 # Data
-data_files = ['/root/ld/pull_request/xtuner/xtuner/configs/custom_dataset/pretrain/minicpm/pretrain.json']
+data_files = ['/path/to/json/file.json']
 max_length = 2048
 pack_to_max_length = True

@@ -36,7 +36,7 @@
 use_varlen_attn = False

 # Data
-data_files = ['/root/ld/pull_request/xtuner/xtuner/configs/custom_dataset/pretrain/minicpm/pretrain.json']
+data_files = ['/path/to/json/file.json']
 max_length = 2048
 pack_to_max_length = True
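In both of these config hunks the author's hardcoded local path is replaced with a generic placeholder, so users point data_files at their own JSON file. As a rough sketch (not code from this repository), a file list of this form is typically consumed through the Hugging Face datasets API:

from datasets import load_dataset

# Placeholder path taken from the diff above; substitute your own JSON file.
data_files = ['/path/to/json/file.json']

# Load the JSON records as a single training split and inspect one sample.
train_ds = load_dataset('json', data_files=data_files, split='train')
print(train_ds[0])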

xtuner/configs/minicpm/1_2b/minicpm_1b_dpo_qlora.py (8 changes: 5 additions & 3 deletions)
@@ -1,13 +1,15 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from datasets import load_dataset
 import torch
+from datasets import load_dataset
 from mmengine.dataset import DefaultSampler
 from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                             LoggerHook, ParamSchedulerHook)
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
-from torch.optim import AdamW
-from transformers import AutoModelForCausalLM, AutoTokenizer,BitsAndBytesConfig
 from peft import LoraConfig
+from torch.optim import AdamW
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                          BitsAndBytesConfig)
+
 from xtuner.dataset.collate_fns.preference_collate_fn import \
     preference_collate_fn
 from xtuner.dataset.preference_dataset import (build_preference_dataset,
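The import block is reordered into isort's convention, and BitsAndBytesConfig stays because the QLoRA variant quantizes the base model before LoRA adapters are attached. A hedged sketch of the usual 4-bit setup that motivates this import; the values are illustrative defaults, not taken from this config:

import torch
from transformers import BitsAndBytesConfig

# Typical 4-bit NF4 quantization settings used for QLoRA fine-tuning;
# illustrative only, the exact values in the config may differ.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type='nf4')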
xtuner/configs/minicpm/1_2b/minicpm_1b_lora_alpaca_zh_e3.py (5 changes: 2 additions & 3 deletions)
@@ -7,8 +7,7 @@
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
 from peft import LoraConfig
 from torch.optim import AdamW
-from transformers import (AutoModelForCausalLM, AutoTokenizer,
-                          BitsAndBytesConfig)
+from transformers import AutoModelForCausalLM, AutoTokenizer

 from xtuner.dataset import process_hf_dataset
 from xtuner.dataset.collate_fns import default_collate_fn
@@ -78,7 +77,7 @@
         pretrained_model_name_or_path=pretrained_model_name_or_path,
         trust_remote_code=True,
         torch_dtype=torch.float16,
-        ),
+    ),
     lora=dict(
         type=LoraConfig,
         r=64,
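Only r=64 is visible in this hunk; the rest of the lora=dict(...) block sits below the fold. As a hedged sketch, a LoRA adapter specification of this shape usually carries a few more fields; everything below except r=64 is an assumed typical value, not taken from this file:

from peft import LoraConfig

# Illustrative LoRA settings; only r=64 appears in the diff above.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,        # assumed scaling factor
    lora_dropout=0.1,     # assumed dropout rate
    bias='none',
    task_type='CAUSAL_LM')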
xtuner/configs/minicpm/2b/minicpm_2b_dpo_qlora.py (8 changes: 5 additions & 3 deletions)
@@ -1,13 +1,15 @@
 # Copyright (c) OpenMMLab. All rights reserved.
-from datasets import load_dataset
 import torch
+from datasets import load_dataset
 from mmengine.dataset import DefaultSampler
 from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                             LoggerHook, ParamSchedulerHook)
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
-from torch.optim import AdamW
-from transformers import AutoModelForCausalLM, AutoTokenizer,BitsAndBytesConfig
 from peft import LoraConfig
+from torch.optim import AdamW
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                          BitsAndBytesConfig)
+
 from xtuner.dataset.collate_fns.preference_collate_fn import \
     preference_collate_fn
 from xtuner.dataset.preference_dataset import (build_preference_dataset,
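The 2b DPO QLoRA config receives the same import cleanup as the 1_2b one. Several of these imports (AdamW, AmpOptimWrapper, CosineAnnealingLR, LinearLR) feed the optimizer wrapper defined later in the file; a hedged sketch of how such a wrapper is commonly assembled in MMEngine-style configs, with placeholder hyperparameters that are not taken from this config:

from mmengine.optim import AmpOptimWrapper
from torch.optim import AdamW

# Illustrative optimizer wrapper: AdamW under automatic mixed precision
# with gradient clipping. All values are placeholders.
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(type=AdamW, lr=5e-7, betas=(0.9, 0.999), weight_decay=0.01),
    clip_grad=dict(max_norm=1.0, error_if_nonfinite=False),
    loss_scale='dynamic',
    dtype='float16')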
xtuner/configs/minicpm/2b/minicpm_2b_lora_alpaca_zh_e3.py (5 changes: 2 additions & 3 deletions)
@@ -7,8 +7,7 @@
 from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
 from peft import LoraConfig
 from torch.optim import AdamW
-from transformers import (AutoModelForCausalLM, AutoTokenizer,
-                          BitsAndBytesConfig)
+from transformers import AutoModelForCausalLM, AutoTokenizer

 from xtuner.dataset import process_hf_dataset
 from xtuner.dataset.collate_fns import default_collate_fn
@@ -78,7 +77,7 @@
         pretrained_model_name_or_path=pretrained_model_name_or_path,
         trust_remote_code=True,
         torch_dtype=torch.float16,
-        ),
+    ),
     lora=dict(
         type=LoraConfig,
         r=64,
