-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Refine inference code and add alternative inference with vllm
- Loading branch information
Showing
8 changed files
with
203 additions
and
8 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
import json | ||
import os | ||
from pathlib import Path | ||
from pprint import pformat | ||
from typing import Any | ||
|
||
import hydra | ||
import torch | ||
from datasets import load_dataset | ||
from loguru import logger | ||
from omegaconf import DictConfig | ||
from openai import BaseModel | ||
from pydantic import Field | ||
from vllm import LLM, SamplingParams | ||
|
||
from juddges.config import DatasetConfig, LLMConfig | ||
from juddges.preprocessing.context_truncator import ContextTruncator | ||
from juddges.preprocessing.text_encoder import TextEncoderForEvalPlainTextFormat | ||
from juddges.settings import CONFIG_PATH | ||
from juddges.utils.config import resolve_config | ||
|
||
# Allow TF32 matmuls on Ampere+ GPUs for faster float32 math.
torch.set_float32_matmul_precision("high")
# NOTE(review): DEVICE appears unused below — vLLM manages device placement itself; confirm before removing.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Worker count for datasets.map(); overridable via the NUM_PROC env var, single-process by default.
NUM_PROC = int(os.getenv("NUM_PROC", 1))
|
||
|
||
class PredictConfig(BaseModel):
    """Validated runtime configuration for the vLLM prediction script."""

    # Reject unknown keys coming from the resolved hydra config.
    model_config = {"extra": "forbid"}

    model: LLMConfig
    dataset: DatasetConfig
    device_map: str
    output_file: Path
    truncate_context: bool
    generate_kwargs: dict[str, Any] = Field(default_factory=dict)
    random_seed: int

    @property
    def corrected_max_seq_length(self) -> int:
        """Maximum prompt length: model context minus room reserved for generation."""
        return self.model.max_seq_length - self.dataset.max_output_tokens
|
||
|
||
@hydra.main(version_base="1.3", config_path=str(CONFIG_PATH), config_name="predict.yaml")
@torch.inference_mode()
def main(cfg: DictConfig) -> None:
    """Run vLLM inference over the test split and dump answer/gold pairs to JSON.

    Loads a bitsandbytes-quantized model (with optional LoRA adapter), truncates
    each prompt so that prompt + generation fits the model context, generates
    completions and writes ``[{"answer": ..., "gold": ...}, ...]`` to
    ``config.output_file``.
    """
    config = PredictConfig(**resolve_config(cfg))
    logger.info(f"config:\n{pformat(config.model_dump())}")

    output_file = Path(config.output_file)
    output_file.parent.mkdir(parents=True, exist_ok=True)

    ds = load_dataset(config.dataset.name, split="test")

    llm = LLM(
        model=config.model.name,
        quantization="bitsandbytes",
        load_format="bitsandbytes",
        enable_lora=True,
        # NOTE(review): qlora_adapter_name_or_path is deprecated in recent vLLM
        # releases in favour of LoRARequest — confirm against the pinned version.
        qlora_adapter_name_or_path=config.model.adapter_path,
        max_model_len=config.model.max_seq_length,
        max_num_seqs=config.model.batch_size,
    )

    truncator = ContextTruncator(
        tokenizer=llm.llm_engine.tokenizer.get_lora_tokenizer(),
        max_length=config.corrected_max_seq_length,  # leaves room for max_output_tokens
    )
    encoder = TextEncoderForEvalPlainTextFormat(truncator=truncator)
    ds = ds.map(encoder, num_proc=NUM_PROC)
    # Fix: removed leftover debug `ds = ds.select(range(10))`, which silently
    # restricted evaluation to the first 10 examples.

    params = SamplingParams(
        max_tokens=config.generate_kwargs.get("max_new_tokens", 100),
        temperature=config.generate_kwargs.get("temperature", 0.0),
        seed=config.random_seed,  # fix: random_seed was declared in config but never used
    )

    outputs = llm.generate(
        prompts=ds["final_input"],
        sampling_params=params,
    )
    # Fix: RequestOutput objects are not JSON-serializable — json.dump below would
    # raise TypeError. Keep only the generated text of the first completion.
    results = [
        {"answer": out.outputs[0].text, "gold": gold}
        for out, gold in zip(outputs, ds["output"])
    ]

    with open(output_file, "w") as f:
        json.dump(results, f, indent="\t", ensure_ascii=False)
|
||
|
||
if __name__ == "__main__":
    main()  # hydra parses CLI overrides and injects the resolved cfg
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,88 @@ | ||
import os | ||
import shutil | ||
import subprocess | ||
from pathlib import Path | ||
from pprint import pformat | ||
|
||
import typer | ||
from loguru import logger | ||
from tqdm import tqdm | ||
|
||
# DeepSpeed drops this conversion helper script inside every checkpoint directory.
SCRIPT_NAME = "zero_to_fp32.py"
# Presence of consolidated safetensors shards marks an already-converted checkpoint.
CONVERTED_MODEL_PATTERN = "model*.safetensors"
|
||
|
||
def main(
    root_dir: Path = typer.Option(),
    adapter_only: bool = typer.Option(False, help="Only convert adapter"),
    remove: bool = typer.Option(False, help="Removes original deepspeed checkpoints"),
    remove_for_converted: bool = typer.Option(
        False, help="Removes only original deepspeed checkpoints for already converted models"
    ),
) -> None:
    """Convert every DeepSpeed ZeRO checkpoint under root_dir to fp32 safetensors.

    A checkpoint directory is identified by the presence of the DeepSpeed-generated
    zero_to_fp32.py script. Directories already containing model*.safetensors shards
    are skipped; their DeepSpeed artifacts can be cleaned with --remove-for-converted.
    """
    # Every directory holding a zero_to_fp32.py script is a checkpoint to process.
    checkpoint_dirs = [script_file.parent for script_file in root_dir.rglob(SCRIPT_NAME)]
    logger.info(f"Found {len(checkpoint_dirs)} checkpoints to convert:\n{pformat(checkpoint_dirs)}")
    for ckpt_dir in tqdm(checkpoint_dirs, desc="Converting checkpoints"):
        logger.info(f"Converting {ckpt_dir}")
        if list(ckpt_dir.glob(CONVERTED_MODEL_PATTERN)):
            # Already converted: skip conversion AND the adapter-rename/remove
            # steps below (the `continue`), optionally cleaning up artifacts.
            logger.warning(f"Model already converted, skipping {ckpt_dir}")
            if remove_for_converted:
                logger.info(f"Removing deepspeed artifacts for {ckpt_dir}")
                remove_deepspeed_artifacts(ckpt_dir)
            continue
        else:
            convert(ckpt_dir)

        # deepspeed saves model as model.safetensors, need to rename it to adapter_model.safetensors
        if adapter_only:
            # there should be (almost) empty adapter_model.safetensors
            # (presumably written by the trainer before consolidation — the rename
            # below overwrites it with the consolidated weights).
            assert (ckpt_dir / "adapter_model.safetensors").exists()
            # NOTE(review): after this rename no model*.safetensors remains, so a
            # re-run would treat the directory as unconverted — confirm intended.
            for model_file in ckpt_dir.glob("model*.safetensors"):
                model_file.rename(
                    model_file.with_stem(model_file.stem.replace("model", "adapter_model"))
                )

        if remove:
            remove_deepspeed_artifacts(ckpt_dir)
|
||
|
||
def convert(ckpt_dir: Path) -> None:
    """Invoke DeepSpeed's bundled zero_to_fp32.py to consolidate *ckpt_dir* in place."""
    latest_step = get_latest_step_dir(ckpt_dir)
    logger.info(f"Converting {latest_step}")
    command = [
        "python",
        str(ckpt_dir / SCRIPT_NAME),
        str(ckpt_dir),  # checkpoint_dir
        str(ckpt_dir),  # output_dir
        "--safe_serialization",  # writes as safetensors file
        "--max_shard_size",
        "5GB",
        "--tag",
        latest_step.name,  # points to directory globalstep<step_num>
    ]
    # Hide all GPUs so the conversion runs on CPU and grabs no device memory.
    subprocess.run(command, check=True, env={**os.environ, "CUDA_VISIBLE_DEVICES": "-1"})
|
||
|
||
def remove_deepspeed_artifacts(ckpt_dir: Path) -> None:
    """Delete DeepSpeed-specific files from *ckpt_dir*, keeping converted weights.

    Removes the global-step directory, per-rank RNG states, the bundled
    conversion script, the ``latest`` pointer file and ``scheduler.pt``.
    Missing files are tolerated so cleanup is idempotent: the original
    ``os.remove`` calls crashed with FileNotFoundError when re-run on a
    partially cleaned checkpoint.
    """
    try:
        step_dir = get_latest_step_dir(ckpt_dir)
    except FileNotFoundError:
        step_dir = None  # "latest" pointer already removed — nothing to resolve
    if step_dir is not None:
        logger.info(f"Removing {step_dir}")
        shutil.rmtree(step_dir, ignore_errors=True)

    for rng_file in ckpt_dir.glob("rng_state_*.pth"):
        rng_file.unlink()

    # unlink(missing_ok=True) keeps a partially cleaned checkpoint from crashing a re-run.
    (ckpt_dir / SCRIPT_NAME).unlink(missing_ok=True)
    (ckpt_dir / "latest").unlink(missing_ok=True)
    (ckpt_dir / "scheduler.pt").unlink(missing_ok=True)
|
||
|
||
def get_latest_step_dir(ckpt_dir: Path) -> Path:
    """Resolve the newest global-step directory recorded in the ``latest`` pointer file."""
    step_dirname = (ckpt_dir / "latest").read_text().strip()
    return ckpt_dir / step_dirname
|
||
|
||
if __name__ == "__main__":
    typer.run(main)  # expose `main` as a single-command CLI