Format llm-lora project #94

Merged (1 commit, Mar 22, 2024)
llm-lora-finetuning/lit_gpt/lora.py (4 changes: 3 additions & 1 deletion)

@@ -373,7 +373,9 @@ def zero_pad(self, x: torch.Tensor) -> torch.Tensor:
) # (4096, 256)
return result.view(
(*x.shape[:-1], self.linear.out_features)
).transpose(0, 1) # (64, 64, 384)
).transpose(
0, 1
) # (64, 64, 384)

def conv1d(
self, input: torch.Tensor, weight: torch.Tensor
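The counts in the file header (3 additions, 1 deletion) indicate that the multi-line form is the new one; the change only re-wraps the call chain and does not alter behaviour. A small self-contained illustration of the equivalence (the shapes here are illustrative stand-ins taken from the inline comments, not the actual LoRA configuration):

import torch

x = torch.randn(64, 64, 128)  # stand-in for the layer input; only x.shape[:-1] matters here
out_features = 384            # stand-in for self.linear.out_features
result = torch.randn(64 * 64, out_features)

# Single-line form (removed):
a = result.view((*x.shape[:-1], out_features)).transpose(0, 1)

# Re-wrapped form (added); identical expression, only the whitespace differs:
b = result.view(
    (*x.shape[:-1], out_features)
).transpose(
    0, 1
)

assert torch.equal(a, b)  # both are (64, 64, 384)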
llm-lora-finetuning/lit_gpt/model.py (8 changes: 6 additions & 2 deletions)

@@ -217,7 +217,9 @@ def forward(
B,
T,
C,
) = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
) = (
x.size()
) # batch size, sequence length, embedding dimensionality (n_embd)

qkv = self.attn(x)

@@ -394,7 +396,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
B,
T,
C,
) = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
) = (
x.size()
) # batch size, sequence length, embedding dimensionality (n_embd)
x = x.view(-1, C) # (B*T, C)
router = self.gate(x) # (B*T, n_expert)
probs, indices = torch.topk(
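Both hunks in this file apply the same pattern to the B, T, C unpacking of x.size(). Going by the counts again, the wrapped assignment is the new form; wrapping the right-hand side in parentheses changes nothing, since (x.size()) is the same object as x.size(). A quick sketch with an illustrative tensor:

import torch

x = torch.randn(2, 16, 64)  # illustrative (batch, sequence length, n_embd)

# One-line right-hand side (removed):
(
    B,
    T,
    C,
) = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)

# Parenthesised right-hand side (added); unpacking works exactly the same way:
(
    B,
    T,
    C,
) = (
    x.size()
)  # batch size, sequence length, embedding dimensionality (n_embd)

assert (B, T, C) == (2, 16, 64)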
llm-lora-finetuning/pipelines/evaluate.py (4 changes: 2 additions & 2 deletions)

@@ -15,11 +15,11 @@
# limitations under the License.
#

from steps import evaluate

from zenml import pipeline
from zenml.config import DockerSettings

from steps import evaluate


@pipeline(
settings={
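The hunk shows both orderings of the same three imports without marking which side is new; either ordering is equivalent at runtime. Under the usual isort-style grouping (third-party packages first, then the project's own packages), the formatted header would presumably read:

from zenml import pipeline
from zenml.config import DockerSettings

from steps import evaluate

The same regrouping appears in the other pipeline modules (feature_engineering.py, finetuning.py, merge.py) and, with more imports involved, in run.py and the steps modules further down.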
llm-lora-finetuning/pipelines/feature_engineering.py (4 changes: 2 additions & 2 deletions)

@@ -15,11 +15,11 @@
# limitations under the License.
#

from steps import feature_engineering

from zenml import pipeline
from zenml.config import DockerSettings

from steps import feature_engineering


@pipeline(
settings={
llm-lora-finetuning/pipelines/finetuning.py (4 changes: 2 additions & 2 deletions)

@@ -17,11 +17,11 @@

from typing import Optional

from steps import finetune

from zenml import get_pipeline_context, pipeline
from zenml.config import DockerSettings

from steps import finetune


@pipeline(
settings={
llm-lora-finetuning/pipelines/merge.py (4 changes: 2 additions & 2 deletions)

@@ -15,11 +15,11 @@
# limitations under the License.
#

from steps import merge

from zenml import pipeline
from zenml.config import DockerSettings

from steps import merge


@pipeline(
settings={
llm-lora-finetuning/run.py (4 changes: 2 additions & 2 deletions)

@@ -19,15 +19,15 @@
from typing import Optional

import click
from zenml.logger import get_logger

from pipelines import (
llm_lora_evaluation,
llm_lora_feature_engineering,
llm_lora_finetuning,
llm_lora_merging,
)

from zenml.logger import get_logger

logger = get_logger(__name__)


llm-lora-finetuning/steps/evaluate.py (10 changes: 5 additions & 5 deletions)

@@ -21,20 +21,20 @@
from typing import Any, Dict, List, Literal, Optional

import torch
from evaluate.lm_eval_harness import run_eval_harness
from huggingface_hub import snapshot_download
from pydantic import BaseModel
from scripts.download import download_from_hub
from scripts.merge_lora import merge_lora
from typing_extensions import Annotated
from zenml import step
from zenml.logger import get_logger

from evaluate.lm_eval_harness import run_eval_harness
from scripts.download import download_from_hub
from scripts.merge_lora import merge_lora
from steps.params import LoraParameters
from steps.utils import (
convert_to_lit_checkpoint_if_necessary,
get_huggingface_access_token,
)
from zenml import step
from zenml.logger import get_logger

logger = get_logger(__file__)

llm-lora-finetuning/steps/feature_engineering.py (8 changes: 4 additions & 4 deletions)

@@ -21,14 +21,14 @@
from pathlib import Path
from typing import Any, Dict

from lit_gpt import Config
from materializers.directory_materializer import DirectoryMaterializer
from pydantic import BaseModel
from scripts.download import download_from_hub
from typing_extensions import Annotated
from zenml import log_artifact_metadata, step

from lit_gpt import Config
from materializers.directory_materializer import DirectoryMaterializer
from scripts.download import download_from_hub
from steps.utils import get_huggingface_access_token
from zenml import log_artifact_metadata, step


class FeatureEngineeringParameters(BaseModel):
llm-lora-finetuning/steps/finetune.py (14 changes: 7 additions & 7 deletions)

@@ -20,25 +20,25 @@
from typing import Literal, Optional

import torch
from finetune.lora import setup
from huggingface_hub import upload_folder
from pydantic import BaseModel
from typing_extensions import Annotated
from zenml import get_step_context, log_model_metadata, step
from zenml.logger import get_logger
from zenml.materializers import BuiltInMaterializer

from finetune.lora import setup
from lit_gpt.args import EvalArgs, IOArgs, TrainArgs
from materializers.directory_materializer import DirectoryMaterializer
from pydantic import BaseModel
from scripts.convert_lit_checkpoint import convert_lit_checkpoint
from scripts.download import download_from_hub
from scripts.merge_lora import merge_lora
from scripts.prepare_alpaca import prepare
from typing_extensions import Annotated

from steps.params import LoraParameters
from steps.utils import (
convert_to_lit_checkpoint_if_necessary,
get_huggingface_access_token,
)
from zenml import get_step_context, log_model_metadata, step
from zenml.logger import get_logger
from zenml.materializers import BuiltInMaterializer

logger = get_logger(__file__)

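The import hunks in the steps modules are larger because these files mix installed dependencies with packages that live inside llm-lora-finetuning itself. A rough origin map for the imports in this finetune step, assuming the lit-gpt derived layout (evaluate/, finetune/, lit_gpt/, materializers/, scripts/, and steps/ as in-repo packages); the hunk above does not mark which ordering the formatter actually produced, so this only shows what is being grouped, not the final result:

# Installed dependencies
import torch
from huggingface_hub import upload_folder
from pydantic import BaseModel
from typing_extensions import Annotated
from zenml import get_step_context, log_model_metadata, step
from zenml.logger import get_logger
from zenml.materializers import BuiltInMaterializer

# Packages shipped inside llm-lora-finetuning (assumed in-repo, per the lit-gpt layout)
from finetune.lora import setup
from lit_gpt.args import EvalArgs, IOArgs, TrainArgs
from materializers.directory_materializer import DirectoryMaterializer
from scripts.convert_lit_checkpoint import convert_lit_checkpoint
from scripts.download import download_from_hub
from scripts.merge_lora import merge_lora
from scripts.prepare_alpaca import prepare
from steps.params import LoraParameters
from steps.utils import (
    convert_to_lit_checkpoint_if_necessary,
    get_huggingface_access_token,
)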
llm-lora-finetuning/steps/merge.py (6 changes: 3 additions & 3 deletions)

@@ -21,17 +21,17 @@

from huggingface_hub import snapshot_download, upload_folder
from pydantic import BaseModel
from zenml import log_model_metadata, step
from zenml.logger import get_logger

from scripts.convert_lit_checkpoint import convert_lit_checkpoint
from scripts.download import download_from_hub
from scripts.merge_lora import merge_lora

from steps.params import LoraParameters
from steps.utils import (
convert_to_lit_checkpoint_if_necessary,
get_huggingface_access_token,
)
from zenml import log_model_metadata, step
from zenml.logger import get_logger

logger = get_logger(__file__)

llm-lora-finetuning/steps/utils.py (4 changes: 2 additions & 2 deletions)

@@ -19,10 +19,10 @@
from pathlib import Path
from typing import Optional

from scripts.convert_hf_checkpoint import convert_hf_checkpoint

from zenml.client import Client

from scripts.convert_hf_checkpoint import convert_hf_checkpoint


def get_huggingface_access_token() -> Optional[str]:
"""Get access token for huggingface.
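The hunk ends just after the signature of get_huggingface_access_token. As a minimal sketch only, assuming an environment-variable lookup with a hypothetical HF_TOKEN variable (the project's real helper in steps/utils.py may instead use the ZenML Client imported above), such a function could look like:

import os
from typing import Optional


def get_huggingface_access_token() -> Optional[str]:
    """Get access token for huggingface.

    Returns:
        The token, or None if it is not configured.
    """
    # Hypothetical lookup: read the token from the environment. The actual
    # helper may query the active ZenML stack or a ZenML secret instead.
    return os.environ.get("HF_TOKEN")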