feat: adding the CI tests for linting #41

Merged
Binary file removed .DS_Store
43 changes: 43 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,43 @@
name: CI

on:
push:
branches:
- main
- dev
pull_request:
branches:
- main
- dev

# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:

jobs:

Linting:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11']

steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}

- uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip

- name: Install Dependencies
run: |
python -m pip install --upgrade pip
make dev

- name: Lint with mypy, black and ruff
run: |
make lint
3 changes: 2 additions & 1 deletion .gitignore
@@ -159,4 +159,5 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.vscode/
.vscode/
.DS_Store
8 changes: 7 additions & 1 deletion Makefile
@@ -15,11 +15,17 @@ watch-docs: ## Build and watch documentation
sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/

build:
mypy .
black . --check
ruff check .
rm -rf dist/ build/
python -m pip install build
python -m build .

upload:
python -m pip install twine
python -m twine upload dist/portkey-ai-*
rm -rf dist
rm -rf dist

dev:
pip install -e ".[dev]"
3 changes: 2 additions & 1 deletion portkey/api_resources/base_client.py
@@ -354,7 +354,8 @@ def _extract_stream_chunk_type(self, stream_cls: Type) -> type:
args = get_args(stream_cls)
if not args:
raise TypeError(
f"Expected stream_cls to have been given a generic type argument, e.g. Stream[Foo] but received {stream_cls}",
f"Expected stream_cls to have been given a generic type argument, e.g. \
Stream[Foo] but received {stream_cls}",
)
return cast(type, args[0])

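For context on the hunk above: `typing.get_args` is what drives this check. It returns the parameters a generic alias was subscripted with, and an empty tuple for a bare class, which is exactly the error case the message describes. A minimal, self-contained sketch of the mechanism (this `Stream` and `ChatChunk` are stand-ins, not the SDK's actual classes):

```python
from typing import Generic, Type, TypeVar, cast, get_args

T = TypeVar("T")


class Stream(Generic[T]):
    """Stand-in for the SDK's generic stream wrapper."""


class ChatChunk:
    """Placeholder chunk type."""


def extract_chunk_type(stream_cls: Type) -> type:
    # get_args(Stream[ChatChunk]) -> (ChatChunk,); get_args(Stream) -> ()
    args = get_args(stream_cls)
    if not args:
        raise TypeError(
            f"Expected a generic type argument, e.g. Stream[Foo], "
            f"but received {stream_cls}"
        )
    return cast(type, args[0])


assert extract_chunk_type(Stream[ChatChunk]) is ChatChunk
# extract_chunk_type(Stream) would raise: no type argument was given
```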
2 changes: 1 addition & 1 deletion portkey/api_resources/common_types.py
@@ -1,4 +1,4 @@
from typing import TypeVar, Union, Any
from typing import TypeVar, Union
from .streaming import Stream
from .utils import ChatCompletionChunk, TextCompletionChunk, GenericResponse

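The import trims here and in `streaming.py` / `utils.py` below all address ruff's F401 rule ("imported but unused"). An illustration of the before/after, with an approximate diagnostic (exact message format may vary by ruff version):

```python
# Before -- ruff reports something like:
#   common_types.py:1:36: F401 `typing.Any` imported but unused
# from typing import TypeVar, Union, Any

# After -- only the names the module actually references remain:
from typing import TypeVar, Union

ParamsT = TypeVar("ParamsT", bound=Union[int, str])  # uses both imports
```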
3 changes: 0 additions & 3 deletions portkey/api_resources/streaming.py
@@ -6,11 +6,8 @@
import httpx

from .utils import (
ChatCompletionChunk,
ResponseT,
TextCompletionChunk,
make_status_error,
ApiType,
)


3 changes: 1 addition & 2 deletions portkey/api_resources/utils.py
@@ -19,7 +19,6 @@
)
from .global_constants import (
MISSING_API_KEY_ERROR_MESSAGE,
INVALID_PORTKEY_MODE,
MISSING_BASE_URL,
MISSING_CONFIG_MESSAGE,
MISSING_MODE_MESSAGE,
@@ -48,7 +47,7 @@ class CacheType(str, Enum, metaclass=MetaEnum):

ResponseT = TypeVar(
"ResponseT",
bound="Union[ChatCompletionChunk, ChatCompletion, TextCompletionChunk, TextCompletion, GenericResponse]",
bound="Union[ChatCompletionChunk, ChatCompletion, TextCompletionChunk, TextCompletion, GenericResponse]", # noqa: E501
)


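Two details in the `ResponseT` change are easy to miss: the bound is a quoted string, so it can forward-reference classes defined later in the module, and the trailing `# noqa: E501` suppresses only the line-length rule on that single line instead of relaxing it project-wide. A sketch of the same pattern with placeholder classes:

```python
from typing import List, TypeVar, Union

# The quoted bound is a forward reference: the classes appear further down.
ResponseT = TypeVar("ResponseT", bound="Union[ChatCompletion, TextCompletion]")


class ChatCompletion:
    pass


class TextCompletion:
    pass


def first_response(batch: List[ResponseT]) -> ResponseT:
    # Type checkers resolve the quoted bound when they check call sites.
    return batch[0]
```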
5 changes: 4 additions & 1 deletion setup.cfg
@@ -38,8 +38,11 @@ dev =
black==23.7.0
typing_extensions==4.7.1
pydantic==1.10.12
pytest==7.4.2
python-dotenv==1.0.0
ruff==0.0.292

[options.packages.find]
exclude =
tests
tests/
tests.*
3 changes: 0 additions & 3 deletions setup.py

This file was deleted.

18 changes: 9 additions & 9 deletions tests/anyscale_tests/test_anyscale_CodeLlama-34b-Instruct-hf.py
@@ -4,7 +4,7 @@
from typing import Any
import pytest
import portkey
from portkey import TextCompletion, TextCompletionChunk, Config, LLMOptions
from portkey import Config, LLMOptions
from dotenv import load_dotenv

# from tests.utils import assert_matches_type
@@ -31,7 +31,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
)
@@ -51,7 +51,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stop_sequences=["string", "string", "string"],
@@ -75,7 +75,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -97,7 +97,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -126,7 +126,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
)
@@ -146,7 +146,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stop_sequences=["string", "string", "string"],
@@ -170,7 +170,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,
@@ -192,7 +192,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,
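The repeated `completion = ...` to `_ = ...` renames in this file (and the two sibling test files below) are the conventional fix for ruff's F841 rule, which flags local variables that are assigned but never read. A minimal sketch of the pattern, independent of the Portkey client:

```python
def create_completion() -> str:
    """Stand-in for client.Completions.create(...)."""
    return "ok"


def test_create_does_not_raise() -> None:
    # `completion = create_completion()` trips F841: the name is never read.
    # Binding to `_` keeps the call (the test only asserts it does not
    # raise) while marking the result as intentionally discarded.
    _ = create_completion()
```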
18 changes: 9 additions & 9 deletions tests/anyscale_tests/test_anyscale_Llama-2-13b-chat-hf.py
@@ -4,7 +4,7 @@
from typing import Any
import pytest
import portkey
from portkey import TextCompletion, TextCompletionChunk, Config, LLMOptions
from portkey import Config, LLMOptions
from dotenv import load_dotenv

# from tests.utils import assert_matches_type
@@ -31,7 +31,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
)
@@ -51,7 +51,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stop_sequences=["string", "string", "string"],
@@ -75,7 +75,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -97,7 +97,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -126,7 +126,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
)
@@ -146,7 +146,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stop_sequences=["string", "string", "string"],
@@ -170,7 +170,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,
@@ -192,7 +192,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,
18 changes: 9 additions & 9 deletions tests/anyscale_tests/test_anyscale_Llama-2-70b-chat-hf.py
@@ -4,7 +4,7 @@
from typing import Any
import pytest
import portkey
from portkey import TextCompletion, TextCompletionChunk, Config, LLMOptions
from portkey import Config, LLMOptions
from dotenv import load_dotenv

# from tests.utils import assert_matches_type
@@ -31,7 +31,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
)
@@ -51,7 +51,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stop_sequences=["string", "string", "string"],
@@ -75,7 +75,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -97,7 +97,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.Completions.create(
_ = client.Completions.create(
max_tokens=256,
prompt="why is the sky blue ?",
stream=True,
@@ -126,7 +126,7 @@ def test_method_create_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
)
@@ -146,7 +146,7 @@ def test_method_create_with_all_params_non_stream(self, client: Any) -> None:
),
)
client.config = config
completion = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stop_sequences=["string", "string", "string"],
@@ -170,7 +170,7 @@ def test_method_create_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,
@@ -192,7 +192,7 @@ def test_method_create_with_all_params_streaming(self, client: Any) -> None:
),
)
client.config = config
completion_streaming = client.ChatCompletions.create(
_ = client.ChatCompletions.create(
max_tokens=256,
messages=[{"role": "user", "content": "why is the sky blue ?"}],
stream=True,