
Add CLI unit tests for GenAI-PA CLI #479

Merged: 10 commits, Feb 29, 2024
Changes from 6 commits
1 change: 1 addition & 0 deletions src/c++/perf_analyzer/genai-pa/pyproject.toml
@@ -47,6 +47,7 @@ keywords = []
 requires-python = ">=3.8,<4"
 dependencies = [
     "numpy",
+    "pytest",
     "rich"
 ]

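With pytest declared as a package dependency, the new suite can be run from a plain install of genai-pa. A minimal sketch of invoking it programmatically through pytest's public entry point; the test path is an assumption based on this PR's layout, and the same thing can be done from a shell with pytest -v tests/test_cli.py:

# run_tests.py: invoke the CLI test suite via pytest.main(), which
# returns an exit code suitable for passing to sys.exit().
import sys

import pytest

if __name__ == "__main__":
    # Path assumed from this PR's layout (genai-pa package root).
    sys.exit(pytest.main(["-v", "tests/test_cli.py"]))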
68 changes: 61 additions & 7 deletions src/c++/perf_analyzer/genai-pa/tests/test_cli.py
@@ -24,14 +24,68 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+import io
+import sys

 import pytest
 from genai_pa import parser
 from genai_pa.main import run

-# NOTE: Placeholder
-class TestHelp:
-    @pytest.mark.parametrize("arg", ["-h", "--help"])
-    def test_help(self, arg):
-        args = [arg]
-        with pytest.raises(SystemExit):
-            run(args)
+class TestCLIArguments:
+    @pytest.mark.parametrize(
+        "arg, expected_output",
+        [
+            (["-h"], "CLI to profile LLMs and Generative AI models with Perf Analyzer"),
+            (
+                ["--help"],
+                "CLI to profile LLMs and Generative AI models with Perf Analyzer",
+            ),
+        ],
+    )
+    def test_help_arguments_output_and_exit(self, arg, expected_output, capsys):
+        with pytest.raises(SystemExit) as excinfo:
+            _ = parser.parse_args(arg)

+        # Check that the exit was successful
+        assert excinfo.value.code == 0

+        # Capture that the correct message was displayed
+        captured = capsys.readouterr()
+        assert expected_output in captured.out

+    @pytest.mark.parametrize(
+        "arg, expected_output",
+        [
+            (["-b", "2"], "batch_size=2"),
+            (["--batch-size", "2"], "batch_size=2"),
+            (["--concurrency", "3"], "concurrency_range='3'"),
+            (["--max-threads", "4"], "max_threads=4"),
+            (
+                ["--profile-export-file", "text.txt"],
+                "profile_export_file=PosixPath('text.txt')",
+            ),
+            (["--request-rate", "1.5"], "request_rate_range='1.5'"),
+            (["--service-kind", "triton"], "service_kind='triton'"),
+            (["--service-kind", "openai"], "service_kind='openai'"),
+            # TODO: Remove streaming from implementation. It is invalid with HTTP.
+            # (["--streaming"], "Streaming=True"),
+            (["--version"], "version=True"),
+            (["-u", "test_url"], "u='test_url'"),
+            (["--url", "test_url"], "u='test_url'"),
+        ],
+    )
+    def test_arguments_output(self, arg, expected_output, capsys):
+        combined_args = ["--model", "test_model"] + arg
+        _ = parser.parse_args(combined_args)

+        # Capture that the correct message was displayed
+        captured = capsys.readouterr()
+        assert expected_output in captured.out

+    def test_arguments_model_not_provided(self):
+        with pytest.raises(SystemExit) as exc_info:
+            _ = parser.parse_args()

+        # Check that the exit was unsuccessful
+        assert exc_info.value.code != 0
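The assertions above imply that parser.parse_args echoes the parsed options to stdout, which is how capsys can observe strings such as batch_size=2 and concurrency_range='3'. The real genai_pa parser is not part of this diff; the following is a minimal hypothetical sketch, built on argparse, of a parse_args these tests would accept. Every flag-to-attribute mapping (for example --concurrency storing into concurrency_range, or -u/--url storing into u) is inferred from the expected strings in the parametrized cases, not taken from the actual implementation:

# Hypothetical sketch only; not the actual genai_pa parser.
# Option names and dest attributes are inferred from the test expectations.
import argparse
from pathlib import Path


def parse_args(argv=None):
    parser = argparse.ArgumentParser(
        prog="genai-pa",
        description="CLI to profile LLMs and Generative AI models with Perf Analyzer",
    )
    parser.add_argument("--model", required=True)
    parser.add_argument("-b", "--batch-size", dest="batch_size", type=int)
    parser.add_argument("--concurrency", dest="concurrency_range")
    parser.add_argument("--max-threads", dest="max_threads", type=int)
    parser.add_argument("--profile-export-file", dest="profile_export_file", type=Path)
    parser.add_argument("--request-rate", dest="request_rate_range")
    parser.add_argument("--service-kind", choices=["triton", "openai"])
    parser.add_argument("--version", action="store_true")
    parser.add_argument("-u", "--url", dest="u")

    args = parser.parse_args(argv)
    # The tests read the parsed namespace from stdout via capsys, so the
    # namespace is printed after parsing; repr(Namespace) yields substrings
    # like batch_size=2 and profile_export_file=PosixPath('text.txt').
    print(args)
    return args

Under this sketch, -h/--help print the description and exit with code 0, a missing --model makes argparse exit with a nonzero code, and every other parametrized case appears verbatim in the printed Namespace, matching the three tests above.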