Add unit testing for conditional parsing in GenAi-Perf (#531)
dyastremsky committed Mar 15, 2024
1 parent 81d0925 commit b0b5b27
Showing 4 changed files with 132 additions and 9 deletions.
20 changes: 19 additions & 1 deletion src/c++/perf_analyzer/genai-perf/genai_perf/parser.py
@@ -41,6 +41,23 @@
 logger = logging.getLogger(LOGGER_NAME)
 
 
+def _check_conditional_args(
+    parser: argparse.ArgumentParser, args: argparse.ArgumentParser
+) -> None:
+    """
+    Check for conditional args and raise an error if they are not set.
+    """
+    if args.service_kind == "openai":
+        if args.endpoint is None:
+            parser.error(
+                "The --endpoint option is required when using the 'openai' service-kind."
+            )
+    elif args.endpoint is not None:
+        logger.warning(
+            "The --endpoint option is ignored when not using the 'openai' service-kind."
+        )
+
+
 def _prune_args(args: argparse.ArgumentParser) -> argparse.ArgumentParser:
     """
     Prune the parsed arguments to remove args with None.
@@ -80,7 +97,7 @@ def _convert_str_to_enum_entry(args, option, enum):
 def handler(args, extra_args):
     from genai_perf.wrapper import Profiler
 
-    Profiler.run(model=args.model, args=args, extra_args=extra_args)
+    Profiler.run(args=args, extra_args=extra_args)
 
 
 ### Parsers ###
@@ -329,6 +346,7 @@ def parse_args():
     passthrough_index = len(argv)
 
     args = parser.parse_args(argv[1:passthrough_index])
+    _check_conditional_args(parser, args)
     args = _update_load_manager_args(args)
     args = _convert_str_to_enum_entry(args, "input_type", InputType)
     args = _convert_str_to_enum_entry(args, "output_format", OutputFormat)
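As an illustration only (not part of the commit), the sketch below mirrors the new conditional check against a throwaway argparse parser that models just the two options involved. The parser, its choices, and its defaults here are assumptions made for the example; the real option definitions live elsewhere in parser.py.

# Minimal, self-contained sketch of the conditional check added above.
# Only --service-kind and --endpoint are modeled, with assumed defaults.
import argparse
import logging

logging.basicConfig()
logger = logging.getLogger("example")


def check_conditional_args(parser, args):
    # Same rule as the commit: 'openai' requires --endpoint; for other
    # service kinds a supplied --endpoint is merely ignored with a warning.
    if args.service_kind == "openai":
        if args.endpoint is None:
            parser.error(
                "The --endpoint option is required when using the 'openai' service-kind."
            )
    elif args.endpoint is not None:
        logger.warning(
            "The --endpoint option is ignored when not using the 'openai' service-kind."
        )


parser = argparse.ArgumentParser(prog="genai-perf-example")
parser.add_argument("--service-kind", choices=["triton", "openai"], default="triton")
parser.add_argument("--endpoint", default=None)

args = parser.parse_args(["--service-kind", "openai"])
check_conditional_args(parser, args)  # calls parser.error() and exits with code 2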
12 changes: 5 additions & 7 deletions src/c++/perf_analyzer/genai-perf/genai_perf/wrapper.py
@@ -49,13 +49,13 @@ def add_protocol_args(args):
         return cmd
 
     @staticmethod
-    def build_cmd(model, args, extra_args):
+    def build_cmd(args, extra_args):
         skip_args = [
-            "model",
             "func",
             "dataset",
             "input_type",
             "input_format",
+            "model",
             "output_format",
             # The 'streaming' passed in to this script is to determine if the
             # LLM response should be streaming. That is different than the
@@ -74,7 +74,7 @@ def build_cmd(model, args, extra_args):
         else:
             utils.remove_file(args.profile_export_file)
 
-        cmd = f"perf_analyzer -m {model} --async "
+        cmd = f"perf_analyzer -m {args.model} --async "
         for arg, value in vars(args).items():
             if arg in skip_args:
                 pass
@@ -85,8 +85,6 @@ def build_cmd(model, args, extra_args):
                     cmd += f"-{arg} "
                 else:
                     cmd += f"--{arg} "
-            elif arg == "batch_size":
-                cmd += f"-b {value} "
             else:
                 if len(arg) == 1:
                     cmd += f"-{arg} {value} "
@@ -102,7 +100,7 @@ def build_cmd(model, args, extra_args):
         return cmd
 
     @staticmethod
-    def run(model, args=None, extra_args=None):
-        cmd = Profiler.build_cmd(model, args, extra_args)
+    def run(args=None, extra_args=None):
+        cmd = Profiler.build_cmd(args, extra_args)
         logger.info(f"Running Perf Analyzer : '{cmd}'")
         subprocess.run(cmd, shell=True, check=True)
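As an aside, not part of the commit: the loop in build_cmd follows a common pattern for turning a parsed argparse Namespace into a command-line string. The standalone sketch below shows that pattern in isolation; build_flags, the Namespace fields, and the skip set are made up for the example.

# Standalone sketch (not GenAI-Perf code) of the flag-building pattern used by
# build_cmd above: walk the parsed Namespace, skip internal args, and emit
# "--name value", or a bare "--name" for boolean flags.
from argparse import Namespace


def build_flags(args, skip):
    cmd = ""
    for arg, value in vars(args).items():
        if arg in skip or value is None or value is False:
            continue
        prefix = "-" if len(arg) == 1 else "--"
        cmd += f"{prefix}{arg} " if value is True else f"{prefix}{arg} {value} "
    return cmd.strip()


# Example with made-up argument names:
print(build_flags(Namespace(model="test_model", streaming=True, concurrency=4),
                  skip={"model"}))
# prints: --streaming --concurrency 4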
19 changes: 18 additions & 1 deletion src/c++/perf_analyzer/genai-perf/tests/test_cli.py
@@ -118,7 +118,10 @@ def test_help_arguments_output_and_exit(
         (["--random-seed", "8"], {"random_seed": 8}),
         (["--request-rate", "9.0"], {"request_rate_range": "9.0"}),
         (["--service-kind", "triton"], {"service_kind": "triton"}),
-        (["--service-kind", "openai"], {"service_kind": "openai"}),
+        (
+            ["--service-kind", "openai", "--endpoint", "v1/chat/completions"],
+            {"service_kind": "openai", "endpoint": "v1/chat/completions"},
+        ),
         (["--stability-percentage", "99.5"], {"stability_percentage": 99.5}),
         (["-s", "99.5"], {"stability_percentage": 99.5}),
         (["--streaming"], {"streaming": True}),
@@ -201,3 +204,17 @@ def test_unrecognized_arg(self, monkeypatch, capsys):
         assert excinfo.value.code != 0
         captured = capsys.readouterr()
         assert expected_output in captured.err
+
+    def test_service_openai_no_endpoint(self, monkeypatch, capsys):
+        args = ["genai-perf", "-m", "test_model", "--service-kind", "openai"]
+        monkeypatch.setattr("sys.argv", args)
+        expected_output = (
+            "The --endpoint option is required when using the 'openai' service-kind."
+        )
+
+        with pytest.raises(SystemExit) as excinfo:
+            parser.parse_args()
+
+        assert excinfo.value.code != 0
+        captured = capsys.readouterr()
+        assert expected_output in captured.err
90 changes: 90 additions & 0 deletions src/c++/perf_analyzer/genai-perf/tests/test_cmd_builder.py
@@ -0,0 +1,90 @@
# Copyright 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import pytest
from genai_perf import parser
from genai_perf.constants import DEFAULT_GRPC_URL
from genai_perf.wrapper import Profiler


class TestCmdBuilder:
    @pytest.mark.parametrize(
        "arg",
        [
            ([]),
            (["-u", "testurl:1000"]),
            (["--url", "testurl:1000"]),
        ],
    )
    def test_url_exactly_once(self, monkeypatch, arg):
        args = ["genai-perf", "-m", "test_model"] + arg
        monkeypatch.setattr("sys.argv", args)
        args, extra_args = parser.parse_args()
        cmd_string = Profiler.build_cmd(args, extra_args)

        number_of_url_args = cmd_string.count(" -u ") + cmd_string.count(" --url ")
        assert number_of_url_args == 1

    @pytest.mark.parametrize(
        "arg",
        [
            (["--output-format", "openai_chat_completions"]),
            (["--output-format", "openai_completions"]),
            (["--output-format", "trtllm"]),
            (["--output-format", "vllm"]),
        ],
    )
    def test_service_triton(self, monkeypatch, arg):
        args = ["genai-perf", "-m", "test_model", "--service-kind", "triton"] + arg
        monkeypatch.setattr("sys.argv", args)
        args, extra_args = parser.parse_args()
        cmd_string = Profiler.build_cmd(args, extra_args)

        # Ensure the correct arguments are appended.
        assert cmd_string.count(" -i grpc") == 1
        assert cmd_string.count(" --streaming") == 1
        assert cmd_string.count(f"-u {DEFAULT_GRPC_URL}") == 1

        if arg[1] == "trtllm":
            assert cmd_string.count("--shape max_tokens:1") == 1
            assert cmd_string.count("--shape text_input:1") == 1

    def test_service_openai(self, monkeypatch):
        args = [
            "genai-perf",
            "-m",
            "test_model",
            "--service-kind",
            "openai",
            "--endpoint",
            "v1/completions",
        ]
        monkeypatch.setattr("sys.argv", args)
        args, extra_args = parser.parse_args()
        cmd_string = Profiler.build_cmd(args, extra_args)

        # Ensure the correct arguments are appended.
        assert cmd_string.count(" -i http") == 1

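Assuming the repository layout shown in the file headers above, the new module can presumably be run on its own with pytest. A minimal invocation through pytest's Python API, with the path taken from this commit and flags chosen for the example:

# Hypothetical invocation of the new test module from the repository root.
import pytest

pytest.main(["src/c++/perf_analyzer/genai-perf/tests/test_cmd_builder.py", "-v"])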