Commit c8c9663

fix(infra): conform ruff to 150 LL (#781)
Generally correctly format it with ruff format and manual style

Signed-off-by: Aaron <29749331+aarnphm@users.noreply.github.com>
aarnphm committed Dec 14, 2023
1 parent 8d98976 commit c8c9663
Showing 90 changed files with 1,830 additions and 1,891 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -8,7 +8,7 @@ default_language_version:
exclude: '.*\.(css|js|svg)$'
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: 'v0.1.7'
+  rev: 'v0.1.8'
hooks:
- id: ruff
alias: r
102 changes: 23 additions & 79 deletions .ruff.toml
@@ -1,77 +1,38 @@
-extend-exclude = [
-  "tools",
-  "examples",
-  "openllm-python/src/openllm/__init__.py",
-  "openllm-python/src/openllm/_version.py",
-  "openllm-python/src/openllm/models/__init__.py",
-  "openllm-python/src/openllm_cli/playground",
-  "openllm-client/src/openllm_client/pb/**",
-]
+exclude = ["tools", "examples", "openllm-python/src/openllm_cli/playground/"]
extend-include = ["*.ipynb"]
-extend-select = [
-  "E",
+preview = true
+select = [
"F",
"B",
"PIE",
"G", # flake8-logging-format
"W", # pycodestyle
"Q", # flake8-quotes
"FA", # flake8-future-annotations
"TCH", # flake8-type-checking
"PLW", # pylint-warning
"PLR", # pylint-refactor
"PT", # flake8-pytest-style
"PERF", # perflint
"RUF", # Ruff-specific rules
"YTT", # flake8-2020
]
fix = true
-ignore = [
-  "PLR0911",
-  "PLR0912",
-  "PLR0913",
-  "PLR0915",
-  "ANN", # Use mypy
-  "PLR2004", # magic value to use constant
-  "E501", # ignore line length violation
-  "E401", # ignore multiple line import
-  "W6",
-  "E71",
-  "E72",
-  "E112",
-  "E113",
-  # "E124",
-  "E203",
-  "E272",
-  # "E303",
-  # "E304",
-  # "E501",
-  # "E502",
-  "E702",
-  "TCH004", # don't move runtime import out, just warn about it
-  "RUF012", # mutable attributes to be used with ClassVar
-  "E701", # multiple statement on single line
-  "E703",
-  "E731",
-  "W191",
-  "W291",
-  "W293",
-  "UP039", # unnecessary-class-parentheses
-]
-line-length = 119
+ignore = ["RUF012"]
+line-length = 150
indent-width = 2
target-version = "py38"
typing-modules = [
"openllm_core._typing_compat",
"openllm_client._typing_compat",
]
unfixable = ["TCH004"]

[lint.flake8-type-checking]
exempt-modules = [
"typing",
"typing_extensions",
"openllm_core._typing_compat",
"openllm_client._typing_compat",
]
runtime-evaluated-base-classes = [
"openllm_core._configuration.LLMConfig",
"openllm_core._configuration.GenerationConfig",
"openllm_core._configuration.SamplingParams",
"openllm_core._configuration.ModelSettings",
"openllm.LLMConfig",
]
runtime-evaluated-decorators = [
"attrs.define",
"attrs.frozen",
"trait",
"attr.attrs",
'attr.define',
'_attr.define',
'attr.frozen',
]

[format]
preview = true
@@ -81,20 +42,3 @@ skip-magic-trailing-comma = true

[lint.pydocstyle]
convention = "google"

-[lint.pycodestyle]
-ignore-overlong-task-comments = true
-max-line-length = 119
-
-[lint.flake8-quotes]
-avoid-escape = false
-inline-quotes = "single"
-multiline-quotes = "single"
-docstring-quotes = "single"
-
-[lint.extend-per-file-ignores]
-"openllm-python/tests/**/*" = ["S101", "TID252", "PT011", "S307"]
-"openllm-python/src/openllm/_llm.py" = ["F811"]
-"openllm-core/src/openllm_core/utils/import_utils.py" = ["PLW0603", "F811"]
-"openllm-core/src/openllm_core/_configuration.py" = ["F811", "Q001"]
-"openllm-python/src/openllm/_service_vars_pkg.py" = ["F821"]
18 changes: 3 additions & 15 deletions cz.py
@@ -22,21 +22,13 @@ def run_cz(args):
with tokenize.open(filepath) as file_:
tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST]
token_count, line_count = len(tokens), len(set([t.start[0] for t in tokens]))
-    table.append(
-      [
-        filepath.replace(os.path.join(args.dir, 'src'), ''),
-        line_count,
-        token_count / line_count if line_count != 0 else 0,
-      ]
-    )
+    table.append([filepath.replace(os.path.join(args.dir, 'src'), ''), line_count, token_count / line_count if line_count != 0 else 0])
print(tabulate([headers, *sorted(table, key=lambda x: -x[1])], headers='firstrow', floatfmt='.1f') + '\n')
print(
tabulate(
[
(dir_name, sum([x[1] for x in group]))
-        for dir_name, group in itertools.groupby(
-          sorted([(x[0].rsplit('/', 1)[0], x[1]) for x in table]), key=lambda x: x[0]
-        )
+        for dir_name, group in itertools.groupby(sorted([(x[0].rsplit('/', 1)[0], x[1]) for x in table]), key=lambda x: x[0])
],
headers=['Directory', 'LOC'],
floatfmt='.1f',
@@ -54,10 +46,6 @@ def run_cz(args):

parser = argparse.ArgumentParser()
parser.add_argument(
-  '--dir',
-  choices=['openllm-python', 'openllm-core', 'openllm-client'],
-  help='directory to check',
-  default='openllm-python',
-  required=False,
+  '--dir', choices=['openllm-python', 'openllm-core', 'openllm-client'], help='directory to check', default='openllm-python', required=False
)
raise SystemExit(run_cz(parser.parse_args()))
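For context on what cz.py measures: it counts whitelisted tokens per distinct source line using the stdlib tokenizer, i.e., a rough token-density metric. The core idea can be reproduced in isolation with the sketch below; the TOKEN_WHITELIST here is an assumption, since the commit does not show cz.py's actual whitelist:

import tokenize

# Assumed whitelist; cz.py's real TOKEN_WHITELIST is not shown in this diff.
TOKEN_WHITELIST = (tokenize.NAME, tokenize.NUMBER, tokenize.STRING, tokenize.OP)

def token_density(filepath):
  # Collect whitelisted tokens, then count the distinct lines they start on.
  with tokenize.open(filepath) as file_:
    tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST]
  line_count = len({t.start[0] for t in tokens})
  return len(tokens), line_count, (len(tokens) / line_count if line_count else 0)

if __name__ == '__main__':
  print(token_density(__file__))  # (token_count, line_count, tokens per line)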
24 changes: 6 additions & 18 deletions openllm-client/src/openllm_client/__init__.pyi
@@ -15,17 +15,11 @@ class HTTPClient:
address: str
helpers: _Helpers
@overload
-  def __init__(
-    self, address: str, timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: str, timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@overload
-  def __init__(
-    self, address: str = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: str = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@overload
-  def __init__(
-    self, address: None = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: None = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@property
def is_ready(self) -> bool: ...
def health(self) -> bool: ...
@@ -66,17 +60,11 @@ class AsyncHTTPClient:
address: str
helpers: _AsyncHelpers
@overload
-  def __init__(
-    self, address: str, timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: str, timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@overload
-  def __init__(
-    self, address: str = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: str = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@overload
-  def __init__(
-    self, address: None = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...
-  ) -> None: ...
+  def __init__(self, address: None = ..., timeout: int = ..., verify: bool = ..., max_retries: int = ..., api_version: str = ...) -> None: ...
@property
def is_ready(self) -> bool: ...
async def health(self) -> bool: ...
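For orientation, the three overloads differ only in how address is supplied (required, defaulted, or None). A minimal usage sketch follows; the server address is hypothetical:

from openllm_client import HTTPClient

client = HTTPClient('http://localhost:3000', timeout=30)  # matches the first overload; address is a placeholder
if client.is_ready:
  print(client.health())  # True once the server's /readyz probe returns 200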
28 changes: 7 additions & 21 deletions openllm-client/src/openllm_client/_http.py
@@ -70,14 +70,10 @@ def query(self, prompt, **attrs):
return self.generate(prompt, **attrs)

def health(self):
-    response = self._get(
-      '/readyz', response_cls=None, options={'return_raw_response': True, 'max_retries': self._max_retries}
-    )
+    response = self._get('/readyz', response_cls=None, options={'return_raw_response': True, 'max_retries': self._max_retries})
return response.status_code == 200

-  def generate(
-    self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs
-  ) -> Response:
+  def generate(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> Response:
if timeout is None:
timeout = self._timeout
if verify is None:
@@ -100,9 +96,7 @@ def generate_stream(
for response_chunk in self.generate_iterator(prompt, llm_config, stop, adapter_name, timeout, verify, **attrs):
yield StreamingResponse.from_response_chunk(response_chunk)

-  def generate_iterator(
-    self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs
-  ) -> t.Iterator[Response]:
+  def generate_iterator(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> t.Iterator[Response]:
if timeout is None:
timeout = self._timeout
if verify is None:
@@ -152,9 +146,7 @@ def _build_auth_headers(self) -> t.Dict[str, str]:
@property
async def _metadata(self) -> t.Awaitable[Metadata]:
if self.__metadata is None:
-      self.__metadata = await self._post(
-        f'/{self._api_version}/metadata', response_cls=Metadata, json={}, options={'max_retries': self._max_retries}
-      )
+      self.__metadata = await self._post(f'/{self._api_version}/metadata', response_cls=Metadata, json={}, options={'max_retries': self._max_retries})
return self.__metadata

@property
@@ -167,14 +159,10 @@ async def query(self, prompt, **attrs):
return await self.generate(prompt, **attrs)

async def health(self):
-    response = await self._get(
-      '/readyz', response_cls=None, options={'return_raw_response': True, 'max_retries': self._max_retries}
-    )
+    response = await self._get('/readyz', response_cls=None, options={'return_raw_response': True, 'max_retries': self._max_retries})
return response.status_code == 200

-  async def generate(
-    self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs
-  ) -> Response:
+  async def generate(self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs) -> Response:
if timeout is None:
timeout = self._timeout
if verify is None:
@@ -195,9 +183,7 @@ async def generate(
async def generate_stream(
self, prompt, llm_config=None, stop=None, adapter_name=None, timeout=None, verify=None, **attrs
) -> t.AsyncGenerator[StreamingResponse, t.Any]:
-    async for response_chunk in self.generate_iterator(
-      prompt, llm_config, stop, adapter_name, timeout, verify, **attrs
-    ):
+    async for response_chunk in self.generate_iterator(prompt, llm_config, stop, adapter_name, timeout, verify, **attrs):
yield StreamingResponse.from_response_chunk(response_chunk)

async def generate_iterator(
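To make the call pattern concrete, here is a short sketch of driving the async client's streaming path; the address is hypothetical and error handling is omitted:

import asyncio

from openllm_client import AsyncHTTPClient

async def main():
  client = AsyncHTTPClient('http://localhost:3000', timeout=60)  # placeholder address
  # generate_stream wraps generate_iterator and yields StreamingResponse chunks.
  async for chunk in client.generate_stream('Explain KV caching in one paragraph'):
    print(chunk.text, end='', flush=True)

asyncio.run(main())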
25 changes: 6 additions & 19 deletions openllm-client/src/openllm_client/_schemas.py
@@ -22,9 +22,9 @@

@attr.define
class Metadata(_SchemaMixin):
-  '''NOTE: Metadata is a modified version of the original MetadataOutput from openllm-core.
-  The configuration is now structured into a dictionary for easy of use.'''
+  """NOTE: Metadata is a modified version of the original MetadataOutput from openllm-core.
+  The configuration is now structured into a dictionary for easy of use."""

model_id: str
timeout: int
@@ -42,11 +42,7 @@ def _structure_metadata(data: t.Dict[str, t.Any], cls: type[Metadata]) -> Metadata:
raise RuntimeError(f'Malformed metadata configuration (Server-side issue): {e}') from None
try:
return cls(
-      model_id=data['model_id'],
-      timeout=data['timeout'],
-      model_name=data['model_name'],
-      backend=data['backend'],
-      configuration=configuration,
+      model_id=data['model_id'], timeout=data['timeout'], model_name=data['model_name'], backend=data['backend'], configuration=configuration
)
except Exception as e:
raise RuntimeError(f'Malformed metadata (Server-side issue): {e}') from None
@@ -65,10 +61,7 @@ class StreamingResponse(_SchemaMixin):
@classmethod
def from_response_chunk(cls, response: Response) -> StreamingResponse:
return cls(
-      request_id=response.request_id,
-      index=response.outputs[0].index,
-      text=response.outputs[0].text,
-      token_ids=response.outputs[0].token_ids[0],
+      request_id=response.request_id, index=response.outputs[0].index, text=response.outputs[0].text, token_ids=response.outputs[0].token_ids[0]
)


@@ -95,17 +88,11 @@ def async_client(self):
return self._async_client

def messages(self, messages, add_generation_prompt=False):
-    return self.client._post(
-      '/v1/helpers/messages',
-      response_cls=str,
-      json=dict(messages=messages, add_generation_prompt=add_generation_prompt),
-    )
+    return self.client._post('/v1/helpers/messages', response_cls=str, json=dict(messages=messages, add_generation_prompt=add_generation_prompt))

async def async_messages(self, messages, add_generation_prompt=False):
return await self.async_client._post(
-      '/v1/helpers/messages',
-      response_cls=str,
-      json=dict(messages=messages, add_generation_prompt=add_generation_prompt),
+      '/v1/helpers/messages', response_cls=str, json=dict(messages=messages, add_generation_prompt=add_generation_prompt)
)

@classmethod
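The _structure_metadata changes above are pure reformatting, but the underlying pattern, registering a custom structuring hook that fails loudly on malformed server payloads, can be sketched as follows. This is an illustration under assumptions (a plain dict configuration and a cattrs converter), not the file's actual wiring:

import attr
import cattrs

@attr.define
class Metadata:
  model_id: str
  timeout: int
  model_name: str
  backend: str
  configuration: dict

def _structure_metadata(data, cls):
  # Mirror the diff's error handling: surface malformed payloads as server-side issues.
  try:
    return cls(**{f.name: data[f.name] for f in attr.fields(cls)})
  except Exception as e:
    raise RuntimeError(f'Malformed metadata (Server-side issue): {e}') from None

converter = cattrs.Converter()
converter.register_structure_hook(Metadata, _structure_metadata)
print(converter.structure({'model_id': 'm', 'timeout': 30, 'model_name': 'n', 'backend': 'pt', 'configuration': {}}, Metadata))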
