chore(python): use black==22.3.0 (#283)
Source-Link: googleapis/synthtool@6fab84a
Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:7cffbc10910c3ab1b852c05114a08d374c195a81cdec1d4a67a1d129331d0bfe

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] committed Mar 28, 2022
1 parent 53ef0e2 commit fb2ba09
Showing 8 changed files with 34 additions and 34 deletions.
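The only substantive edit is the formatter pin: each noxfile moves from black==19.10b0 to black==22.3.0, and the remaining hunks are the re-format that the new pin produces, dropping redundant u string prefixes and trimming stray spaces inside docstrings. The u prefix has been a no-op since Python 3.3 (PEP 414), so sample and test behavior is unchanged; a minimal sketch (not part of the commit) that can be run to confirm this:

    # Plain Python 3, no dependencies: the u prefix is purely cosmetic.
    old_style = u"a \u00e3 \u0201 \U0001f636 b"
    new_style = "a \u00e3 \u0201 \U0001f636 b"

    assert old_style == new_style                         # same text
    assert type(old_style) is type(new_style) is str      # both plain str
    assert old_style.encode("utf8") == new_style.encode("utf8")  # same bytes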
38 changes: 19 additions & 19 deletions language/snippets/api/analyze_test.py
@@ -48,7 +48,7 @@ def test_analyze_sentiment(capsys):
def test_analyze_syntax(capsys):
result = analyze.analyze_syntax(
textwrap.dedent(
u"""\
"""\
Keep away from people who try to belittle your ambitions. Small people
always do that, but the really great make you feel that you, too, can
become great.
@@ -71,7 +71,7 @@ def test_analyze_syntax_utf8():
bits. The offsets we get should be the index of the first byte of the
character.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
test_string = "a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf8")
result = analyze.analyze_syntax(test_string, encoding="UTF8")
tokens = result["tokens"]
@@ -82,27 +82,27 @@ def test_analyze_syntax_utf8():
byte_array[offset : offset + 1].decode("utf8") == tokens[0]["text"]["content"]
)

-assert tokens[1]["text"]["content"] == u"\u00e3"
+assert tokens[1]["text"]["content"] == "\u00e3"
offset = tokens[1]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 2].decode("utf8") == tokens[1]["text"]["content"]
)

-assert tokens[2]["text"]["content"] == u"\u0201"
+assert tokens[2]["text"]["content"] == "\u0201"
offset = tokens[2]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 2].decode("utf8") == tokens[2]["text"]["content"]
)

-assert tokens[3]["text"]["content"] == u"\U0001f636"
+assert tokens[3]["text"]["content"] == "\U0001f636"
offset = tokens[3]["text"].get("beginOffset", 0)
assert (
byte_array[offset : offset + 4].decode("utf8") == tokens[3]["text"]["content"]
)

# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
-assert tokens[4]["text"]["content"] == u"b"
+assert tokens[4]["text"]["content"] == "b"
offset = tokens[4]["text"].get("beginOffset", 0)
# 'b' is only one byte long
assert (
@@ -117,7 +117,7 @@ def test_analyze_syntax_utf16():
bits. The returned offsets will be the index of the first 2-byte character
of the token.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
test_string = "a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf16")
# Remove the byte order marker, which the offsets don't account for
byte_array = byte_array[2:]
@@ -133,7 +133,7 @@ def test_analyze_syntax_utf16():
byte_array[offset : offset + 2].decode("utf16") == tokens[0]["text"]["content"]
)

-assert tokens[1]["text"]["content"] == u"\u00e3"
+assert tokens[1]["text"]["content"] == "\u00e3"
offset = 2 * tokens[1]["text"].get("beginOffset", 0)
# A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so
# slice out 2 bytes starting from the offset. Then interpret the bytes as
@@ -142,7 +142,7 @@ def test_analyze_syntax_utf16():
byte_array[offset : offset + 2].decode("utf16") == tokens[1]["text"]["content"]
)

-assert tokens[2]["text"]["content"] == u"\u0201"
+assert tokens[2]["text"]["content"] == "\u0201"
offset = 2 * tokens[2]["text"].get("beginOffset", 0)
# A UTF16 character with a low codepoint is 16 bits (2 bytes) long, so
# slice out 2 bytes starting from the offset. Then interpret the bytes as
@@ -151,7 +151,7 @@ def test_analyze_syntax_utf16():
byte_array[offset : offset + 2].decode("utf16") == tokens[2]["text"]["content"]
)

-assert tokens[3]["text"]["content"] == u"\U0001f636"
+assert tokens[3]["text"]["content"] == "\U0001f636"
offset = 2 * tokens[3]["text"].get("beginOffset", 0)
# A UTF16 character with a high codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret those bytes as
@@ -162,7 +162,7 @@ def test_analyze_syntax_utf16():

# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
-assert tokens[4]["text"]["content"] == u"b"
+assert tokens[4]["text"]["content"] == "b"
offset = 2 * tokens[4]["text"].get("beginOffset", 0)
# Even though 'b' is only one byte long, utf16 still encodes it using 16
# bits
@@ -192,7 +192,7 @@ def test_annotate_text_utf32():
unicode object with the raw offset returned by the api (ie without
multiplying it by 4, as it is below).
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
test_string = "a \u00e3 \u0201 \U0001f636 b"
byte_array = test_string.encode("utf32")
# Remove the byte order marker, which the offsets don't account for
byte_array = byte_array[4:]
@@ -208,7 +208,7 @@ def test_annotate_text_utf32():
byte_array[offset : offset + 4].decode("utf32") == tokens[0]["text"]["content"]
)

-assert tokens[1]["text"]["content"] == u"\u00e3"
+assert tokens[1]["text"]["content"] == "\u00e3"
offset = 4 * tokens[1]["text"].get("beginOffset", 0)
# A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret the bytes as
@@ -217,7 +217,7 @@ def test_annotate_text_utf32():
byte_array[offset : offset + 4].decode("utf32") == tokens[1]["text"]["content"]
)

-assert tokens[2]["text"]["content"] == u"\u0201"
+assert tokens[2]["text"]["content"] == "\u0201"
offset = 4 * tokens[2]["text"].get("beginOffset", 0)
# A UTF32 character with a low codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret the bytes as
@@ -226,7 +226,7 @@ def test_annotate_text_utf32():
byte_array[offset : offset + 4].decode("utf32") == tokens[2]["text"]["content"]
)

-assert tokens[3]["text"]["content"] == u"\U0001f636"
+assert tokens[3]["text"]["content"] == "\U0001f636"
offset = 4 * tokens[3]["text"].get("beginOffset", 0)
# A UTF32 character with a high codepoint is 32 bits (4 bytes) long, so
# slice out 4 bytes starting from the offset. Then interpret those bytes as
@@ -237,7 +237,7 @@ def test_annotate_text_utf32():

# This demonstrates that the offset takes into account the variable-length
# characters before the target token.
-assert tokens[4]["text"]["content"] == u"b"
+assert tokens[4]["text"]["content"] == "b"
offset = 4 * tokens[4]["text"].get("beginOffset", 0)
# Even though 'b' is only one byte long, utf32 still encodes it using 32
# bits
@@ -252,19 +252,19 @@ def test_annotate_text_utf32_directly_index_into_unicode():
See the explanation for test_annotate_text_utf32. Essentially, indexing
into a utf32 array is equivalent to indexing into a python unicode object.
"""
test_string = u"a \u00e3 \u0201 \U0001f636 b"
test_string = "a \u00e3 \u0201 \U0001f636 b"
result = analyze.analyze_syntax(test_string, encoding="UTF32")
tokens = result["tokens"]

assert tokens[0]["text"]["content"] == "a"
offset = tokens[0]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[0]["text"]["content"]

-assert tokens[1]["text"]["content"] == u"\u00e3"
+assert tokens[1]["text"]["content"] == "\u00e3"
offset = tokens[1]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[1]["text"]["content"]

-assert tokens[2]["text"]["content"] == u"\u0201"
+assert tokens[2]["text"]["content"] == "\u0201"
offset = tokens[2]["text"].get("beginOffset", 0)
assert test_string[offset] == tokens[2]["text"]["content"]

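Every change in analyze_test.py above is a u-prefix removal; the encoding assertions themselves are untouched. The offsets they check follow directly from how the sample characters encode, as this standalone sketch shows (plain Python 3, no API call, not part of the commit):

    # Byte widths of the characters used in the tests, per encoding.
    for ch in ["a", "\u00e3", "\u0201", "\U0001f636"]:
        print(
            repr(ch),
            len(ch.encode("utf8")),       # 1, 2, 2, 4 bytes in UTF-8
            len(ch.encode("utf-16-le")),  # 2, 2, 2, 4 bytes in UTF-16
            len(ch.encode("utf-32-le")),  # 4, 4, 4, 4 bytes in UTF-32
        )
    # The API reports beginOffset in code units of the requested encoding,
    # which is why the UTF-16 tests multiply the offset by 2 and the UTF-32
    # tests by 4 before slicing the encoded byte array, and why the UTF-32
    # offset can also index the Python str directly.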
4 changes: 2 additions & 2 deletions language/snippets/api/noxfile.py
@@ -29,7 +29,7 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING

-BLACK_VERSION = "black==19.10b0"
+BLACK_VERSION = "black==22.3.0"

# Copy `noxfile_config.py` to your directory and modify it instead.

@@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None:


def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
"""Returns the root folder of the project."""
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
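The noxfile hunks repeat the same two-line change in every sample directory: the BLACK_VERSION pin becomes 22.3.0 and the _get_repo_root docstring loses its inner padding. For context, a minimal sketch of how such a pin is typically consumed by a nox session (the session name and target path here are illustrative, not the repo's exact code):

    import nox

    BLACK_VERSION = "black==22.3.0"  # the pin this commit bumps from 19.10b0

    @nox.session
    def blacken(session: nox.sessions.Session) -> None:
        """Run the pinned black release over the sample sources."""
        session.install(BLACK_VERSION)
        session.run("black", ".")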
8 changes: 4 additions & 4 deletions language/snippets/classify_text/classify_text_tutorial.py
@@ -35,7 +35,7 @@

# [START language_classify_text_tutorial_classify]
def classify(text, verbose=True):
"""Classify the input text into categories. """
"""Classify the input text into categories."""

language_client = language_v1.LanguageServiceClient()

@@ -56,9 +56,9 @@ def classify(text, verbose=True):
if verbose:
print(text)
for category in categories:
print(u"=" * 20)
print(u"{:<16}: {}".format("category", category.name))
print(u"{:<16}: {}".format("confidence", category.confidence))
print("=" * 20)
print("{:<16}: {}".format("category", category.name))
print("{:<16}: {}".format("confidence", category.confidence))

return result

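classify_text_tutorial.py changes only its docstring spacing and the u prefixes on the print calls; the output format is identical. For reference, this is what the print block emits for a made-up category (values are hypothetical, no API call involved):

    category_name = "/News/Politics"   # hypothetical value, not a real response
    confidence = 0.87

    print("=" * 20)
    print("{:<16}: {}".format("category", category_name))
    print("{:<16}: {}".format("confidence", confidence))
    # ====================
    # category        : /News/Politics
    # confidence      : 0.87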
4 changes: 2 additions & 2 deletions language/snippets/classify_text/noxfile.py
@@ -29,7 +29,7 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING

-BLACK_VERSION = "black==19.10b0"
+BLACK_VERSION = "black==22.3.0"

# Copy `noxfile_config.py` to your directory and modify it instead.

@@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None:


def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
"""Returns the root folder of the project."""
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
4 changes: 2 additions & 2 deletions language/snippets/cloud-client/v1/noxfile.py
@@ -29,7 +29,7 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING

-BLACK_VERSION = "black==19.10b0"
+BLACK_VERSION = "black==22.3.0"

# Copy `noxfile_config.py` to your directory and modify it instead.

@@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None:


def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
"""Returns the root folder of the project."""
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
2 changes: 1 addition & 1 deletion language/snippets/cloud-client/v1/quickstart.py
@@ -29,7 +29,7 @@ def run_quickstart():
# [END language_python_migration_client]

# The text to analyze
text = u"Hello, world!"
text = "Hello, world!"
document = language_v1.Document(
content=text, type_=language_v1.Document.Type.PLAIN_TEXT
)
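quickstart.py only loses the u prefix on its sample text; the client usage around it is unchanged. A hedged sketch of the full flow this snippet belongs to (method and field names follow google-cloud-language v2 and may differ in detail from the repo's file):

    from google.cloud import language_v1

    client = language_v1.LanguageServiceClient()
    document = language_v1.Document(
        content="Hello, world!", type_=language_v1.Document.Type.PLAIN_TEXT
    )

    # Ask the API for document-level sentiment and print the aggregate score.
    response = client.analyze_sentiment(request={"document": document})
    sentiment = response.document_sentiment
    print("Text: {}".format("Hello, world!"))
    print("Sentiment: {}, {}".format(sentiment.score, sentiment.magnitude))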
4 changes: 2 additions & 2 deletions language/snippets/generated-samples/v1/noxfile.py
@@ -29,7 +29,7 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING

-BLACK_VERSION = "black==19.10b0"
+BLACK_VERSION = "black==22.3.0"

# Copy `noxfile_config.py` to your directory and modify it instead.

@@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None:


def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
"""Returns the root folder of the project."""
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
4 changes: 2 additions & 2 deletions language/snippets/sentiment/noxfile.py
@@ -29,7 +29,7 @@
# WARNING - WARNING - WARNING - WARNING - WARNING
# WARNING - WARNING - WARNING - WARNING - WARNING

-BLACK_VERSION = "black==19.10b0"
+BLACK_VERSION = "black==22.3.0"

# Copy `noxfile_config.py` to your directory and modify it instead.

@@ -253,7 +253,7 @@ def py(session: nox.sessions.Session) -> None:


def _get_repo_root() -> Optional[str]:
""" Returns the root folder of the project. """
"""Returns the root folder of the project."""
# Get root of this repository. Assume we don't have directories nested deeper than 10 items.
p = Path(os.getcwd())
for i in range(10):
