⬆️ Bump black from 22.12.0 to 23.1.0 (#265)
* ⬆️ Bump black from 22.12.0 to 23.1.0

Bumps [black](https://github.com/psf/black) from 22.12.0 to 23.1.0.
- [Release notes](https://github.com/psf/black/releases)
- [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
- [Commits](https://github.com/psf/black/compare/22.12.0...23.1.0)

---
updated-dependencies:
- dependency-name: black
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* 🎨 reformat files using latest `black`

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Ahmed TAHRI <ahmed.tahri@cloudnursery.dev>
dependabot[bot] and Ousret committed Mar 6, 2023
1 parent 5730a34 · commit 86617ac
Showing 5 changed files with 4 additions and 10 deletions.
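Nearly all of the source churn below is mechanical: black 23.1.0's 2023 stable style deletes empty lines at the beginning of an indented block, which is exactly what the removed blank lines in the four Python files are. A minimal before/after sketch of the rule (hypothetical function, not from this repository):

# Accepted by black 22.12.0: a blank line right after the block opener.
def detect(payload: bytes) -> str:

    return payload.decode("utf_8")

# Output of black 23.1.0: the leading blank line inside the block is removed.
def detect(payload: bytes) -> str:
    return payload.decode("utf_8")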
charset_normalizer/api.py (3 additions & 2 deletions)
@@ -175,7 +175,6 @@ def from_bytes(
     prioritized_encodings.append("utf_8")

     for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
-
         if cp_isolation and encoding_iana not in cp_isolation:
             continue

@@ -318,7 +317,9 @@ def from_bytes(
                         bom_or_sig_available and strip_sig_or_bom is False
                     ):
                         break
-            except UnicodeDecodeError as e:  # Lazy str loading may have missed something there
+            except (
+                UnicodeDecodeError
+            ) as e:  # Lazy str loading may have missed something there
                 logger.log(
                     TRACE,
                     "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
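The except rewrite above is cosmetic: counting its trailing comment, the one-line form exceeds black's default 88-character line length, and the diff shows 23.1.0 making it wrappable by parenthesizing the exception name. A self-contained sketch of the wrapped form (hypothetical logging code, not from this diff):

import logging

logger = logging.getLogger(__name__)

try:
    b"\xff\xfe".decode("utf_8")  # invalid UTF-8, raises UnicodeDecodeError
except (
    UnicodeDecodeError
) as e:  # a trailing comment long enough that black must wrap the except line
    logger.debug("decode failed: %s", e)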
charset_normalizer/cd.py (0 additions & 1 deletion)
@@ -140,7 +140,6 @@ def alphabet_languages(
     source_have_accents = any(is_accentuated(character) for character in characters)

     for language, language_characters in FREQUENCIES.items():
-
         target_have_accents, target_pure_latin = get_target_features(language)

         if ignore_non_latin and target_pure_latin is False:
charset_normalizer/cli/normalizer.py (0 additions & 2 deletions)
@@ -147,7 +147,6 @@ def cli_detect(argv: Optional[List[str]] = None) -> int:
     x_ = []

     for my_file in args.files:
-
         matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose)

         best_guess = matches.best()
@@ -222,7 +221,6 @@ def cli_detect(argv: Optional[List[str]] = None) -> int:
             )

         if args.normalize is True:
-
             if best_guess.encoding.startswith("utf") is True:
                 print(
                     '"{}" file does not need to be normalized, as it already came from unicode.'.format(
charset_normalizer/utils.py (0 additions & 4 deletions)
@@ -311,7 +311,6 @@ def range_scan(decoded_sequence: str) -> List[str]:


 def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
-
     if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
         return 0.0

@@ -351,7 +350,6 @@ def set_logging_handler(
     level: int = logging.INFO,
     format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
 ) -> None:
-
     logger = logging.getLogger(name)
     logger.setLevel(level)

@@ -371,7 +369,6 @@ def cut_sequence_chunks(
     is_multi_byte_decoder: bool,
     decoded_payload: Optional[str] = None,
 ) -> Generator[str, None, None]:
-
     if decoded_payload and is_multi_byte_decoder is False:
         for i in offsets:
             chunk = decoded_payload[i : i + chunk_size]
@@ -397,7 +394,6 @@ def cut_sequence_chunks(
             # multi-byte bad cutting detector and adjustment
             # not the cleanest way to perform that fix but clever enough for now.
             if is_multi_byte_decoder and i > 0:
-
                 chunk_partial_size_chk: int = min(chunk_size, 16)

                 if (
dev-requirements.txt (1 addition & 1 deletion)
@@ -6,7 +6,7 @@ pytest-cov==4.0.0
 build==0.10.0
 wheel==0.38.4

-black==22.12.0
+black==23.1.0
 mypy==1.0.1
 Flask==2.2.3
 pytest==7.2.1
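To reproduce the reformat locally after this bump, something like the following should work (a sketch; the target path is an assumption, since the project's exact black invocation is not shown here):

pip install -r dev-requirements.txt   # installs the new pin, black==23.1.0
black charset_normalizer/             # rewrite files to the 2023 stable style
black --check charset_normalizer/     # verify only; exits non-zero if changes remain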
