Skip to content

Commit

Permalink
More cleanups for unicode.py
Browse files Browse the repository at this point in the history
  • Loading branch information
pawroman committed Apr 18, 2019
1 parent 89feb6d commit a580421
Showing 1 changed file with 23 additions and 25 deletions.
48 changes: 23 additions & 25 deletions src/libcore/unicode/unicode.py
Expand Up @@ -28,14 +28,14 @@
# we don't use enum.Enum because of Python 2.7 compatibility
class UnicodeFiles(object):
# ReadMe does not contain any unicode data, we
# use it to extract versions.
# only use it to extract versions.
README = "ReadMe.txt"

DERIVED_CORE_PROPERTIES = "DerivedCoreProperties.txt"
DERIVED_NORMALIZATION_PROPS = "DerivedNormalizationProps.txt"
SPECIAL_CASING = "SpecialCasing.txt"
SCRIPTS = "Scripts.txt"
PROPS = "PropList.txt"
SCRIPTS = "Scripts.txt"
SPECIAL_CASING = "SpecialCasing.txt"
UNICODE_DATA = "UnicodeData.txt"


Expand Down Expand Up @@ -66,15 +66,15 @@ class UnicodeFiles(object):
# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
#
# Maps each two-letter Unicode General_Category value (e.g. "Lu",
# uppercase letter) to the list of grouping categories it also belongs
# to (e.g. "LC" cased letter and "L" letter).
#
# NOTE(review): this span is a rendered diff, not final source — the
# single-quoted entries are the pre-change lines and the double-quoted
# entries are the post-change lines of the same mapping. Taken literally
# as Python, the later double-quoted keys override the earlier duplicate
# single-quoted keys, so the resulting dict is the intended (new) one,
# but the duplicated lines should be resolved against the real file.
EXPANDED_CATEGORIES = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['N'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
    "Lu": ["LC", "L"], "Ll": ["LC", "L"], "Lt": ["LC", "L"],
    "Lm": ["L"], "Lo": ["L"],
    "Mn": ["M"], "Mc": ["M"], "Me": ["M"],
    "Nd": ["N"], "Nl": ["N"], "No": ["N"],
    "Pc": ["P"], "Pd": ["P"], "Ps": ["P"], "Pe": ["P"],
    "Pi": ["P"], "Pf": ["P"], "Po": ["P"],
    "Sm": ["S"], "Sc": ["S"], "Sk": ["S"], "So": ["S"],
    "Zs": ["Z"], "Zl": ["Z"], "Zp": ["Z"],
    "Cc": ["C"], "Cf": ["C"], "Cs": ["C"], "Co": ["C"], "Cn": ["C"],
}

# these are the surrogate codepoints, which are not valid rust characters
Expand Down Expand Up @@ -115,7 +115,7 @@ def fetch_files(version=None):
readme_content = subprocess.check_output(("curl", readme_url))

unicode_version = parse_unicode_version(
str(readme_content, "utf8")
readme_content.decode("utf8")
)

download_dir = os.path.join(FETCH_DIR, unicode_version.as_str)
Expand Down Expand Up @@ -415,7 +415,7 @@ def compute_trie(rawdata, chunksize):
child_data = []
for i in range(len(rawdata) // chunksize):
data = rawdata[i * chunksize: (i + 1) * chunksize]
child = '|'.join(map(str, data))
child = "|".join(map(str, data))
if child not in childmap:
childmap[child] = len(childmap)
child_data.extend(data)
Expand Down Expand Up @@ -444,34 +444,34 @@ def emit_bool_trie(f, name, t_data, is_pub=True):
pub_string = "pub "
f.write(" %sconst %s: &super::BoolTrie = &super::BoolTrie {\n" % (pub_string, name))
f.write(" r1: [\n")
data = ','.join('0x%016x' % chunk for chunk in chunks[0:0x800 // chunk_size])
data = ",".join("0x%016x" % chunk for chunk in chunks[0:0x800 // chunk_size])
format_table_content(f, data, 12)
f.write("\n ],\n")

# 0x800..0x10000 trie
(r2, r3) = compute_trie(chunks[0x800 // chunk_size : 0x10000 // chunk_size], 64 // chunk_size)
f.write(" r2: [\n")
data = ','.join(str(node) for node in r2)
data = ",".join(str(node) for node in r2)
format_table_content(f, data, 12)
f.write("\n ],\n")
f.write(" r3: &[\n")
data = ','.join('0x%016x' % chunk for chunk in r3)
data = ",".join("0x%016x" % chunk for chunk in r3)
format_table_content(f, data, 12)
f.write("\n ],\n")

# 0x10000..0x110000 trie
(mid, r6) = compute_trie(chunks[0x10000 // chunk_size : 0x110000 // chunk_size], 64 // chunk_size)
(r4, r5) = compute_trie(mid, 64)
f.write(" r4: [\n")
data = ','.join(str(node) for node in r4)
data = ",".join(str(node) for node in r4)
format_table_content(f, data, 12)
f.write("\n ],\n")
f.write(" r5: &[\n")
data = ','.join(str(node) for node in r5)
data = ",".join(str(node) for node in r5)
format_table_content(f, data, 12)
f.write("\n ],\n")
f.write(" r6: &[\n")
data = ','.join('0x%016x' % chunk for chunk in r6)
data = ",".join("0x%016x" % chunk for chunk in r6)
format_table_content(f, data, 12)
f.write("\n ],\n")

Expand All @@ -497,12 +497,12 @@ def emit_small_bool_trie(f, name, t_data, is_pub=True):
(r1, r2) = compute_trie(chunks, 1)

f.write(" r1: &[\n")
data = ','.join(str(node) for node in r1)
data = ",".join(str(node) for node in r1)
format_table_content(f, data, 12)
f.write("\n ],\n")

f.write(" r2: &[\n")
data = ','.join('0x%016x' % node for node in r2)
data = ",".join("0x%016x" % node for node in r2)
format_table_content(f, data, 12)
f.write("\n ],\n")

Expand Down Expand Up @@ -599,11 +599,9 @@ def main():
print("Using Unicode version: {}".format(unicode_version.as_str))

tables_rs_path = os.path.join(THIS_DIR, "tables.rs")
if os.path.exists(tables_rs_path):
os.remove(tables_rs_path)

# will overwrite the file if it exists
with open(tables_rs_path, "w") as rf:
# write the file's preamble
rf.write(PREAMBLE)

unicode_version_notice = textwrap.dedent("""
Expand Down

0 comments on commit a580421

Please sign in to comment.