#!/usr/bin/env python3
"""
Tools to parse data files from the Unicode Character Database.
"""
from urllib.request import urlopen
import re
import logging
import os
from os.path import abspath, dirname, join as pjoin, pardir, sep
from typing import List
UNIDATA_URL = "https://unicode.org/Public/UNIDATA/"
UNIDATA_LICENSE_URL = "http://unicode.org/copyright.html#License"
# by default save output files to ../Lib/fontTools/unicodedata/
UNIDATA_PATH = (
pjoin(abspath(dirname(__file__)), pardir, "Lib", "fontTools", "unicodedata") + sep
)
SRC_ENCODING = "# -*- coding: utf-8 -*-\n"
NOTICE = "# NOTE: This file was auto-generated with MetaTools/buildUCD.py.\n"
MAX_UNICODE = 0x10FFFF
log = logging.getLogger()
def read_unidata_file(filename, local_ucd_path=None) -> List[str]:
"""Read a UCD file from https://unicode.org or optionally from a local directory.
Return the list of lines.
"""
if local_ucd_path is not None:
with open(pjoin(local_ucd_path, filename), "r", encoding="utf-8-sig") as f:
return f.readlines()
else:
url = UNIDATA_URL + filename
with urlopen(url) as response:
return response.read().decode("utf-8").splitlines(keepends=True)
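# Illustrative usage (the local path below is hypothetical):
#   lines = read_unidata_file("Scripts.txt")                  # fetch from unicode.org
#   lines = read_unidata_file("Scripts.txt", "/path/to/ucd")  # read a local copy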
def parse_unidata_header(file_lines: List[str]) -> str:
"""Read the top header of data files, until the first line
that does not start with '#'.
"""
header = []
for line in file_lines:
if line.startswith("#"):
header.append(line)
else:
break
return "".join(header)
def parse_range_properties(infile: List[str], default=None, is_set=False):
"""Parse a Unicode data file containing a column with one character or
a range of characters, and another column containing a property value
separated by a semicolon. Comments after '#' are ignored.
    If the ranges defined in the data file are not contiguous, assign the
    'default' property to the unassigned codepoints.
    Return a list of (start, end, property_name) tuples covering the full
    Unicode repertoire.
"""
ranges = []
line_regex = re.compile(
r"^"
r"([0-9A-F]{4,6})" # first character code
r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code
r"\s*;\s*"
r"([^#]+)"
) # everything up to the potential comment
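    # The pattern matches UCD lines such as (Scripts.txt layout; the trailing
    # '#' comment, if any, is excluded by the '[^#]+' group and rstripped below):
    #   0041..005A    ; Latin
    #   00AA          ; Latin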
for line in infile:
match = line_regex.match(line)
if not match:
continue
first, last, data = match.groups()
if last is None:
last = first
first = int(first, 16)
last = int(last, 16)
data = str(data.rstrip())
ranges.append((first, last, data))
ranges.sort()
# fill the gaps between explicitly defined ranges
last_start, last_end = -1, -1
full_ranges = []
for start, end, value in ranges:
assert last_end < start
assert start <= end
if start - last_end > 1:
full_ranges.append((last_end + 1, start - 1, default))
if is_set:
value = set(value.split())
full_ranges.append((start, end, value))
last_start, last_end = start, end
if last_end != MAX_UNICODE:
full_ranges.append((last_end + 1, MAX_UNICODE, default))
    # reduce the total number of ranges by merging adjacent ones with the same value
last_start, last_end, last_value = full_ranges.pop(0)
merged_ranges = []
for start, end, value in full_ranges:
if value == last_value:
continue
else:
merged_ranges.append((last_start, start - 1, last_value))
            last_start, last_end, last_value = start, end, value
merged_ranges.append((last_start, MAX_UNICODE, last_value))
# make sure that the ranges cover the full unicode repertoire
assert merged_ranges[0][0] == 0
for (cs, ce, cv), (ns, ne, nv) in zip(merged_ranges, merged_ranges[1:]):
assert ce + 1 == ns
assert merged_ranges[-1][1] == MAX_UNICODE
return merged_ranges
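# A minimal worked example (made-up lines in the UCD format):
#   parse_range_properties(
#       ["0000..0040 ; Common\n", "0041..005A ; Latin\n"], default="Unknown"
#   )
# returns ranges padded out to the full repertoire:
#   [(0x0000, 0x0040, "Common"), (0x0041, 0x005A, "Latin"),
#    (0x005B, MAX_UNICODE, "Unknown")]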
def parse_semicolon_separated_data(infile):
"""Parse a Unicode data file where each line contains a lists of values
separated by a semicolon (e.g. "PropertyValueAliases.txt").
The number of the values on different lines may be different.
Returns a list of lists each containing the values as strings.
"""
data = []
for line in infile:
line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
fields = [str(field.strip()) for field in line.split(";")]
data.append(fields)
return data
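# Illustrative example in the PropertyValueAliases.txt layout:
#   parse_semicolon_separated_data(["sc ; Latn ; Latin # comment\n", "\n"])
# returns [["sc", "Latn", "Latin"]]; comments and blank lines are skipped.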
def _set_repr(value):
return (
"None"
if value is None
else "{{{}}}".format(", ".join(repr(v) for v in sorted(value)))
)
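# Illustrative examples:
#   _set_repr(None)              -> "None"
#   _set_repr({"Latn", "Grek"})  -> "{'Grek', 'Latn'}"  (sorted for stable output)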
def build_ranges(
filename, local_ucd=None, output_path=None, default=None, is_set=False, aliases=None
):
"""Fetch 'filename' UCD data file from Unicode official website, parse
the property ranges and values and write them as two Python lists
to 'fontTools.unicodedata.<filename>.py'.
'aliases' is an optional mapping of property codes (short names) to long
name aliases (list of strings, with the first item being the preferred
alias). When this is provided, the property values are written using the
short notation, and an additional 'NAMES' dict with the aliases is
written to the output module.
To load the data file from a local directory, you can use the
'local_ucd' argument.
"""
modname = os.path.splitext(filename)[0] + ".py"
if not output_path:
output_path = UNIDATA_PATH + modname
if local_ucd:
log.info("loading '%s' from local directory '%s'", filename, local_ucd)
else:
log.info("downloading '%s' from '%s'", filename, UNIDATA_URL)
file_lines = read_unidata_file(filename, local_ucd)
header = parse_unidata_header(file_lines)
ranges = parse_range_properties(file_lines, default=default, is_set=is_set)
if aliases:
reversed_aliases = {normalize(v[0]): k for k, v in aliases.items()}
max_value_length = 6 # 4-letter tags plus two quotes for repr
else:
max_value_length = min(56, max(len(repr(v)) for _, _, v in ranges))
with open(output_path, "w", encoding="utf-8") as f:
f.write(SRC_ENCODING)
f.write("#\n")
f.write(NOTICE)
f.write("# Source: {}{}\n".format(UNIDATA_URL, filename))
f.write("# License: {}\n".format(UNIDATA_LICENSE_URL))
f.write("#\n")
f.write(header + "\n")
f.write("RANGES = [\n")
for first, last, value in ranges:
f.write(
" 0x{:0>4X}, # .. 0x{:0>4X} ; {}\n".format(
first, last, _set_repr(value) if is_set else value
)
)
f.write("]\n")
f.write("\n")
f.write("VALUES = [\n")
for first, last, value in ranges:
comment = "# {:0>4X}..{:0>4X}".format(first, last)
if is_set:
value_repr = "{},".format(_set_repr(value))
else:
if aliases:
# append long name to comment and use the short code
comment += " ; {}".format(value)
value = reversed_aliases[normalize(value)]
value_repr = "{!r},".format(value)
f.write(
" {} {}\n".format(value_repr.ljust(max_value_length + 1), comment)
)
f.write("]\n")
if aliases:
f.write("\n")
f.write("NAMES = {\n")
for value, names in sorted(aliases.items()):
# we only write the first preferred alias
f.write(" {!r}: {!r},\n".format(value, names[0]))
f.write("}\n")
log.info("saved new file: '%s'", os.path.normpath(output_path))
_normalize_re = re.compile(r"[-_ ]+")
def normalize(string):
"""Remove case, strip space, '-' and '_' for loose matching."""
return _normalize_re.sub("", string).lower()
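# Illustrative examples:
#   normalize("Old_Italic")          -> "olditalic"
#   normalize("Canadian Aboriginal") -> "canadianaboriginal"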
def parse_property_value_aliases(property_tag, local_ucd=None):
"""Fetch the current 'PropertyValueAliases.txt' from the Unicode website,
parse the values for the specified 'property_tag' and return a dictionary
of name aliases (list of strings) keyed by short value codes (strings).
To load the data file from a local directory, you can use the
'local_ucd' argument.
"""
filename = "PropertyValueAliases.txt"
if local_ucd:
log.info("loading '%s' from local directory '%s'", filename, local_ucd)
else:
log.info("downloading '%s' from '%s'", filename, UNIDATA_URL)
file_lines = read_unidata_file(filename, local_ucd)
header = parse_unidata_header(file_lines)
data = parse_semicolon_separated_data(file_lines)
aliases = {item[1]: item[2:] for item in data if item[0] == property_tag}
return aliases
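# Illustrative result shape for property_tag="sc" (entries abridged; the real
# mapping comes from the current PropertyValueAliases.txt):
#   {"Latn": ["Latin"], "Grek": ["Greek"], "Zinh": ["Inherited", "Qaai"], ...}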
def main():
import argparse
parser = argparse.ArgumentParser(
description="Generate fontTools.unicodedata from UCD data files"
)
parser.add_argument(
"--ucd-path", help="Path to local folder containing UCD data files"
)
parser.add_argument("-q", "--quiet", action="store_true")
options = parser.parse_args()
level = "WARNING" if options.quiet else "INFO"
logging.basicConfig(level=level, format="%(message)s")
build_ranges("Blocks.txt", local_ucd=options.ucd_path, default="No_Block")
script_aliases = parse_property_value_aliases("sc", options.ucd_path)
build_ranges(
"Scripts.txt",
local_ucd=options.ucd_path,
default="Unknown",
aliases=script_aliases,
)
build_ranges("ScriptExtensions.txt", local_ucd=options.ucd_path, is_set=True)
if __name__ == "__main__":
import sys
sys.exit(main())