use f-strings to print

pull/299/head
guidotheelen 2020-04-22 16:51:57 +02:00
parent 36b0c736eb
commit 82a638993a
2 changed files with 30 additions and 36 deletions
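
For readers less familiar with the change, here is a minimal before/after sketch of the conversion this commit applies throughout the script (the values are illustrative, not taken from the repository):

```python
import sys

cp, fp = 0x1F600, 'emoji_u1f600.png'  # illustrative values only

# old style: %-formatting
print('check no VS: FE0F in path: %s' % fp)

# new style: f-string, as introduced in this commit
print(f'check no VS: FE0F in path: {fp}')

# format specs carry over as well: both of these print '1f600',
# while a bare f'{cp}' would print the decimal value 128512
print('%04x' % cp, file=sys.stderr)
print(f'{cp:04x}', file=sys.stderr)
```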


@@ -18,7 +18,7 @@ font will be at the top level.
 ## Check artwork coverage
-By running `make CHECK_COVERAGE="True"` the build process is proceeded by a coverage check. The source directory is compared against the unicode standard used in [Nototools](https://github.com/googlefonts/nototools). The check flags any unexpected and missing filenames.
+By running `make CHECK_COVERAGE="True"` the build process is preceeded by a coverage check. The source directory is compared against the Unicode standard used in [Nototools](https://github.com/googlefonts/nototools). The check flags any unexpected and missing filenames.

 ## Using NotoColorEmoji
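
The coverage check described above is, at its core, a filename set comparison. A minimal sketch of the idea, assuming a source directory of `emoji_uXXXX.png` files and an externally supplied set of expected names (the real check lives in the repository's check script and derives its expectations from Nototools' Unicode data; `coverage_report` and its arguments are illustrative, not the actual API):

```python
import os
import re

def coverage_report(src_dir, expected_names):
  """Flag artwork filenames that are unexpected or missing.

  expected_names is assumed to be a set such as {'emoji_u1f600.png', ...}.
  """
  pattern = re.compile(r'emoji_u[0-9a-f_]+\.png$')
  actual = {n for n in os.listdir(src_dir) if pattern.match(n)}
  expected = set(expected_names)
  for name in sorted(actual - expected):
    print(f'coverage: unexpected file {name}')
  for name in sorted(expected - actual):
    print(f'coverage: missing file {name}')
```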


@@ -78,7 +78,7 @@ def _check_no_vs(sorted_seq_to_filepath):
   """Our image data does not use emoji presentation variation selectors."""
   for seq, fp in sorted_seq_to_filepath.items():
     if EMOJI_VS in seq:
-      print('check no VS: FE0F in path: %s' % fp)
+      print(f'check no VS: FE0F in path: {fp}')


 def _check_valid_emoji_cps(sorted_seq_to_filepath, unicode_version):
@@ -108,13 +108,11 @@ def _check_valid_emoji_cps(sorted_seq_to_filepath, unicode_version):
   if len(not_emoji):
     print(
-        'check valid emoji cps: %d non-emoji cp found' % len(not_emoji),
-        file=sys.stderr)
+        f'check valid emoji cps: {len(not_emoji)} non-emoji cp found', file=sys.stderr)

     for cp in sorted(not_emoji):
       fps = not_emoji[cp]
       print(
-          'check valid emoji cps: %04x (in %d sequences)' % (cp, len(fps)),
-          file=sys.stderr)
+          f'check valid emoji cps: {cp} (in {len(fps)} sequences)', file=sys.stderr)


 def _check_zwj(sorted_seq_to_filepath):
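
One detail worth flagging in this hunk (and the similar ones further down): the old `%04x` conversion printed the code point as zero-padded hex, while a bare `{cp}` in an f-string prints the decimal value. If the hex output is meant to be kept, the format spec carries over directly; a sketch with hypothetical values:

```python
cp, fps = 0x20E3, ['keycap_a.png', 'keycap_b.png']  # hypothetical values
# prints 'check valid emoji cps: 20e3 (in 2 sequences)', matching the old %04x output
print(f'check valid emoji cps: {cp:04x} (in {len(fps)} sequences)')
```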
@@ -125,24 +123,24 @@ def _check_zwj(sorted_seq_to_filepath):
     if ZWJ not in seq:
       continue

     if seq[0] == ZWJ:
-      print('check zwj: zwj at head of sequence in %s' % fp, file=sys.stderr)
+      print(f'check zwj: zwj at head of sequence in {fp}', file=sys.stderr)
     if len(seq) == 1:
       continue
     if seq[-1] == ZWJ:
-      print('check zwj: zwj at end of sequence in %s' % fp, file=sys.stderr)
+      print(f'check zwj: zwj at end of sequence in {fp}', file=sys.stderr)

     for i, cp in enumerate(seq):
       if cp == ZWJ:
         if i > 0:
           pcp = seq[i-1]
           if pcp != EMOJI_VS and not unicode_data.is_emoji(pcp):
             print(
-                'check zwj: non-emoji %04x preceeds ZWJ in %s' % (pcp, fp),
+                f'check zwj: non-emoji {pcp} preceeds ZWJ in {fp}',
                 file=sys.stderr)
         if i < len(seq) - 1:
           fcp = seq[i+1]
           if not unicode_data.is_emoji(fcp):
             print(
-                'check zwj: non-emoji %04x follows ZWJ in %s' % (fcp, fp),
+                f'check zwj: non-emoji {fcp} follows ZWJ in {fp}',
                 file=sys.stderr)
@@ -157,13 +155,13 @@ def _check_flags(sorted_seq_to_filepath):
         have_reg = is_reg
       elif have_reg != is_reg:
         print(
-            'check flags: mix of regional and non-regional in %s' % fp,
+            f'check flags: mix of regional and non-regional in {fp}',
             file=sys.stderr)
     if have_reg and len(seq) > 2:
       # We provide dummy glyphs for regional indicators, so there are sequences
       # with single regional indicator symbols, the len check handles this.
       print(
-          'check flags: regional indicator sequence length != 2 in %s' % fp,
+          f'check flags: regional indicator sequence length != 2 in {fp}',
           file=sys.stderr)

 def _check_tags(sorted_seq_to_filepath):
@@ -179,13 +177,13 @@ def _check_tags(sorted_seq_to_filepath):
     if not overlap_set:
       continue
     if seq[0] != BLACK_FLAG:
-      print('check tags: bad start tag in %s' % fp)
+      print(f'check tags: bad start tag in {fp}')
     elif seq[-1] != END_TAG:
-      print('check tags: bad end tag in %s' % fp)
+      print(f'check tags: bad end tag in {fp}')
     elif len(seq) < 4:
-      print('check tags: sequence too short in %s' % fp)
+      print(f'check tags: sequence too short in {fp}')
     elif seq_set - TAG_SET != BLACK_FLAG_SET:
-      print('check tags: non-tag items in %s' % fp)
+      print(f'check tags: non-tag items in {fp}')


 def _check_skintone(sorted_seq_to_filepath):
@@ -199,15 +197,15 @@ def _check_skintone(sorted_seq_to_filepath):
       if i == 0:
         if len(seq) > 1:
           print(
-              'check skintone: skin color selector first in sequence %s' % fp,
+              f'check skintone: skin color selector first in sequence {fp}',
               file=sys.stderr)
         # standalone are ok
         continue
       pcp = seq[i-1]
       if not unicode_data.is_emoji_modifier_base(pcp):
         print(
-            'check skintone: emoji skintone modifier applied to non-base ' +
-            'at %d: %s' % (i, fp), file=sys.stderr)
+            f'check skintone: emoji skintone modifier applied to non-base at {i}: {fp}',
+            file=sys.stderr)
       else:
         if pcp not in base_to_modifiers:
           base_to_modifiers[pcp] = set()
@@ -229,7 +227,7 @@ def _check_zwj_sequences(sorted_seq_to_filepath, unicode_version):
       continue
     age = unicode_data.get_emoji_sequence_age(seq)
     if age is None or unicode_version is not None and age > unicode_version:
-      print('check zwj sequences: undefined sequence %s' % fp)
+      print(f'check zwj sequences: undefined sequence {fp}')


 def _check_no_alias_sources(sorted_seq_to_filepath):
@@ -238,7 +236,7 @@ def _check_no_alias_sources(sorted_seq_to_filepath):
   aliases = add_aliases.read_default_emoji_aliases()
   for seq, fp in sorted_seq_to_filepath.items():
     if seq in aliases:
-      print('check no alias sources: aliased sequence %s' % fp)
+      print(f'check no alias sources: aliased sequence {fp}')


 def _check_coverage(seq_to_filepath, unicode_version):
@@ -258,13 +256,12 @@ def _check_coverage(seq_to_filepath, unicode_version):
     if v not in seq_to_filepath and v not in non_vs_to_canonical:
       alias_str = unicode_data.seq_to_string(k)
       target_str = unicode_data.seq_to_string(v)
-      print('coverage: alias %s missing target %s' % (alias_str, target_str))
+      print(f'coverage: alias {alias_str} missing target {target_str}')
       continue
     if k in seq_to_filepath or k in non_vs_to_canonical:
       alias_str = unicode_data.seq_to_string(k)
       target_str = unicode_data.seq_to_string(v)
-      print('coverage: alias %s already exists as %s (%s)' % (
-          alias_str, target_str, seq_name(v)))
+      print(f'coverage: alias {alias_str} already exists as {target_str} ({seq_name(v)})')
       continue
     filename = seq_to_filepath.get(v) or seq_to_filepath[non_vs_to_canonical[v]]
     seq_to_filepath[k] = 'alias:' + filename
@@ -274,14 +271,13 @@ def _check_coverage(seq_to_filepath, unicode_version):
   for cp in emoji:
     if tuple([cp]) not in seq_to_filepath:
       print(
-          'coverage: missing single %04x (%s)' % (
-              cp, unicode_data.name(cp, '<no name>')))
+          f'coverage: missing single {cp} ({unicode_data.name(cp)})')

   # special characters
   # all but combining enclosing keycap are currently marked as emoji
   for cp in [ord('*'), ord('#'), ord(u'\u20e3')] + list(range(0x30, 0x3a)):
     if cp not in emoji and tuple([cp]) not in seq_to_filepath:
-      print('coverage: missing special %04x (%s)' % (cp, unicode_data.name(cp)))
+      print(f'coverage: missing special {cp} ({unicode_data.name(cp)})')

   # combining sequences
   comb_seq_to_name = sorted(
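
Two small behavior changes hide in this hunk: `%04x` again becomes a decimal `{cp}`, and the `'<no name>'` fallback passed to `unicode_data.name` is dropped. If the old output is wanted, both fit inside an f-string; a sketch only:

```python
# keeps the zero-padded hex and the '<no name>' fallback from the old line
print(f"coverage: missing single {cp:04x} ({unicode_data.name(cp, '<no name>')})")
```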
@@ -291,8 +287,7 @@ def _check_coverage(seq_to_filepath, unicode_version):
       # strip vs and try again
       non_vs_seq = unicode_data.strip_emoji_vs(seq)
       if non_vs_seq not in seq_to_filepath:
-        print('coverage: missing combining sequence %s (%s)' %
-              (unicode_data.seq_to_string(seq), name))
+        print(f'coverage: missing combining sequence {unicode_data.seq_to_string(seq)} ({name})')

   # check for 'unknown flag'
   # this is either emoji_ufe82b or 'unknown_flag', but we filter out things that
@@ -324,7 +319,7 @@ def create_sequence_to_filepath(name_to_dirpath, prefix, suffix):
   result = {}
   for name, dirname in name_to_dirpath.items():
     if not name.startswith(prefix):
-      print('expected prefix "%s" for "%s"' % (prefix, name))
+      print(f'expected prefix "{prefix}" for "{name}"')
       continue

     segments = name[len(prefix): -len(suffix)].split('_')
@@ -332,12 +327,12 @@ def create_sequence_to_filepath(name_to_dirpath, prefix, suffix):
     seq = []
     for s in segments:
       if not segment_re.match(s):
-        print('bad codepoint name "%s" in %s/%s' % (s, dirname, name))
+        print(f'bad codepoint name "{s}" in {dirname}/{name}')
         segfail = True
         continue
       n = int(s, 16)
       if n > 0x10ffff:
-        print('codepoint "%s" out of range in %s/%s' % (s, dirname, name))
+        print(f'codepoint "{s}" out of range in {dirname}/{name}')
         segfail = True
         continue
       seq.append(n)
@@ -384,13 +379,12 @@ def run_check(dirs, prefix, suffix, exclude, unicode_version, coverage):
   msg = ''
   if unicode_version:
     msg = ' (%3.1f)' % unicode_version
-  print('Checking files with prefix "%s" and suffix "%s"%s in:\n %s' % (
-      prefix, suffix, msg, '\n '.join(dirs)))
+  print(f'Checking files with prefix "{prefix}" and suffix "{suffix}"{msg} in: {dirs}')

   name_to_dirpath = collect_name_to_dirpath_with_override(
       dirs, prefix=prefix, suffix=suffix, exclude=exclude)
-  print('checking %d names' % len(name_to_dirpath))
+  print(f'checking {len(name_to_dirpath)} names')
   seq_to_filepath = create_sequence_to_filepath(name_to_dirpath, prefix, suffix)
-  print('checking %d sequences' % len(seq_to_filepath))
+  print(f'checking {len(seq_to_filepath)} sequences')
   check_sequence_to_filepath(seq_to_filepath, unicode_version, coverage)
   print('done running checks')
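
Two notes on this last hunk, offered as suggestions rather than fixes: the old message printed each directory on its own line via `'\n '.join(dirs)`, whereas `{dirs}` now prints the list's repr, and the `%3.1f` for `msg` is still %-formatted. A possible fully converted version that keeps the old output (a sketch, not part of the commit):

```python
# msg, prefix, suffix, dirs, unicode_version are the names already used in run_check
msg = f' ({unicode_version:3.1f})' if unicode_version else ''
dirs_str = '\n '.join(dirs)
print(f'Checking files with prefix "{prefix}" and suffix "{suffix}"{msg} in:\n {dirs_str}')
```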