Modernize Python 2 code to get ready for Python 3

pull/166/head
cclauss 2017-09-14 22:18:24 +02:00
parent 7159f45c58
commit 4607d55e4c
12 changed files with 137 additions and 125 deletions
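The edits below all follow the same pattern: add `from __future__ import print_function` at the top of each module so the Python 3 print function is available under Python 2.7, then rewrite every print statement as a function call. A minimal illustrative sketch of the four variants this commit touches (not taken from any of the changed files):

    from __future__ import print_function  # makes print() a function on Python 2.7
    import sys

    # Python 2:  print 'count: %d' % n
    print('count: %d' % 3)

    # Python 2:  print >> sys.stderr, 'something went wrong'
    print('something went wrong', file=sys.stderr)

    # Python 2:  print n,        (trailing comma suppresses the newline)
    print(3, end=' ')

    # Python 2:  print           (bare statement emits a blank line)
    print()

With the future import in place, each rewritten call behaves identically under Python 2.7 and Python 3, which is why it appears as the first added line in every file below.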

View File

@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
@ -31,7 +32,7 @@ DATA_ROOT = path.dirname(path.abspath(__file__))
def str_to_seq(seq_str):
res = [int(s, 16) for s in seq_str.split('_')]
if 0xfe0f in res:
print '0xfe0f in file name: %s' % seq_str
print('0xfe0f in file name: %s' % seq_str)
res = [x for x in res if x != 0xfe0f]
return tuple(res)
@ -66,7 +67,7 @@ def read_emoji_aliases(filename):
als_seq = tuple([int(x, 16) for x in als.split('_')])
trg_seq = tuple([int(x, 16) for x in trg.split('_')])
except:
print 'cannot process alias %s -> %s' % (als, trg)
print('cannot process alias %s -> %s' % (als, trg))
continue
result[als_seq] = trg_seq
return result
@ -82,7 +83,7 @@ def add_aliases(
be done. Dstdir will be created if necessary, even if dry_run is true."""
if not path.isdir(srcdir):
print >> sys.stderr, '%s is not a directory' % srcdir
print('%s is not a directory' % srcdir, file=sys.stderr)
return
if not dstdir:
@ -104,8 +105,8 @@ def add_aliases(
alias_exists = False
for als, trg in sorted(aliases.items()):
if trg not in seq_to_file:
print >> sys.stderr, 'target %s for %s does not exist' % (
seq_to_str(trg), seq_to_str(als))
print('target %s for %s does not exist' % (
seq_to_str(trg), seq_to_str(als)), file=sys.stderr)
continue
alias_name = '%s%s.%s' % (prefix, seq_to_str(als), ext)
alias_path = path.join(dstdir, alias_name)
@ -113,7 +114,7 @@ def add_aliases(
if replace:
aliases_to_replace.append(alias_name)
else:
print >> sys.stderr, 'alias %s exists' % seq_to_str(als)
print('alias %s exists' % seq_to_str(als), file=sys.stderr)
alias_exists = True
continue
target_file = seq_to_file[trg]
@ -123,15 +124,15 @@ def add_aliases(
if not dry_run:
for k in sorted(aliases_to_replace):
os.remove(path.join(dstdir, k))
print 'replacing %d files' % len(aliases_to_replace)
print('replacing %d files' % len(aliases_to_replace))
elif alias_exists:
print >> sys.stderr, 'aborting, aliases exist.'
print('aborting, aliases exist.', file=sys.stderr)
return
for k, v in sorted(aliases_to_create.items()):
if dry_run:
msg = 'replace ' if k in aliases_to_replace else ''
print '%s%s -> %s' % (msg, k, v)
print('%s%s -> %s' % (msg, k, v))
else:
try:
if copy:
@ -143,10 +144,10 @@ def add_aliases(
else:
raise Exception('can\'t create cross-directory symlinks yet')
except Exception as e:
print >> sys.stderr, 'failed to create %s -> %s' % (k, v)
print('failed to create %s -> %s' % (k, v), file=sys.stderr)
raise Exception('oops, ' + str(e))
print 'created %d %s' % (
len(aliases_to_create), 'copies' if copy else 'symlinks')
print('created %d %s' % (
len(aliases_to_create), 'copies' if copy else 'symlinks'))
def main():

View File

@ -16,6 +16,7 @@
# Google Author(s): Doug Felt
"""Tool to update GSUB, hmtx, cmap, glyf tables with svg image glyphs."""
from __future__ import print_function
import argparse
import glob

View File

@ -15,6 +15,7 @@
# limitations under the License.
"""Compare emoji image file namings against unicode property data."""
from __future__ import print_function
import argparse
import collections
@ -95,9 +96,9 @@ def _check_valid_emoji(sorted_seq_to_filepath):
not_emoji[cp].append(fp)
if len(not_emoji):
print >> sys.stderr, '%d non-emoji found:' % len(not_emoji)
print('%d non-emoji found:' % len(not_emoji), file=sys.stderr)
for cp in sorted(not_emoji):
print >> sys.stderr, '%04x (in %s)' % (cp, ', '.join(not_emoji[cp]))
print('%04x (in %s)' % (cp, ', '.join(not_emoji[cp])), file=sys.stderr)
def _check_zwj(sorted_seq_to_filepath):
@ -109,21 +110,21 @@ def _check_zwj(sorted_seq_to_filepath):
if ZWJ not in seq:
continue
if seq[0] == 0x200d:
print >> sys.stderr, 'zwj at head of sequence in %s' % fp
print('zwj at head of sequence in %s' % fp, file=sys.stderr)
if len(seq) == 1:
continue
if seq[-1] == 0x200d:
print >> sys.stderr, 'zwj at end of sequence in %s' % fp
print('zwj at end of sequence in %s' % fp, file=sys.stderr)
for i, cp in enumerate(seq):
if cp == ZWJ:
if i > 0:
pcp = seq[i-1]
if pcp != EMOJI_PRESENTATION_VS and not unicode_data.is_emoji(pcp):
print >> sys.stderr, 'non-emoji %04x preceeds ZWJ in %s' % (pcp, fp)
print('non-emoji %04x preceeds ZWJ in %s' % (pcp, fp), file=sys.stderr)
if i < len(seq) - 1:
fcp = seq[i+1]
if not unicode_data.is_emoji(fcp):
print >> sys.stderr, 'non-emoji %04x follows ZWJ in %s' % (fcp, fp)
print('non-emoji %04x follows ZWJ in %s' % (fcp, fp), file=sys.stderr)
def _check_flags(sorted_seq_to_filepath):
@ -136,11 +137,11 @@ def _check_flags(sorted_seq_to_filepath):
if have_reg == None:
have_reg = is_reg
elif have_reg != is_reg:
print >> sys.stderr, 'mix of regional and non-regional in %s' % fp
print('mix of regional and non-regional in %s' % fp, file=sys.stderr)
if have_reg and len(seq) > 2:
# We provide dummy glyphs for regional indicators, so there are sequences
# with single regional indicator symbols.
print >> sys.stderr, 'regional indicator sequence length != 2 in %s' % fp
print('regional indicator sequence length != 2 in %s' % fp, file=sys.stderr)
def _check_skintone(sorted_seq_to_filepath):
@ -153,13 +154,13 @@ def _check_skintone(sorted_seq_to_filepath):
if _is_skintone_modifier(cp):
if i == 0:
if len(seq) > 1:
print >> sys.stderr, 'skin color selector first in sequence %s' % fp
print('skin color selector first in sequence %s' % fp, file=sys.stderr)
# standalone are ok
continue
pcp = seq[i-1]
if not unicode_data.is_emoji_modifier_base(pcp):
print >> sys.stderr, (
'emoji skintone modifier applied to non-base at %d: %s' % (i, fp))
print((
'emoji skintone modifier applied to non-base at %d: %s' % (i, fp)), file=sys.stderr)
elif unicode_data.is_emoji_modifier_base(cp):
if i < len(seq) - 1 and _is_skintone_modifier(seq[i+1]):
base_to_modifiers[cp].add(seq[i+1])
@ -167,9 +168,9 @@ def _check_skintone(sorted_seq_to_filepath):
base_to_modifiers[cp] = set()
for cp, modifiers in sorted(base_to_modifiers.iteritems()):
if len(modifiers) != 5:
print >> sys.stderr, 'emoji base %04x has %d modifiers defined (%s) in %s' % (
print('emoji base %04x has %d modifiers defined (%s) in %s' % (
cp, len(modifiers),
', '.join('%04x' % cp for cp in sorted(modifiers)), fp)
', '.join('%04x' % cp for cp in sorted(modifiers)), fp), file=sys.stderr)
def _check_zwj_sequences(seq_to_filepath):
@ -189,7 +190,7 @@ def _check_zwj_sequences(seq_to_filepath):
for seq, fp in zwj_seq_to_filepath.iteritems():
if seq not in zwj_sequence_to_name:
if seq not in zwj_sequence_without_vs_to_name_canonical:
print >> sys.stderr, 'zwj sequence not defined: %s' % fp
print('zwj sequence not defined: %s' % fp, file=sys.stderr)
else:
_, can = zwj_sequence_without_vs_to_name_canonical[seq]
# print >> sys.stderr, 'canonical sequence %s contains vs: %s' % (
@ -211,7 +212,7 @@ def read_emoji_aliases():
try:
trg_seq = tuple([int(x, 16) for x in trg.split('_')])
except:
print 'cannot process alias %s -> %s' % (als, trg)
print('cannot process alias %s -> %s' % (als, trg))
continue
result[als_seq] = trg_seq
return result
@ -229,11 +230,11 @@ def _check_coverage(seq_to_filepath):
aliases = read_emoji_aliases()
for k, v in sorted(aliases.items()):
if v not in seq_to_filepath and v not in non_vs_to_canonical:
print 'alias %s missing target %s' % (_seq_string(k), _seq_string(v))
print('alias %s missing target %s' % (_seq_string(k), _seq_string(v)))
continue
if k in seq_to_filepath or k in non_vs_to_canonical:
print 'alias %s already exists as %s (%s)' % (
_seq_string(k), _seq_string(v), seq_name(v))
print('alias %s already exists as %s (%s)' % (
_seq_string(k), _seq_string(v), seq_name(v)))
continue
filename = seq_to_filepath.get(v) or seq_to_filepath[non_vs_to_canonical[v]]
seq_to_filepath[k] = 'alias:' + filename
@ -242,13 +243,13 @@ def _check_coverage(seq_to_filepath):
emoji = sorted(unicode_data.get_emoji(age=age))
for cp in emoji:
if tuple([cp]) not in seq_to_filepath:
print 'missing single %04x (%s)' % (cp, unicode_data.name(cp, '<no name>'))
print('missing single %04x (%s)' % (cp, unicode_data.name(cp, '<no name>')))
# special characters
# all but combining enclosing keycap are currently marked as emoji
for cp in [ord('*'), ord('#'), ord(u'\u20e3')] + range(0x30, 0x3a):
if cp not in emoji and tuple([cp]) not in seq_to_filepath:
print 'missing special %04x (%s)' % (cp, unicode_data.name(cp))
print('missing special %04x (%s)' % (cp, unicode_data.name(cp)))
# combining sequences
comb_seq_to_name = sorted(
@ -258,22 +259,22 @@ def _check_coverage(seq_to_filepath):
# strip vs and try again
non_vs_seq = strip_vs(seq)
if non_vs_seq not in seq_to_filepath:
print 'missing combining sequence %s (%s)' % (_seq_string(seq), name)
print('missing combining sequence %s (%s)' % (_seq_string(seq), name))
# flag sequences
flag_seq_to_name = sorted(
unicode_data.get_emoji_flag_sequences(age=age).iteritems())
for seq, name in flag_seq_to_name:
if seq not in seq_to_filepath:
print 'missing flag sequence %s (%s)' % (_seq_string(seq), name)
print('missing flag sequence %s (%s)' % (_seq_string(seq), name))
# skin tone modifier sequences
mod_seq_to_name = sorted(
unicode_data.get_emoji_modifier_sequences(age=age).iteritems())
for seq, name in mod_seq_to_name:
if seq not in seq_to_filepath:
print 'missing modifier sequence %s (%s)' % (
_seq_string(seq), name)
print('missing modifier sequence %s (%s)' % (
_seq_string(seq), name))
# zwj sequences
# some of ours include the emoji presentation variation selector and some
@ -294,14 +295,14 @@ def _check_coverage(seq_to_filepath):
else:
test_seq = seq
if test_seq not in zwj_seq_without_vs:
print 'missing (canonical) zwj sequence %s (%s)' % (
_seq_string(seq), name)
print('missing (canonical) zwj sequence %s (%s)' % (
_seq_string(seq), name))
# check for 'unknown flag'
# this is either emoji_ufe82b or 'unknown_flag', we filter out things that
# don't start with our prefix so 'unknown_flag' would be excluded by default.
if tuple([0xfe82b]) not in seq_to_filepath:
print 'missing unknown flag PUA fe82b'
print('missing unknown flag PUA fe82b')
def check_sequence_to_filepath(seq_to_filepath):
@ -322,7 +323,7 @@ def create_sequence_to_filepath(name_to_dirpath, prefix, suffix):
result = {}
for name, dirname in name_to_dirpath.iteritems():
if not name.startswith(prefix):
print 'expected prefix "%s" for "%s"' % (prefix, name)
print('expected prefix "%s" for "%s"' % (prefix, name))
continue
segments = name[len(prefix): -len(suffix)].split('_')
@ -330,12 +331,12 @@ def create_sequence_to_filepath(name_to_dirpath, prefix, suffix):
seq = []
for s in segments:
if not segment_re.match(s):
print 'bad codepoint name "%s" in %s/%s' % (s, dirname, name)
print('bad codepoint name "%s" in %s/%s' % (s, dirname, name))
segfail = True
continue
n = int(s, 16)
if n > 0x10ffff:
print 'codepoint "%s" out of range in %s/%s' % (s, dirname, name)
print('codepoint "%s" out of range in %s/%s' % (s, dirname, name))
segfail = True
continue
seq.append(n)
@ -356,8 +357,8 @@ def collect_name_to_dirpath(directory, prefix, suffix):
if not f.endswith(suffix):
continue
if f in result:
print >> sys.stderr, 'duplicate file "%s" in %s and %s ' % (
f, dirname, result[f])
print('duplicate file "%s" in %s and %s ' % (
f, dirname, result[f]), file=sys.stderr)
continue
result[f] = dirname
return result
@ -375,15 +376,15 @@ def collect_name_to_dirpath_with_override(dirs, prefix, suffix):
def run_check(dirs, prefix, suffix):
print 'Checking files with prefix "%s" and suffix "%s" in:\n %s' % (
prefix, suffix, '\n '.join(dirs))
print('Checking files with prefix "%s" and suffix "%s" in:\n %s' % (
prefix, suffix, '\n '.join(dirs)))
name_to_dirpath = collect_name_to_dirpath_with_override(
dirs, prefix=prefix, suffix=suffix)
print 'checking %d names' % len(name_to_dirpath)
print('checking %d names' % len(name_to_dirpath))
seq_to_filepath = create_sequence_to_filepath(name_to_dirpath, prefix, suffix)
print 'checking %d sequences' % len(seq_to_filepath)
print('checking %d sequences' % len(seq_to_filepath))
check_sequence_to_filepath(seq_to_filepath)
print 'done.'
print('done.')
def main():

View File

@ -15,6 +15,7 @@
# limitations under the License.
"""Generate a glyph name for flag emojis."""
from __future__ import print_function
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
@ -48,8 +49,8 @@ def flag_code_to_glyph_name(flag_code):
def main():
print ' '.join([
flag_code_to_glyph_name(flag_code) for flag_code in sys.argv[1:]])
print(' '.join([
flag_code_to_glyph_name(flag_code) for flag_code in sys.argv[1:]]))
if __name__ == '__main__':
main()

View File

@ -17,6 +17,7 @@
"""Quick tool to display count/ids of flag images in a directory named
either using ASCII upper case pairs or the emoji_u+codepoint_sequence
names."""
from __future__ import print_function
import argparse
import re
@ -44,7 +45,7 @@ def _flag_names_from_file_names(src):
for f in glob.glob(path.join(src, '*.png')):
m = flag_re.match(path.basename(f))
if not m:
print 'no match'
print('no match')
continue
flags.add(m.group(1))
return flags
@ -52,14 +53,14 @@ def _flag_names_from_file_names(src):
def _dump_flag_info(names):
prev = None
print '%d flags' % len(names)
print('%d flags' % len(names))
for n in sorted(names):
if n[0] != prev:
if prev:
print
print()
prev = n[0]
print n,
print
print(n, end=' ')
print()
def main():
@ -76,7 +77,7 @@ def main():
names = _flag_names_from_file_names(args.srcdir)
else:
names = _flag_names_from_emoji_file_names(args.srcdir)
print args.srcdir
print(args.srcdir)
_dump_flag_info(names)

View File

@ -19,6 +19,7 @@
This takes a list of directories containing emoji image files, and
builds an html page presenting the images along with their composition
(for sequences) and unicode names (for individual emoji)."""
from __future__ import print_function
import argparse
import codecs
@ -109,11 +110,11 @@ def _get_desc(key_tuple, aliases, dir_infos, basepaths):
if cp_key in aliases:
fp = get_key_filepath(aliases[cp_key])
else:
print 'no alias for %s' % unicode_data.seq_to_string(cp_key)
print('no alias for %s' % unicode_data.seq_to_string(cp_key))
if not fp:
print 'no part for %s in %s' % (
print('no part for %s in %s' % (
unicode_data.seq_to_string(cp_key),
unicode_data.seq_to_string(key_tuple))
unicode_data.seq_to_string(key_tuple)))
return fp
def _get_part(cp):
@ -153,7 +154,7 @@ def _get_name(key_tuple, annotations):
elif key_tuple == (0xfe82b,):
seq_name = '(unknown flag PUA codepoint)'
else:
print 'no name for %s' % unicode_data.seq_to_string(key_tuple)
print('no name for %s' % unicode_data.seq_to_string(key_tuple))
seq_name = '(oops)'
return CELL_PREFIX + seq_name
@ -308,8 +309,8 @@ def _get_image_data(image_dir, ext, prefix):
continue
result[cps] = filename
if fails:
print >> sys.stderr, 'get_image_data failed (%s, %s, %s):\n %s' % (
image_dir, ext, prefix, '\n '.join(fails))
print('get_image_data failed (%s, %s, %s):\n %s' % (
image_dir, ext, prefix, '\n '.join(fails)), file=sys.stderr)
raise ValueError('get image data failed')
return result
@ -356,9 +357,9 @@ def _add_aliases(keys, aliases):
v_str = unicode_data.seq_to_string(v)
if k in keys:
msg = '' if v in keys else ' but it\'s not present'
print 'have alias image %s, should use %s%s' % (k_str, v_str, msg)
print('have alias image %s, should use %s%s' % (k_str, v_str, msg))
elif v not in keys:
print 'can\'t use alias %s, no image matching %s' % (k_str, v_str)
print('can\'t use alias %s, no image matching %s' % (k_str, v_str))
to_add = {k for k, v in aliases.iteritems() if k not in keys and v in keys}
return keys | to_add
@ -449,9 +450,9 @@ def _instantiate_template(template, arg_dict):
keyset = set(arg_dict.keys())
extra_args = keyset - ids
if extra_args:
print >> sys.stderr, (
print((
'the following %d args are unused:\n%s' %
(len(extra_args), ', '.join(sorted(extra_args))))
(len(extra_args), ', '.join(sorted(extra_args)))), file=sys.stderr)
return string.Template(template).substitute(arg_dict)
@ -605,7 +606,7 @@ def main():
file_parts = path.splitext(args.outfile)
if file_parts[1] != '.html':
args.outfile = file_parts[0] + '.html'
print 'added .html extension to filename:\n%s' % args.outfile
print('added .html extension to filename:\n%s' % args.outfile)
if args.annotate:
annotations = _parse_annotation_file(args.annotate)

View File

@ -16,6 +16,7 @@
# limitations under the License.
"""Generate name data for emoji resources. Currently in json format."""
from __future__ import print_function
import argparse
import collections
@ -277,24 +278,24 @@ def generate_names(
verbose=False):
srcdir = tool_utils.resolve_path(src_dir)
if not path.isdir(srcdir):
print >> sys.stderr, '%s is not a directory' % src_dir
print('%s is not a directory' % src_dir, file=sys.stderr)
return
if omit_groups:
unknown_groups = set(omit_groups) - set(unicode_data.get_emoji_groups())
if unknown_groups:
print >> sys.stderr, 'did not recognize %d group%s: %s' % (
print('did not recognize %d group%s: %s' % (
len(unknown_groups), '' if len(unknown_groups) == 1 else 's',
', '.join('"%s"' % g for g in omit_groups if g in unknown_groups))
print >> sys.stderr, 'valid groups are:\n %s' % (
'\n '.join(g for g in unicode_data.get_emoji_groups()))
', '.join('"%s"' % g for g in omit_groups if g in unknown_groups)), file=sys.stderr)
print('valid groups are:\n %s' % (
'\n '.join(g for g in unicode_data.get_emoji_groups())), file=sys.stderr)
return
print 'omitting %d group%s: %s' % (
print('omitting %d group%s: %s' % (
len(omit_groups), '' if len(omit_groups) == 1 else 's',
', '.join('"%s"' % g for g in omit_groups))
', '.join('"%s"' % g for g in omit_groups)))
else:
# might be None
print 'keeping all groups'
print('keeping all groups')
omit_groups = []
# make sure the destination exists
@ -302,9 +303,9 @@ def generate_names(
tool_utils.resolve_path(dst_dir))
# _get_image_data returns canonical cp sequences
print 'src dir:', srcdir
print('src dir:', srcdir)
seq_to_file = generate_emoji_html._get_image_data(srcdir, 'png', 'emoji_u')
print 'seq to file has %d sequences' % len(seq_to_file)
print('seq to file has %d sequences' % len(seq_to_file))
# Aliases add non-gendered versions using gendered images for the most part.
# But when we display the images, we don't distinguish genders in the
@ -328,9 +329,9 @@ def generate_names(
if unicode_data.is_regional_indicator_seq(seq):
replace_seq = canonical_aliases[seq]
if seq in seq_to_file:
print 'warning, alias %s has file %s' % (
print('warning, alias %s has file %s' % (
unicode_data.regional_indicator_seq_to_string(seq),
seq_to_file[seq])
seq_to_file[seq]))
continue
replace_file = seq_to_file.get(replace_seq)
if replace_file:
@ -352,11 +353,11 @@ def generate_names(
skipcount += 1
if verbose:
if group != last_skipped_group:
print 'group %s' % group
print('group %s' % group)
last_skipped_group = group
print ' %s (%s)' % (
print(' %s (%s)' % (
unicode_data.seq_to_string(seq),
', '.join(unicode_data.name(cp, 'x') for cp in seq))
', '.join(unicode_data.name(cp, 'x') for cp in seq)))
if skip_limit >= 0 and skipcount > skip_limit:
raise Exception('skipped too many items')
else:
@ -368,7 +369,7 @@ def generate_names(
indent = 2 if pretty_print else None
separators = None if pretty_print else (',', ':')
json.dump(data, f, indent=indent, separators=separators)
print 'wrote %s' % outfile
print('wrote %s' % outfile)
def main():

View File

@ -1,3 +1,4 @@
from __future__ import print_function
import os
from os import path
import subprocess
@ -5,7 +6,7 @@ import subprocess
OUTPUT_DIR = '/tmp/placeholder_emoji'
def generate_image(name, text):
print name, text.replace('\n', '_')
print(name, text.replace('\n', '_'))
subprocess.check_call(
['convert', '-size', '100x100', 'label:%s' % text,
'%s/%s' % (OUTPUT_DIR, name)])
@ -75,13 +76,13 @@ with open('sequences.txt', 'r') as f:
elif is_flag_sequence(values):
text = ''.join(regional_to_ascii(cp) for cp in values)
elif has_color_patch(values):
print 'skipping color patch sequence %s' % seq
print('skipping color patch sequence %s' % seq)
elif is_keycap_sequence(values):
text = get_keycap_text(values)
else:
text = get_combining_text(values)
if not text:
print 'missing %s' % seq
print('missing %s' % seq)
if text:
if len(text) > 3:

View File

@ -15,6 +15,7 @@
#
# Google Author(s): Doug Felt
from __future__ import print_function
import argparse
import os
import os.path
@ -120,9 +121,9 @@ View using Firefox&nbsp;26 and later.
text_parts.append(text)
if verbosity and glyph and not found_initial_glyph:
print "Did not find glyph '%s', using initial glyph '%s'" % (glyph, initial_glyph_str)
print("Did not find glyph '%s', using initial glyph '%s'" % (glyph, initial_glyph_str))
elif verbosity > 1 and not glyph:
print "Using initial glyph '%s'" % initial_glyph_str
print("Using initial glyph '%s'" % initial_glyph_str)
lines = [header % font_name]
lines.append(body_head % {'font':font_name, 'glyph':initial_glyph_str,
@ -133,28 +134,28 @@ View using Firefox&nbsp;26 and later.
with open(html_name, 'w') as fp:
fp.write(output)
if verbosity:
print 'Wrote ' + html_name
print('Wrote ' + html_name)
def do_generate_fonts(template_file, font_basename, pairs, reuse=0, verbosity=1):
out_woff = font_basename + '.woff'
if reuse > 1 and os.path.isfile(out_woff) and os.access(out_woff, os.R_OK):
if verbosity:
print 'Reusing ' + out_woff
print('Reusing ' + out_woff)
return
out_ttx = font_basename + '.ttx'
if reuse == 0:
add_svg_glyphs.add_image_glyphs(template_file, out_ttx, pairs, verbosity=verbosity)
elif verbosity:
print 'Reusing ' + out_ttx
print('Reusing ' + out_ttx)
quiet=verbosity < 2
font = ttx.TTFont(flavor='woff', quiet=quiet)
font.importXML(out_ttx, quiet=quiet)
font.save(out_woff)
if verbosity:
print 'Wrote ' + out_woff
print('Wrote ' + out_woff)
def main(argv):
@ -193,7 +194,7 @@ def main(argv):
if not out_basename:
out_basename = args.template_file.split('.')[0] # exclude e.g. '.tmpl.ttx'
if args.v:
print "Output basename is %s." % out_basename
print("Output basename is %s." % out_basename)
do_generate_fonts(args.template_file, out_basename, pairs, reuse=args.reuse_font, verbosity=args.v)
do_generate_test_html(out_basename, pairs, glyph=args.glyph, verbosity=args.v)

View File

@ -16,6 +16,7 @@
"""Create a copy of the emoji images that instantiates aliases, etc. as
symlinks."""
from __future__ import print_function
import argparse
import glob
@ -68,10 +69,10 @@ def _alias_people(code_strings, dst):
if src[1:].lower() in code_strings:
src_name = 'emoji_%s.png' % src.lower()
ali_name = 'emoji_u%s.png' % ali.lower()
print 'creating symlink %s -> %s' % (ali_name, src_name)
print('creating symlink %s -> %s' % (ali_name, src_name))
os.symlink(path.join(dst, src_name), path.join(dst, ali_name))
else:
print >> os.stderr, 'people image %s not found' % src
print('people image %s not found' % src, file=os.stderr)
def _alias_flags(code_strings, dst):
@ -80,27 +81,27 @@ def _alias_flags(code_strings, dst):
if src_str in code_strings:
src_name = 'emoji_u%s.png' % src_str
ali_name = 'emoji_u%s.png' % _flag_str(ali)
print 'creating symlink %s (%s) -> %s (%s)' % (ali_name, ali, src_name, src)
print('creating symlink %s (%s) -> %s (%s)' % (ali_name, ali, src_name, src))
os.symlink(path.join(dst, src_name), path.join(dst, ali_name))
else:
print >> os.stderr, 'flag image %s (%s) not found' % (src_name, src)
print('flag image %s (%s) not found' % (src_name, src), file=os.stderr)
def _alias_omitted_flags(code_strings, dst):
UNKNOWN_FLAG = 'fe82b'
if UNKNOWN_FLAG not in code_strings:
print >> os.stderr, 'unknown flag missing'
print('unknown flag missing', file=os.stderr)
return
dst_name = 'emoji_u%s.png' % UNKNOWN_FLAG
dst_path = path.join(dst, dst_name)
for ali in sorted(OMITTED_FLAGS):
ali_str = _flag_str(ali)
if ali_str in code_strings:
print >> os.stderr, 'omitted flag %s has image %s' % (ali, ali_str)
print('omitted flag %s has image %s' % (ali, ali_str), file=os.stderr)
continue
ali_name = 'emoji_u%s.png' % ali_str
print 'creating symlink %s (%s) -> unknown_flag (%s)' % (
ali_str, ali, dst_name)
print('creating symlink %s (%s) -> unknown_flag (%s)' % (
ali_str, ali, dst_name))
os.symlink(dst_path, path.join(dst, ali_name))

View File

@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
@ -51,17 +52,17 @@ def strip_vs_from_filenames(imagedir, prefix, ext, dry_run=False):
if seq and EMOJI_VS in seq:
newname = '%s%s.%s' % (prefix, seq_to_str(strip_vs(seq)), ext)
if newname in names:
print >> sys.stderr, '%s non-vs name %s already exists.' % (
name, newname)
print('%s non-vs name %s already exists.' % (
name, newname), file=sys.stderr)
return
renames[name] = newname
for k, v in renames.iteritems():
if dry_run:
print '%s -> %s' % (k, v)
print('%s -> %s' % (k, v))
else:
os.rename(path.join(imagedir, k), path.join(imagedir, v))
print 'renamed %d files in %s' % (len(renames), imagedir)
print('renamed %d files in %s' % (len(renames), imagedir))
def main():

View File

@ -18,6 +18,7 @@
#
from __future__ import print_function
import sys, struct, StringIO
from png import PNG
import os
@ -374,7 +375,7 @@ def main (argv):
argv.remove (key)
if len (argv) < 4:
print >>sys.stderr, """
print("""
Usage:
emoji_builder.py [-V] [-O] [-U] [-A] font.ttf out-font.ttf strike-prefix...
@ -403,7 +404,7 @@ By default they are dropped.
If -C is given, unused chunks (color profile, etc) are NOT
dropped from the PNG images when embedding.
By default they are dropped.
"""
""", file=sys.stderr)
sys.exit (1)
font_file = argv[1]
@ -424,16 +425,16 @@ By default they are dropped.
pass
print
print()
font = ttx.TTFont (font_file)
print "Loaded font '%s'." % font_file
print("Loaded font '%s'." % font_file)
font_metrics = FontMetrics (font['head'].unitsPerEm,
font['hhea'].ascent,
-font['hhea'].descent)
print "Font metrics: upem=%d ascent=%d descent=%d." % \
(font_metrics.upem, font_metrics.ascent, font_metrics.descent)
print("Font metrics: upem=%d ascent=%d descent=%d." % \
(font_metrics.upem, font_metrics.ascent, font_metrics.descent))
glyph_metrics = font['hmtx'].metrics
unicode_cmap = font['cmap'].getcmap (3, 10)
if not unicode_cmap:
@ -453,11 +454,11 @@ By default they are dropped.
return cp >= 0xfe00 and cp <= 0xfe0f
for img_prefix in img_prefixes:
print
print()
img_files = {}
glb = "%s*.png" % img_prefix
print "Looking for images matching '%s'." % glb
print("Looking for images matching '%s'." % glb)
for img_file in glob.glob (glb):
codes = img_file[len (img_prefix):-4]
if "_" in codes:
@ -467,13 +468,13 @@ By default they are dropped.
else:
cp = int(codes, 16)
if is_vs(cp):
print "ignoring unexpected vs input %04x" % cp
print("ignoring unexpected vs input %04x" % cp)
continue
uchars = unichr(cp)
img_files[uchars] = img_file
if not img_files:
raise Exception ("No image files found in '%s'." % glb)
print "Found images for %d characters in '%s'." % (len (img_files), glb)
print("Found images for %d characters in '%s'." % (len (img_files), glb))
glyph_imgs = {}
advance = width = height = 0
@ -482,7 +483,7 @@ By default they are dropped.
try:
glyph_name = unicode_cmap.cmap[ord (uchars)]
except:
print "no cmap entry for %x" % ord(uchars)
print("no cmap entry for %x" % ord(uchars))
raise ValueError("%x" % ord(uchars))
else:
glyph_name = get_glyph_name_from_gsub (uchars, font, unicode_cmap.cmap)
@ -501,11 +502,11 @@ By default they are dropped.
glyphs = sorted (glyph_imgs.keys ())
if not glyphs:
raise Exception ("No common characters found between font and '%s'." % glb)
print "Embedding images for %d glyphs for this strike." % len (glyphs)
print("Embedding images for %d glyphs for this strike." % len (glyphs))
advance, width, height = (div (x, len (glyphs)) for x in (advance, width, height))
strike_metrics = StrikeMetrics (font_metrics, advance, width, height)
print "Strike ppem set to %d." % (strike_metrics.y_ppem)
print("Strike ppem set to %d." % (strike_metrics.y_ppem))
ebdt.start_strike (strike_metrics)
ebdt.write_glyphs (glyphs, glyph_imgs, image_format)
@ -513,21 +514,21 @@ By default they are dropped.
eblc.write_strike (strike_metrics, glyph_maps)
print
print()
ebdt = ebdt.data ()
add_font_table (font, 'CBDT', ebdt)
print "CBDT table synthesized: %d bytes." % len (ebdt)
print("CBDT table synthesized: %d bytes." % len (ebdt))
eblc.end_strikes ()
eblc = eblc.data ()
add_font_table (font, 'CBLC', eblc)
print "CBLC table synthesized: %d bytes." % len (eblc)
print("CBLC table synthesized: %d bytes." % len (eblc))
print
print()
if 'keep_outlines' not in options:
drop_outline_tables (font)
print "Dropped outline ('glyf', 'CFF ') and related tables."
print("Dropped outline ('glyf', 'CFF ') and related tables.")
# hack removal of cmap pua entry for unknown flag glyph. If we try to
# remove it earlier, getGlyphID dies. Need to restructure all of this
@ -535,7 +536,7 @@ By default they are dropped.
font_data.delete_from_cmap(font, [0xfe82b])
font.save (out_file)
print "Output font '%s' generated." % out_file
print("Output font '%s' generated." % out_file)
if __name__ == '__main__':