The original font is a bit of a mess and includes many duplicate characters. I decided to reorganize the characters into a new set of glyphs, in derived/font.png. This also includes very basic parsing for text data.
from parse.line import next_line, skip_whitespace
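# A sketch of the input these functions expect, inferred from the asserts
# below; the label and constant names are hypothetical:
#
#   ViridianCity_TextPointers:
#       def_text_pointers
#       dw_const ViridianCityGossiperText, TEXT_VIRIDIANCITY_GOSSIPER
#       text_end
#
#   ViridianCityGossiperText:
#       text_far _ViridianCityGossiperText
#       text_end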
def parse_dw_const(line):
    dw_const, args = line.split(maxsplit=1)
    assert dw_const == "dw_const", line
    args = args.split(';')[0]
    args = [arg.strip() for arg in args.split(',')]
    assert len(args) == 2, args
    assert (
        args[1].startswith('TEXT_')
        or args[1].startswith('SCRIPT_')
    ), args[1]
    return args
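# For example (hypothetical names):
#   parse_dw_const("dw_const GossiperText, TEXT_GOSSIPER")
# returns ['GossiperText', 'TEXT_GOSSIPER'].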
def tokenize_textpointer(lines):
    pointers = []
    while lines:
        lines_next, line = next_line(lines)
        if line is None:
            break
        if line.startswith("def_text_pointers"):
            pass
        elif line.startswith("text_end"):
            pass
        elif line.startswith(";"):
            pass
        elif line.startswith("dw_const"):
            pointer = parse_dw_const(line)
            pointers.append(pointer)
        else:
            assert (
                line is None
                or line.endswith(':')
                or line.startswith("def_trainers")
            ), line
            break
        lines = lines_next

    return lines, pointers
def tokenize_textpointers(lines):
    # skip lines until _TextPointers is found
    while lines:
        lines, line = next_line(lines)
        if line is None:
            break
        if not line.endswith('_TextPointers:'):
            continue

        name = line[:-1]
        lines, textpointers = tokenize_textpointer(lines)
        return name, textpointers
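# Note: tokenize_textpointers falls off the loop and returns None when a
# file has no _TextPointers label; tokenize() below checks for that case.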
def tokenize_text_far(lines):
    lines, far = next_line(lines)
    lines, end = next_line(lines)
    simple_far = (
        far is not None
        and end is not None
        and far.split()[0] == 'text_far'
        and end.split()[0] == 'text_end'
    )
    if not simple_far:
        return None
    else:
        _, far_label = far.split()
        assert far_label.startswith('_')
        return far_label
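# For example (hypothetical label), a body consisting of exactly
#     text_far _GossiperText
#     text_end
# yields '_GossiperText'; any other shape yields None.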
def find_text_far(labels, lines):
    while lines:
        lines, line = next_line(lines)
        if line is None:
            break
        if line.endswith(':') and line[:-1] in labels:
            label = line[:-1]
            text_far = tokenize_text_far(lines)
            if text_far is not None:
                yield label, text_far
def tokenize(lines):
    tokens = tokenize_textpointers(lines)
    if tokens is None:
        return None
    name, textpointers = tokens
    labels_l = [label for label, _ in textpointers]
    labels = set(labels_l)

    # now, resolve text_far
    text_far_l = list(find_text_far(labels, lines))
    text_far_d = dict(text_far_l)
    assert len(text_far_l) == len(text_far_d)
    textpointers_d = dict(
        (v, k) for k, v in textpointers
    )
    return name, (textpointers_d, text_far_d)
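# Illustratively (hypothetical names), tokenize returns something like:
#   ('ViridianCity_TextPointers',
#    ({'TEXT_GOSSIPER': 'GossiperText'},    # text constant -> label
#     {'GossiperText': '_GossiperText'}))   # label -> text_far label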
def parse(path):
    with open(path) as f:
        original_lines = f.read().split('\n')
    tokens = tokenize(original_lines)
    return tokens
def parse_all(prefix):
    base_path = prefix / 'scripts'
    paths = [p for p in base_path.iterdir()
             # fixme: ViridianMart is weird
             if p.is_file() and 'ViridianMart.asm' not in str(p)]
    return dict(filter(lambda x: x is not None,
                       (parse(path) for path in paths)))
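A rough usage sketch; the location of the scripts checkout is hypothetical:

    from pathlib import Path

    texts = parse_all(Path('pokered'))
    # texts maps each '<Map>_TextPointers' name to its pair of
    # (text constant -> label, label -> text_far label) dictionaries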