tools/parse: add dex_text
parent 96cc628664
commit 096769917a
@@ -1,27 +1,28 @@
+from functools import partial
 from itertools import chain
 
 def is_label(token_line):
     return token_line[0].endswith(':')
 
-def is_data(token_line):
-    return token_line[0] == 'db' or token_line[0] == 'dw' \
-        or token_line[0] == 'text_far'
+def is_data(token_line, data_tokens):
+    return token_line[0] in data_tokens
 
 Label = object()
 Data = object()
 
-def _event(token_line):
+def _event(token_line, data_tokens):
     if is_label(token_line):
         label0, = token_line
         yield Label, label0.split(':')[0]
-    elif is_data(token_line):
+    elif is_data(token_line, data_tokens):
         _, args = token_line
         yield Data, args
     else:
         return
 
-def event(tokens):
-    return list(chain.from_iterable(map(_event, tokens)))
+def event(tokens, data_tokens):
+    return list(chain.from_iterable(map(
+        partial(_event, data_tokens=data_tokens), tokens)))
 
 def pointer_table(type_args, ix):
     pointer_table = []
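The shape of the new API, as a minimal sketch. The token-line layout is an assumption inferred from the unpacking in _event (label0, = token_line for labels; _, args = token_line for data lines), and the names below are invented for illustration:

    from parse.generic import label_data

    tokens = [
        ['SomeLabel:'],          # one-element label line -> (Label, 'SomeLabel')
        ['db', ['1', '2']],      # keyword in data_tokens  -> (Data, ['1', '2'])
        ['done', []],            # keyword not in data_tokens -> dropped
    ]
    events = label_data.event(tokens, data_tokens={'dw', 'db', 'text_far'})
    # events == [(label_data.Label, 'SomeLabel'), (label_data.Data, ['1', '2'])]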
@@ -38,7 +38,8 @@ def dex_entry(type_args, ix):
     return ix+5, entry
 
 def build_tables(tokens):
-    type_args = label_data.event(tokens)
+    data_tokens = {'dw', 'db', 'text_far'}
+    type_args = label_data.event(tokens, data_tokens=data_tokens)
     ix = 0
     while ix < len(type_args):
         type, args = type_args[ix]
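The point of threading data_tokens through event is that each caller now declares which directives count as data; any other keyword falls through _event's else branch and is silently skipped. The three sets this commit passes:

    {'dw', 'db', 'text_far'}    # dex entry tables (above)
    {'text', 'next', 'page'}    # the new dex_text parser (below)
    {'dw', 'db'}                # evos/moves tables (below)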
@@ -0,0 +1,75 @@
+import builtins
+from dataclasses import dataclass
+
+from parse.generic import tokenize
+from parse.generic import number
+from parse.generic import string
+from parse.generic import label_data
+
+def _label(type, args):
+    assert type is label_data.Label, (type, args)
+    assert builtins.type(args) == str
+    return args
+
+def _string(type, args):
+    assert type is label_data.Data, (type, args)
+    # hmm, this is a bug in label_data.event
+    # ...too lazy to fix
+    arg = ','.join(args)
+    return string.parse(arg)
+
+def type_sequence(schema, type_args):
+    assert len(schema) == len(type_args)
+    return (
+        schema[i](*type_args[i])
+        for i in range(len(schema))
+    )
+
+@dataclass
+class DexText:
+    label: str
+    content: list[tuple[str, str]]
+
+def parse_text(type_args, ix):
+    schema = [
+        _label,
+        _string, _string, _string,
+        _string, _string, _string,
+    ]
+
+    label, \
+        text1, text2, text3, \
+        text4, text5, text6 \
+        = type_sequence(schema, type_args[ix:ix+7])
+
+    dex_text = DexText(
+        label,
+        [
+            ('text', text1),
+            ('next', text2),
+            ('next', text3),
+
+            ('page', text4),
+            ('next', text5),
+            ('next', text6),
+        ]
+    )
+    return ix+7, dex_text
+
+def parse_texts(tokens):
+    data_tokens = {'text', 'next', 'page'}
+    type_args = label_data.event(tokens, data_tokens=data_tokens)
+    ix = 0
+    while ix < len(type_args):
+        ix, dex_text = parse_text(type_args, ix)
+        yield dex_text
+
+def parse(prefix):
+    path = prefix / "data/pokemon/dex_text.asm"
+    with open(path) as f:
+        return list(parse_texts(tokenize.lines(f.read().split('\n'))))
+
+
+from pathlib import Path
+from pprint import pprint
+pprint(parse(Path("pokered")))
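End to end, parse_texts walks the event stream seven entries at a time: one label, then six strings, with text/next/next and page/next/next filling the two screens of a dex entry; a done line in the source file is neither a label nor in {'text', 'next', 'page'}, so _event drops it and the window stays aligned. A runnable approximation that hand-builds the event stream: the module name dex_text and the label are assumptions, and string.parse is taken to strip the surrounding quotes (the ','.join in _string re-fuses payloads that label_data.event split on commas, per the bug note in the code):

    from parse.generic import label_data
    import dex_text  # hypothetical name for the new module

    type_args = [
        (label_data.Label, 'SomePokedexText'),
        *[(label_data.Data, [f'"line {i}"']) for i in range(1, 7)],
    ]
    ix, entry = dex_text.parse_text(type_args, 0)
    # ix == 7
    # entry == DexText(label='SomePokedexText',
    #                  content=[('text', 'line 1'), ('next', 'line 2'),
    #                           ('next', 'line 3'), ('page', 'line 4'),
    #                           ('next', 'line 5'), ('next', 'line 6')])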
@@ -40,7 +40,8 @@ def parse_learnset_entry(args):
 def build_tables(tokens):
     evos_moves = EvosMoves()
 
-    type_args = label_data.event(tokens)
+    data_tokens = {'dw', 'db'}
+    type_args = label_data.event(tokens, data_tokens=data_tokens)
     ix = 0
     while ix < len(type_args):
         type, args = type_args[ix]