87 lines
2.3 KiB
Python
87 lines
2.3 KiB
Python
from dataclasses import dataclass
|
|
|
|
# data/maps/headers/AgathasRoom.asm
|
|
|
|
def tokenize_params(params):
    """Tokenize a list of raw macro-parameter strings.

    Each parameter is yielded stripped of surrounding whitespace.  A
    parameter containing '|' (a connection mask such as "NORTH | SOUTH")
    is split on the pipe and yielded as a list of stripped names; every
    other parameter is yielded as a plain string.
    """
    for param in params:
        if '|' in param:
            # Split on the bare '|' so masks written without surrounding
            # spaces (e.g. "NORTH|SOUTH") are split too; the original
            # split on ' |' and silently left such masks as one token.
            yield [p.strip() for p in param.split('|')]
        else:
            yield param.strip()
|
|
|
|
def tokenize_line(line):
    """Split one asm source line into a (key,) or (key, params) tuple.

    A trailing ';' comment is stripped first.  When the line is a bare
    key (no space), a 1-tuple is returned; otherwise the remainder is
    split on commas and run through tokenize_params.
    """
    code, _, _ = line.partition(';')
    code = code.strip()
    key, sep, rest = code.partition(' ')
    if not sep:
        # Bare keyword (or an empty line after comment stripping).
        return (key,)
    raw_params = [piece.strip() for piece in rest.split(',')]
    return key, list(tokenize_params(raw_params))
|
|
|
|
def tokenize_lines(lines):
    """Yield the token tuple for every non-empty line in *lines*."""
    for raw in lines:
        if raw:
            yield tokenize_line(raw)
|
|
|
|
@dataclass
class MapHeader:
    """Parsed representation of a single map_header macro invocation.

    name1/name2 are the two spellings of the map identifier used by the
    asm source (e.g. "PalletTown" / "PALLET_TOWN"); the accessor methods
    derive the conventional label and constant names from them.
    """
    name1: str                   # CamelCase label prefix (\1 in the macro)
    name2: str                   # UPPER_SNAKE constant prefix (\2 in the macro)
    tileset: str
    connection_names: list[str]  # not sure if this one is useful
    connections: list[list]

    def blocks(self):
        """Label of the map's block data (dw \\1_Blocks)."""
        return f"{self.name1}_Blocks"

    def text_pointers(self):
        """Label of the map's text pointer table (dw \\1_TextPointers)."""
        return f"{self.name1}_TextPointers"

    def script(self):
        """Label of the map's script (dw \\1_Script).

        Bug fix: the original ended in a trailing comma, so it returned a
        1-tuple, inconsistent with blocks()/text_pointers().
        """
        return f"{self.name1}_Script"

    def object(self):
        """Label of the map's object data (dw {\\1_Object})."""
        return f"{self.name1}_Object"

    def width(self):
        """Name of the map-width constant (\\2_WIDTH)."""
        return f"{self.name2}_WIDTH"

    def height(self):
        """Name of the map-height constant (\\2_HEIGHT)."""
        return f"{self.name2}_HEIGHT"
|
|
|
|
def flatten(tokens):
    """Build a MapHeader from the tokens of a single header file.

    Expects exactly one 'map_header' token, e.g. from:
        map_header PalletTown, PALLET_TOWN, OVERWORLD, NORTH | SOUTH
    whose macro expands to labels derived from the two names:
        dw \\1_Blocks / \\1_TextPointers / \\1_Script / {\\1_Object}
        \\2_WIDTH / \\2_HEIGHT
    Any 'connection' tokens become the MapHeader's connections list.
    """
    map_headers = [tok for tok in tokens if tok[0] == 'map_header']
    assert len(map_headers) == 1
    _, (name1, name2, tileset, connection_mask) = map_headers[0]

    # tokenize_params yields the mask as '0' (no connections), a single
    # direction string, or a list of directions ('NORTH | SOUTH').
    # Normalize all three so connection_names is always list[str]; the
    # original leaked the bare string through for a single connection.
    if connection_mask == '0':
        names = []
    elif isinstance(connection_mask, list):
        names = connection_mask
    else:
        names = [connection_mask]

    connections = [tuple(tok[1]) for tok in tokens if tok[0] == 'connection']
    return MapHeader(
        name1=name1,
        name2=name2,
        tileset=tileset,
        connection_names=names,
        connections=connections,
    )
|
|
|
|
def parse(path):
    """Read one map-header asm file and return its MapHeader."""
    with open(path) as source:
        raw_lines = source.read().split('\n')
    tokens = list(tokenize_lines(raw_lines))
    return flatten(tokens)
|
|
|
|
def parse_all(prefix):
    """Parse every map-header file under *prefix*/data/maps/headers.

    *prefix* is a pathlib.Path to the repository root.  Returns a list
    of MapHeader objects, one per header file, in sorted path order.
    """
    base_path = prefix / 'data/maps/headers'
    # sorted() makes the result order deterministic across filesystems;
    # the leftover debug pprint of the path list has been removed.
    paths = sorted(p for p in base_path.iterdir() if p.is_file())
    return [parse(path) for path in paths]
|