assembler: add minus token; disable eol tokens

Zack Buhman 2025-10-20 22:48:43 -05:00
parent 6f5007525c
commit 306de6541d
6 changed files with 15 additions and 16 deletions

@@ -18,7 +18,7 @@ def print_error(filename, buf, e):
         if i == token.col:
             sys.stderr.write(RED)
         sys.stderr.write(c)
-        if i == token.col + len(token.lexeme):
+        if i == token.col + len(token.lexeme) - 1:
             wrote_default = True
             sys.stderr.write(DEFAULT)
     if not wrote_default:
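
The `- 1` stops the color reset from landing one character past the token, which previously painted the character after the lexeme red as well. A minimal standalone sketch of the same loop (not the assembler's actual print_error; RED/DEFAULT are ordinary ANSI escapes):

RED = "\x1b[31m"
DEFAULT = "\x1b[0m"

def underline(line, col, length):
    out = []
    for i, c in enumerate(line):
        if i == col:
            out.append(RED)
        out.append(c)
        if i == col + length - 1:   # reset after the lexeme's last char
            out.append(DEFAULT)
    return "".join(out)

# "bar" (col 4, len 3) is colored; the trailing " baz" is not.
print(underline("foo bar baz", 4, 3))

With the old check (i == col + length), DEFAULT was written only after the character following the token had already been printed in red.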

@@ -8,7 +8,7 @@ from assembler.fs.emitter import emit_instruction
 from assembler.error import print_error
 
 def frontend_inner(buf):
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     for ins_ast in parser.instructions():
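
A hedged usage sketch of the new lexer flag; the import path and the shape of find_keyword are assumptions based on the imports visible in this hunk, not confirmed by the diff:

from assembler.fs.lexer import Lexer, find_keyword   # path assumed

buf = memoryview(b"out[0].none = temp[0].r = DP3 -src0.rg0 src0.rg0 ;")
lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
for token in lexer.lex_tokens():
    print(token)   # '-' should now surface as a TT.minus token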

@@ -103,7 +103,7 @@ class Parser(BaseParser):
         return False
 
     def is_neg(self):
-        result = self.match(TT.identifier) and self.peek().lexeme == b'-'
+        result = self.match(TT.minus)
         if result:
             self.advance()
         return result
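
is_neg no longer inspects an identifier's lexeme for b'-'; it matches the dedicated token type. A self-contained toy of the same optional-prefix pattern (match the current token, consume it only on a hit):

def parse_signed(tokens):
    pos = 0
    neg = tokens[pos] == "-"   # stand-in for self.match(TT.minus)
    if neg:
        pos += 1               # stand-in for self.advance()
    value = int(tokens[pos])
    return -value if neg else value

assert parse_signed(["-", "3"]) == -3
assert parse_signed(["3"]) == 3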
@@ -118,17 +118,12 @@ class Parser(BaseParser):
         neg = self.is_neg()
         abs = self.is_abs()
-        if neg:
-            self.consume(TT.left_paren, "expected left paren")
         sel_keyword = self.consume(TT.keyword, "expected sel keyword")
         self.consume(TT.dot, "expected dot")
         swizzle_identifier = self.consume(TT.identifier, "expected swizzle identifier")
         if abs:
             self.consume(TT.bar, "expected bar")
-        if neg:
-            self.consume(TT.right_paren, "expected right paren")
 
         mod_table = {
             # (neg, abs)
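
Judging by the dropped left_paren/right_paren consumes, a negated selector presumably no longer needs wrapping parentheses. A sketch of the assumed before/after surface syntax (inferred from the consumed tokens, not confirmed by the diff):

# Hypothetical inputs illustrating the grammar change:
old_form = b"- ( sel.xyz )"   # neg previously opened a parenthesized selector
new_form = b"- sel.xyz"       # the TT.minus token now prefixes the selector directly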
@@ -211,7 +206,7 @@ src0.a = float(0), src1.a = float(0), src2.a = float(0), srcp.a = neg2, src0.rgb
 out[0].none = temp[0].none = MAD src0.r src0.r src0.r ,
 out[0].none = temp[0].r = DP3 src0.rg0 src0.rg0 src0.rrr ;
 """
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     from pprint import pprint

@@ -512,7 +512,7 @@ src0.a = float(0), src0.rgb = temp[0] , srcp.a = neg :
 out[0].none = temp[0].none = MAD src0.r src0.r src0.r ,
 out[0].none = temp[0].r = DP3 src0.rg0 src0.rg0 ;
 """
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     try:

@@ -20,6 +20,7 @@ class TT(Enum):
     semicolon = auto()
     bar = auto()
     comma = auto()
+    minus = auto()
 
 @dataclass
 class Token:
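
A minimal standalone illustration of the Enum/auto pattern TT uses; appending minus at the end keeps existing members' values stable:

from enum import Enum, auto

class TT(Enum):
    semicolon = auto()
    bar = auto()
    comma = auto()
    minus = auto()   # new member; auto() assigns the next integer value

assert TT.minus.value == TT.comma.value + 1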
@@ -41,7 +42,9 @@ class LexerError(Exception):
     pass
 
 class Lexer:
-    def __init__(self, buf: memoryview, find_keyword, emit_newlines=True):
+    def __init__(self, buf: memoryview, find_keyword,
+                 emit_newlines=False,
+                 minus_is_token=False):
         self.start_ix = 0
         self.current_ix = 0
         self.buf = memoryview(buf)
@@ -49,6 +52,7 @@ class Lexer:
         self.col = 0
         self.find_keyword = find_keyword
         self.emit_newlines = emit_newlines
+        self.minus_is_token = minus_is_token
 
     def at_end_p(self):
         return self.current_ix >= len(self.buf)
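
Note that the emit_newlines default flips from True to False in the constructor above, which is the "disable eol tokens" half of this commit: any call site that never passed the flag silently stops receiving TT.eol tokens. A self-contained sketch of that behavior change (TinyLexer stands in for the real Lexer):

class TinyLexer:
    def __init__(self, buf, emit_newlines=False, minus_is_token=False):
        self.buf = buf
        self.emit_newlines = emit_newlines   # was True by default before this commit
        self.minus_is_token = minus_is_token

assert TinyLexer(b"").emit_newlines is False   # callers must now opt in to eol tokens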
@@ -108,6 +112,8 @@ class Lexer:
             return Token(*self.pos(), TT.semicolon, self.lexeme())
         elif c == ord(','):
             return Token(*self.pos(), TT.comma, self.lexeme())
+        elif self.minus_is_token and c == ord('-'):
+            return Token(*self.pos(), TT.minus, self.lexeme())
         elif c == ord('#'):
             while not self.at_end_p() and self.peek() != ord('\n'):
                 self.advance()
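
A self-contained toy of the flag-gated single-character branch: '-' only becomes its own token when the feature is enabled, otherwise it falls through to whatever handled it before (identifier lexing, in this codebase):

def scan_char(c, minus_is_token):
    table = {";": "semicolon", ",": "comma", "|": "bar"}
    if c in table:
        return table[c]
    if minus_is_token and c == "-":
        return "minus"
    return "identifier-ish"   # stand-in for the fall-through path

assert scan_char("-", minus_is_token=True) == "minus"
assert scan_char("-", minus_is_token=False) == "identifier-ish"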

@@ -174,21 +174,19 @@ class Parser(BaseParser):
         return Source(source_type, offset, source_swizzle)
 
     def instruction(self):
-        while self.match(TT.eol):
-            self.advance()
         first_token = self.peek()
         destination_op = self.destination_op()
         source0 = self.source()
-        if self.match(TT.eol) or self.match(TT.eof):
+        if self.match(TT.semicolon) or self.match(TT.eof):
             source1 = None
         else:
             source1 = self.source()
-        if self.match(TT.eol) or self.match(TT.eof):
+        if self.match(TT.semicolon) or self.match(TT.eof):
             source2 = None
         else:
             source2 = self.source()
         last_token = self.peek(-1)
-        self.consume_either(TT.eol, TT.eof, "expected newline or EOF")
+        self.consume(TT.semicolon, "expected semicolon")
         return (
             Instruction(destination_op, source0, source1, source2),
             (first_token.start_ix, last_token.start_ix + len(last_token.lexeme))
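
With eol tokens gone, an instruction is now terminated by ';' rather than end-of-line, so the leading eol-skipping loop is also dropped. A self-contained sketch of just the termination logic (token types are strings here; the real parser uses TT members and also parses a destination first):

def parse_sources(tokens):
    sources, pos = [], 0
    while tokens[pos] != ";" and len(sources) < 3:
        sources.append(tokens[pos])
        pos += 1
    assert tokens[pos] == ";", "expected semicolon"
    return sources

assert parse_sources(["src0", ";"]) == ["src0"]
assert parse_sources(["src0", "src1", "src2", ";"]) == ["src0", "src1", "src2"]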