assembler: add minus token; disable eol tokens
commit 306de6541d
parent 6f5007525c
@@ -18,7 +18,7 @@ def print_error(filename, buf, e):
         if i == token.col:
             sys.stderr.write(RED)
         sys.stderr.write(c)
-        if i == token.col + len(token.lexeme):
+        if i == token.col + len(token.lexeme) - 1:
             wrote_default = True
             sys.stderr.write(DEFAULT)
     if not wrote_default:
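The "- 1" fixes an off-by-one in the error underline: the reset escape has to be written right after the last character of the offending lexeme, not one character past it. A minimal standalone sketch of the loop, with hypothetical ANSI escapes standing in for the module's RED/DEFAULT constants:

    import sys

    RED, DEFAULT = "\x1b[31m", "\x1b[0m"  # hypothetical stand-ins

    def highlight(line, col, length):  # illustrative, not the real print_error
        wrote_default = False
        for i, c in enumerate(line):
            if i == col:
                sys.stderr.write(RED)
            sys.stderr.write(c)
            if i == col + length - 1:  # without "- 1" the reset lands one char late
                wrote_default = True
                sys.stderr.write(DEFAULT)
        if not wrote_default:  # lexeme ran to the end of the line
            sys.stderr.write(DEFAULT)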
@@ -8,7 +8,7 @@ from assembler.fs.emitter import emit_instruction
 from assembler.error import print_error
 
 def frontend_inner(buf):
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     for ins_ast in parser.instructions():
@@ -103,7 +103,7 @@ class Parser(BaseParser):
         return False
 
     def is_neg(self):
-        result = self.match(TT.identifier) and self.peek().lexeme == b'-'
+        result = self.match(TT.minus)
         if result:
             self.advance()
         return result
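Before this change a bare '-' reached the parser as an identifier token whose lexeme was b'-', so is_neg had to check the token type and compare the lexeme; with a dedicated TT.minus token the type check alone is enough. A self-contained sketch of the match-then-advance pattern, using a stripped-down stand-in for BaseParser:

    from collections import namedtuple
    from enum import Enum, auto

    class TT(Enum):            # trimmed to what this sketch needs
        minus = auto()
        identifier = auto()

    Token = namedtuple("Token", "tt lexeme")  # simplified stand-in

    class MiniParser:          # hypothetical stand-in for BaseParser
        def __init__(self, tokens):
            self.tokens, self.ix = tokens, 0

        def peek(self):
            return self.tokens[self.ix]

        def match(self, tt):
            return self.peek().tt == tt

        def advance(self):
            self.ix += 1

        def is_neg(self):
            result = self.match(TT.minus)  # one type check, no lexeme compare
            if result:
                self.advance()
            return result

    tokens = [Token(TT.minus, b'-'), Token(TT.identifier, b'src0')]
    assert MiniParser(tokens).is_neg()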
@@ -118,17 +118,12 @@ class Parser(BaseParser):
         neg = self.is_neg()
         abs = self.is_abs()
 
-        if neg:
-            self.consume(TT.left_paren, "expected left paren")
-
         sel_keyword = self.consume(TT.keyword, "expected sel keyword")
         self.consume(TT.dot, "expected dot")
         swizzle_identifier = self.consume(TT.identifier, "expected swizzle identifier")
 
         if abs:
             self.consume(TT.bar, "expected bar")
-        if neg:
-            self.consume(TT.right_paren, "expected right paren")
 
         mod_table = {
             # (neg, abs)
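Dropping the paren handling changes the accepted spelling of a negated operand: the '-' is now a bare prefix instead of wrapping the operand in parentheses. The spellings below are inferred from the tokens the old and new code consume; they are not shown verbatim in the diff:

    old_neg     = b"-(src0.rgb)"   # neg previously required wrapping parens
    new_neg     = b"-src0.rgb"     # neg is now just a prefix token
    new_neg_abs = b"-|src0.rgb|"   # abs bars still parse after the minus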
@@ -211,7 +206,7 @@ src0.a = float(0), src1.a = float(0), src2.a = float(0), srcp.a = neg2, src0.rgb
 out[0].none = temp[0].none = MAD src0.r src0.r src0.r ,
 out[0].none = temp[0].r = DP3 src0.rg0 src0.rg0 src0.rrr ;
 """
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     from pprint import pprint
@@ -512,7 +512,7 @@ src0.a = float(0), src0.rgb = temp[0] , srcp.a = neg :
 out[0].none = temp[0].none = MAD src0.r src0.r src0.r ,
 out[0].none = temp[0].r = DP3 src0.rg0 src0.rg0 ;
 """
-    lexer = Lexer(buf, find_keyword, emit_newlines=False)
+    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
     tokens = list(lexer.lex_tokens())
     parser = Parser(tokens)
     try:
@@ -20,6 +20,7 @@ class TT(Enum):
     semicolon = auto()
     bar = auto()
     comma = auto()
+    minus = auto()
 
 @dataclass
 class Token:
@@ -41,7 +42,9 @@ class LexerError(Exception):
     pass
 
 class Lexer:
-    def __init__(self, buf: memoryview, find_keyword, emit_newlines=True):
+    def __init__(self, buf: memoryview, find_keyword,
+                 emit_newlines=False,
+                 minus_is_token=False):
         self.start_ix = 0
         self.current_ix = 0
         self.buf = memoryview(buf)
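Note that besides gaining minus_is_token, the emit_newlines default flips from True to False; this is the "disable eol tokens" half of the commit message, so callers that still want newline tokens now have to opt in. A sketch of the two call styles (assuming Lexer and find_keyword are imported as elsewhere in the tree):

    buf = memoryview(b"")  # any source buffer

    # assembler frontend configuration: no eol tokens, '-' lexed as TT.minus
    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)

    # the old default (newline tokens) must now be requested explicitly
    legacy = Lexer(buf, find_keyword, emit_newlines=True)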
@@ -49,6 +52,7 @@ class Lexer:
         self.col = 0
         self.find_keyword = find_keyword
         self.emit_newlines = emit_newlines
+        self.minus_is_token = minus_is_token
 
     def at_end_p(self):
         return self.current_ix >= len(self.buf)
@@ -108,6 +112,8 @@ class Lexer:
             return Token(*self.pos(), TT.semicolon, self.lexeme())
         elif c == ord(','):
             return Token(*self.pos(), TT.comma, self.lexeme())
+        elif self.minus_is_token and c == ord('-'):
+            return Token(*self.pos(), TT.minus, self.lexeme())
         elif c == ord('#'):
             while not self.at_end_p() and self.peek() != ord('\n'):
                 self.advance()
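With the flag on, '-' gets the same single-character treatment as ';', '|', and ','; with it off, the lexer falls through to its old behaviour, so grammars that fold '-' into identifiers are unaffected. Roughly what the token stream might look like for a negated operand (the exact kinds around the minus are an inference, not verified output):

    lexer = Lexer(memoryview(b"-src0.r ;"), find_keyword, minus_is_token=True)
    kinds = [t.tt for t in lexer.lex_tokens()]
    # expected to begin with TT.minus instead of an identifier starting
    # with '-', e.g. roughly: [minus, keyword, dot, identifier, semicolon]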
@@ -174,21 +174,19 @@ class Parser(BaseParser):
         return Source(source_type, offset, source_swizzle)
 
     def instruction(self):
-        while self.match(TT.eol):
-            self.advance()
         first_token = self.peek()
         destination_op = self.destination_op()
         source0 = self.source()
-        if self.match(TT.eol) or self.match(TT.eof):
+        if self.match(TT.semicolon) or self.match(TT.eof):
             source1 = None
         else:
             source1 = self.source()
-        if self.match(TT.eol) or self.match(TT.eof):
+        if self.match(TT.semicolon) or self.match(TT.eof):
             source2 = None
         else:
             source2 = self.source()
         last_token = self.peek(-1)
-        self.consume_either(TT.eol, TT.eof, "expected newline or EOF")
+        self.consume(TT.semicolon, "expected semicolon")
         return (
             Instruction(destination_op, source0, source1, source2),
             (first_token.start_ix, last_token.start_ix + len(last_token.lexeme))
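With eol tokens disabled by default, the semicolon becomes the instruction terminator, matching the test programs above. A hypothetical driver showing the pieces together (import paths are assumptions; the diff does not show them):

    buf = memoryview(b"out[0].none = temp[0].r = DP3 src0.rg0 src0.rg0 ;")
    lexer = Lexer(buf, find_keyword, emit_newlines=False, minus_is_token=True)
    parser = Parser(list(lexer.lex_tokens()))
    ins, (start, end) = parser.instruction()
    # ins is an Instruction(destination_op, source0, source1, source2);
    # (start, end) is the instruction's byte span, used for error reporting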