def build_lexer(self, **kwargs):
    self.lexer = lex.lex(module=self, **kwargs)
tokens = [
    "PLUS",
    "MINUS",
    "NUMBER",
]
t_PLUS = r'\+'
t_MINUS = r'-'
def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t
# Ignored characters
t_ignore = " \t"
def t_newline(t):
    r'\n+'
    t.lexer.lineno += t.value.count("\n")
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer
lex.lex()
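# Quick usage sketch (not part of the original snippet): lex.lex() returns
# the lexer object, which can be fed a string and drained token by token.
lexer = lex.lex()
lexer.input("3 + 4 - 2")
while True:
    tok = lexer.token()
    if not tok:
        break
    print(tok.type, tok.value, tok.lineno, tok.lexpos)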
# Precedence rules for the arithmetic operators
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)
# dictionary of names (for storing variables)
names = { }
def p_statement_assign(p):
    'statement : NAME EQUALS expression'
    names[p[1]] = p[3]
def p_statement_expr(p):
    'statement : expression'
    print(p[1])
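# Usage sketch: building and running the parser. This assumes the rest of the
# calculator grammar (the expression rules, the NAME/EQUALS tokens and a
# p_error handler) is defined as in the standard PLY calc example that this
# snippet appears to follow.
import ply.yacc as yacc

parser = yacc.yacc()
while True:
    try:
        s = input('calc > ')
    except EOFError:
        break
    parser.parse(s)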
def _create_semi_token(self, orig_token):
    token = ply.lex.LexToken()
    token.type = 'SEMI'
    token.value = ';'
    if orig_token is not None:
        token.lineno = orig_token.lineno
        token.lexpos = orig_token.lexpos
    else:
        token.lineno = 0
        token.lexpos = 0
    return token
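# Illustrative sketch (not part of the original class): a synthetic token
# built this way can be spliced into the stream a parser sees, for example by
# appending one trailing SEMI once the wrapped lexer runs out of real tokens.
import ply.lex

def tokens_with_trailing_semi(lexer):
    last = None
    while True:
        tok = lexer.token()
        if tok is None:
            break
        last = tok
        yield tok
    semi = ply.lex.LexToken()
    semi.type = 'SEMI'
    semi.value = ';'
    semi.lineno = last.lineno if last is not None else 0
    semi.lexpos = last.lexpos if last is not None else 0
    yield semi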
def tokenize(data, debug=False, debuglog=None):
    lexer = ply.lex.lex(debug=debug, debuglog=debuglog)
    lexer.input(data)
    lexer.begin('ipunchecked')
    tokens = []
    while True:
        tok = lexer.token()
        if not tok:
            break
        tokens.append(tok)
    return tokens
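# Hypothetical usage (assumes the surrounding module defines the token rules
# and the 'ipunchecked' lexer state that tokenize() switches into):
for tok in tokenize("a = b + 1"):
    print(tok.type, tok.value, tok.lineno, tok.lexpos)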
def t_newline(t):
    r'\n+'
    t.lineno += len(t.value)
# Read in a symbol. This rule must be practically last since there are so few
# rules concerning what constitutes a symbol.
# Important for Read Macros --Must specify what isn't a Symbol!
def t_SYMBOL(t):
    r'[^0-9()\'\`\,\@\.][^()\ \t\n]*'
    return t
# These are the things that should be ignored.
t_ignore = ' \t'
# Handle errors.
def t_error(t):
    raise SyntaxError("syntax error on line %d near '%s'" %
                      (t.lineno, t.value))
# Build the lexer.
lexer = lex.lex()
def build(self, **kwargs):
    """Build the lexer."""
    self.lexer = ply.lex.lex(object=self, **kwargs)
def __init__(self):
    self.lexer = lex.lex(outputdir=_lex_dir, lextab=_lex_mod, optimize=1)
    self.token_stream = None
@ply.lex.TOKEN(quotedString)
def t_STRING_LITERAL(t):
    t.value = t.value[1:-1]
    return t
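# Self-contained sketch of the ply.lex.TOKEN pattern used above; the
# quotedString regex here is an assumption, not the original project's.
import ply.lex as lex
from ply.lex import TOKEN

tokens = ('STRING_LITERAL',)

quotedString = r'"(?:[^"\\\n]|\\.)*"'

@TOKEN(quotedString)
def t_STRING_LITERAL(t):
    t.value = t.value[1:-1]  # drop the surrounding quotes
    return t

t_ignore = ' \t'

def t_error(t):
    t.lexer.skip(1)

string_lexer = lex.lex()
string_lexer.input(r'"hello" "world"')
for tok in string_lexer:
    print(tok.type, tok.value)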
# this is incomplete, but should work for now
def t_supportsselparen_SUP_STATEMENT(t):
    r'[^(){}\n]+(\([^(){}]*\)\n)*[^(){}\n]*'
    return t
# Error handling rule
def t_ANY_error(t):
    raise error.MapCSSError("Illegal character '%s' at line %i position %i" %
                            (t.value[0], t.lexer.lineno, find_column(t.lexer.lexdata, t)))
# Define a rule so we can track line numbers
def t_ANY_newline(t):
    r'\r?\n'
    t.lexer.lineno += 1
lexer = lex.lex(reflags=re.DOTALL)
if __name__ == '__main__':
    lex.runmain()