    def test_lex_token5(self):
        try:
            run_import("lex_token5")
        except ply.lex.LexError:
            e = sys.exc_info()[1]
            self.assert_(check_expected(str(e), "lex_token5.py:19: Rule 't_NUMBER' returned an unknown token type 'NUM'"))
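
# A minimal sketch of the kind of module the test above imports: a lexer whose
# rule function returns a token type ('NUM') that is not declared in `tokens`,
# which makes lexer.token() raise ply.lex.LexError. (Hypothetical example, not
# the actual lex_token5.py from PLY's test suite.)
import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.type = 'NUM'      # not in `tokens`, so token() rejects it
    return t

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('3 + 4')
try:
    lexer.token()
except lex.LexError as e:
    print(e)            # ... Rule 't_NUMBER' returned an unknown token type 'NUM'
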
def loadDictionary(inputFile, dtsFormat):
    try:
        if dtsFormat == True:
            return dtsToDictionary(inputFile.read(), inputFile.name)
        else:
            return pickle.load(getattr(inputFile, 'buffer', inputFile))
    except (ply.lex.LexError, SyntaxError):
        sys.exit(1)
    except Exception:
        sys.exit(sys.exc_info())
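
# Why getattr(inputFile, 'buffer', inputFile)? pickle.load() needs a binary
# stream: when inputFile is a text stream such as sys.stdin, the underlying
# bytes stream is its .buffer attribute, while a file already opened in binary
# mode has no .buffer and is used as-is. The same idea in isolation:
import io
import pickle

def binary_stream(f):
    return getattr(f, 'buffer', f)   # text wrapper -> underlying bytes stream

payload = pickle.dumps({'hello': 'world'})
assert pickle.load(binary_stream(io.BytesIO(payload))) == {'hello': 'world'}
# binary_stream(sys.stdin) would likewise return sys.stdin.buffer for piped input
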
    except (LexError, YaccError, SyntaxError):
        addr_obj = None

    if addr_obj is None and not strict:
        addr_parts = address.split(' ')
        addr_spec = addr_parts[-1]
        if len(addr_spec) < len(address):
            try:
                parse_rs = parser.parse(addr_spec, lexer=lexer.clone())
                addr_obj = _lift_parse_result(parse_rs)
                if addr_obj:
                    addr_obj._display_name = ' '.join(addr_parts[:-1])
                    if isinstance(addr_obj._display_name, str):
                        addr_obj._display_name = addr_obj._display_name.decode('utf-8')
            except (LexError, YaccError, SyntaxError):
                addr_obj = None

    mtimes['parsing'] = time() - bstart
    return addr_obj, mtimes
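
# A self-contained sketch of the fallback pattern above (which is Python 2 era
# code, hence the .decode('utf-8') call): try a strict parse of the whole
# string, and on LexError/YaccError/SyntaxError retry on just the trailing
# addr-spec, treating the leading words as a display name. The
# parse_addr_spec() stand-in below is hypothetical; the excerpt's real parser
# and lexer are PLY-based objects from the surrounding module.
from ply.lex import LexError
from ply.yacc import YaccError

def parse_addr_spec(addr_spec):
    # Stand-in for a PLY-based addr-spec parser.
    if ' ' in addr_spec or addr_spec.count('@') != 1:
        raise SyntaxError('not an addr-spec: %r' % addr_spec)
    local, _, domain = addr_spec.partition('@')
    return {'local': local, 'domain': domain, 'display_name': ''}

def parse_relaxed(address):
    try:
        return parse_addr_spec(address)
    except (LexError, YaccError, SyntaxError):
        pass
    # Relaxed mode: split off a display name and re-parse only the addr-spec.
    addr_parts = address.split(' ')
    addr_spec = addr_parts[-1]
    if len(addr_spec) < len(address):
        try:
            addr_obj = parse_addr_spec(addr_spec)
            addr_obj['display_name'] = ' '.join(addr_parts[:-1])
            return addr_obj
        except (LexError, YaccError, SyntaxError):
            return None
    return None

print(parse_relaxed('John Smith john@example.com'))
# {'local': 'john', 'domain': 'example.com', 'display_name': 'John Smith'}
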
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None
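
# The excerpt above is PLY's own error path in Lexer.token(): if a t_error()
# rule is defined it must advance the position (for example with
# t.lexer.skip()), otherwise token() raises the "Scanning error" LexError;
# with no t_error() rule at all, token() raises the "Illegal character"
# LexError directly. A minimal sketch of the recovering style:
import ply.lex as lex

tokens = ('WORD',)
t_WORD = r'[a-z]+'
t_ignore = ' '

def t_error(t):
    print('skipping illegal character %r' % t.value[0])
    t.lexer.skip(1)   # move past the offending character so token() can continue

lexer = lex.lex()
lexer.input('abc $ def')
for tok in iter(lexer.token, None):
    print(tok.type, tok.value)   # WORD abc, WORD def; the '$' is reported and skipped
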
    def t_ANY_error(self, t):
        raise ply.lex.LexError('Illegal character on line ' + str(t.lexer.lineno) + ': ' + t.value, t.value)
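
# The 'ANY_' prefix makes a rule apply in every lexer state, so one error rule
# like the method above can cover a multi-state lexer. A minimal sketch (the
# state and token names below are invented for illustration):
import ply.lex as lex

class MiniLexer(object):
    states = (('string', 'exclusive'),)
    tokens = ('NAME', 'QUOTE', 'STRING')
    t_ignore = ' '
    t_string_ignore = ''

    def t_NAME(self, t):
        r'[A-Za-z]+'
        return t

    def t_QUOTE(self, t):
        r'"'
        t.lexer.begin('string')   # enter the 'string' state; the quote itself is dropped

    def t_string_STRING(self, t):
        r'[^"]+'
        return t

    def t_string_QUOTE(self, t):
        r'"'
        t.lexer.begin('INITIAL')  # leave the 'string' state

    def t_ANY_error(self, t):     # used in INITIAL and in 'string'
        raise lex.LexError('Illegal character on line ' + str(t.lexer.lineno) + ': ' + t.value, t.value)

    def build(self):
        return lex.lex(module=self)

lexer = MiniLexer().build()
lexer.input('abc "hi there" $')
try:
    for tok in iter(lexer.token, None):
        print(tok.type, tok.value)
except lex.LexError as e:
    print('lexer error:', e)
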
def convert(String):
    newString = seekAndReplaceMatrices(String)
    try:
        return parser.parse(newString)
    except ply.lex.LexError:
        reportProblem('LexError in:\n' + newString)
        return 'Bad Formula'
    except syntaxError:
        reportProblem('Syntax Error in:\n' + newString)
        return 'Bad Formula'
    except lexer.illegalCharacter:
        reportProblem('illegal character in:\n' + newString)
        return 'Bad Formula'
#EndOfFunction
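
# In the function above, parser, lexer, seekAndReplaceMatrices, reportProblem
# and syntaxError are presumably defined elsewhere in that module; the
# lexer.illegalCharacter handler suggests a custom exception raised from the
# lexer's own t_error() rule. A sketch of that pattern (the exception and rule
# below are assumptions for illustration, not taken from that project):
import ply.lex as lex

class illegalCharacter(Exception):
    pass

tokens = ('NAME',)
t_NAME = r'[A-Za-z]+'
t_ignore = ' '

def t_error(t):
    raise illegalCharacter('illegal character %r at position %d' % (t.value[0], t.lexpos))

formula_lexer = lex.lex()
formula_lexer.input('abc ? def')
try:
    while formula_lexer.token():
        pass
except illegalCharacter as e:
    print(e)   # illegal character '?' at position 4
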
    def t_STANDARD_KEYWORD(self, t):
        r'(?:!|)[A-Za-z_][0-9A-Za-z_]*'
        if self.compatibility_mode:
            t.value = t.value.upper()
        elif not t.value.isupper():
            raise LexError('Scanning error. Mixed/lower case keyword detected, please use compatibility_mode=True', t.value)
        if t.value in self.base_tokens:
            t.type = t.value
        elif t.value in self.active_schema:
            t.type = self.active_schema[t.value]
        elif t.value.startswith('!'):
            t.type = 'USER_DEFINED_KEYWORD'
        return t
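
# A stripped-down, self-contained sketch of how a rule like the one above fits
# into a class-based PLY lexer. The token names, base_tokens and active_schema
# values are invented for illustration; only the compatibility_mode handling
# mirrors the method above:
import ply.lex as lex
from ply.lex import LexError

class KeywordLexer(object):
    tokens = ('STANDARD_KEYWORD', 'USER_DEFINED_KEYWORD', 'FIELD_KEYWORD', 'SOLVE', 'END')
    base_tokens = {'SOLVE', 'END'}
    active_schema = {'TEMP': 'FIELD_KEYWORD'}
    t_ignore = ' \t'

    def __init__(self, compatibility_mode=False):
        self.compatibility_mode = compatibility_mode
        self.lexer = lex.lex(module=self)

    def t_STANDARD_KEYWORD(self, t):
        r'(?:!|)[A-Za-z_][0-9A-Za-z_]*'
        if self.compatibility_mode:
            t.value = t.value.upper()          # old-style input: fold to upper case
        elif not t.value.isupper():
            raise LexError('Scanning error. Mixed/lower case keyword detected, '
                           'please use compatibility_mode=True', t.value)
        if t.value in self.base_tokens:
            t.type = t.value
        elif t.value in self.active_schema:
            t.type = self.active_schema[t.value]
        elif t.value.startswith('!'):
            t.type = 'USER_DEFINED_KEYWORD'
        return t

    def t_error(self, t):
        t.lexer.skip(1)

lexer = KeywordLexer(compatibility_mode=True)
lexer.lexer.input('solve temp end !user_flag')
print([(tok.type, tok.value) for tok in iter(lexer.lexer.token, None)])
# [('SOLVE', 'SOLVE'), ('FIELD_KEYWORD', 'TEMP'), ('END', 'END'),
#  ('USER_DEFINED_KEYWORD', '!USER_FLAG')]
# With compatibility_mode=False, lower-case keywords raise ply.lex.LexError instead.
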