Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — this line is the tail of a list comprehension whose
# head (and its enclosing helper, presumably parse_list()) lies outside this
# chunk; the regex splits "name=value[:extra]" triples out of a .mc name list.
re.findall(r'\s*([^=\s]+)\s*=\s*([^:\s]+)\s*(?::\s*(\S*))?', s)]
# outputs:
out_header = '' # generated .h file
out_rc = '' # generated .rc file
out_bins = {} # .bin files referenced by .rc file
# Two-state scanner: HEADER covers the .mc header block, MESSAGE the
# per-message blocks that follow it.
HEADER, MESSAGE = range(2)
state = HEADER
symbolic_name = None
message_id = 0 # FIXME: Make this per-facility
message_id_typedef = ''
language = 'English'
severity = 0
facility = 0
messages = {}
# Drain the PLY lexer token by token (iter() stops at the None sentinel).
for tok in iter(lex.token, None):
if tok.type == 'COMMENT':
# FIXME: These aren't output as they're encountered, instead all comments
# in the header block and before each message block are collected and then
# output all together in front of the next output chunk.
out_header += tok.value[1:].replace('\r', '') + '\n' # Strip leading ';'
continue
if state == HEADER:
# FIXME: FACILITYNAMES, SEVERITYNAMES aren't printed in the order in the
# .mc file, instead they're first collected and then printed once the header
# block is done.
if tok.type == 'MESSAGEIDTYPEDEF':
# Remember the cast to wrap emitted message ids in, e.g. "(DWORD)".
message_id_typedef = '(%s)' % tok.value.split('=', 1)[1].strip()
continue
if tok.type == 'SEVERITYNAMES':
# NOTE(review): fragment — the loop body continues past the end of
# this chunk; severity_names handling is not visible here.
severity_names = parse_list(tok.value)
def t_error(t):
    """PLY lexer error hook: report an illegal character and resume.

    Prints the offending character and its line number, then skips one
    input character so tokenization can continue past it.
    """
    # print() works on both Python 2 and 3; the file already uses the
    # function form elsewhere.
    print("Illegal character '%s' at line %d" % (t.value[0], t.lexer.lineno))
    # Modern PLY exposes skip() on the lexer, not on the token object.
    t.lexer.skip(1)
# Build the lexer and feed it the contents of the file named by the
# last command-line argument.
lex.lex()
import sys
# Context manager closes the handle deterministically; the original
# open(...).read() left it to the garbage collector.
with open(sys.argv[-1]) as src:
    gdl = src.read()
lex.input(gdl)
if __name__ == "__main__":
    # Debug driver: dump every token until the lexer is exhausted.
    # iter() with a None sentinel replaces the manual while/break loop,
    # and print() matches the function form already used in this file.
    for tok in iter(lex.token, None):
        print(tok)
forall A B : Prop, A -> (~ ~ A) \/ B
Unnamed_thm < 2 |Unnamed_thm| 0 <
""",
"""
1 subgoal
============================
True -> True
Unnamed_thm < 2 |Unnamed_thm| 0 <
""",
)
# Tokenize the second sample transcript and echo each token.
lex.input(s[1])
# iter() with a None sentinel replaces the manual while/break loop.
for tok in iter(lex.token, None):
    print(tok)
# Just some test code
if __name__ == "__main__":
    # Sample symbol table used to exercise the expression evaluator.
    local_env = {
        "A": "1",
        "C": "foo",
        "D": "20",
        "E": 0x100,
        "F": "baz",
    }
    # Build the parser once; the original rebuilt it for every line.
    parser = yacc.yacc()
    # Context manager closes the file (original leaked the handle) and
    # iterating the file object streams lines without readlines().
    with open(sys.argv[1]) as src:
        for line in src:
            lex.input(line)
            for tok in iter(lex.token, None):
                print(tok.type, tok.value)
            print(parser.parse(line))
            print(parse(line, local_env, None))
# NOTE(review): fragment — the `if` branch that pairs with the `else:` below
# was lost from this chunk (it presumably tested whether a filename argument
# was supplied); indentation was also flattened during extraction.
f = open(sys.argv[1],"r")
data = f.read()
f.close()
else:
# No file argument: fall back to reading lines from stdin until EOF.
data = ""
while 1:
try:
# raw_input() is Python 2 only; it raises EOFError at end of input.
data += raw_input() + "\n"
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# `except EOFError:` would be the precise guard here.
except:
break
lex.input(data)
# Tokenize
while 1:
tok = lex.token()
if not tok: break # No more input
print tok
def toks():
    """Consume and print every remaining token from the module lexer.

    Debug helper: drains the module-level PLY lexer until it returns
    None, printing each token; returns None.
    """
    # iter() with a None sentinel replaces the manual while/break loop,
    # and print() matches the function form already used in this file.
    for tok in iter(lex.token, None):
        print(tok)
@token(r'\"([^\\\n]|(\\(.|\n)))*?\"')
def t_CPP_STRING(self, t):
    """Recognize a C/C++ double-quoted string literal.

    Keeps the lexer's line counter accurate by adding the number of
    newlines embedded in the matched literal, then returns the token
    unchanged.
    """
    embedded_newlines = t.value.count("\n")
    t.lexer.lineno += embedded_newlines
    return t
# NOTE(review): the rest of this chunk is three near-duplicate, truncated
# fragments of CppHeaderParser's header-ingestion loop; the enclosing
# method's `def` line is not visible and indentation was lost in
# extraction, so the code is kept byte-identical and only annotated.
for m in matches:
#Keep the newlines so that linecount doesn't break
num_newlines = len([a for a in m if a=="\n"])
headerFileStr = headerFileStr.replace(m, "\n" * num_newlines)
# Drop `extern "C"` markers from the header text before lexing.
headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr)
self.braceDepth = 0
# (Re)build the lexer and feed it the pre-processed header text.
lex.lex()
lex.input(headerFileStr)
# Module-level cursor, presumably used by error reporting elsewhere
# in the file — TODO confirm.
global curLine
global curChar
curLine = 0
curChar = 0
# NOTE(review): the matching `except` for this `try` is outside the chunk.
try:
while True:
tok = lex.token()
if not tok: break
# Count down pending anonymous-union members once back at the brace
# depth where the union was declared.
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
# Wrap the raw value so its source line number travels with it.
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
# Preprocessor lines are buffered, not parsed as C++ tokens.
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
continue
if (tok.type == 'OPEN_BRACE'):
if len(self.nameStack) >= 2 and is_namespace(self.nameStack): # namespace {} with no name used in boost, this sets default?
if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C"
#Strip it out but keep the linecount the same so line numbers are right
match_str = headerFileStr[locStart:locEnd]
debug_print("Striping out '%s'"%match_str)
num_newlines = len([a for a in match_str if a=="\n"])
headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)
# NOTE(review): second copy of the ingestion-loop fragment begins here;
# it differs from the first in clearing stack/nameStack after buffering
# a preprocessor macro.
self.braceDepth = 0
lex.lex()
lex.input(headerFileStr)
global curLine
global curChar
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
if not tok: break
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
# Reset the token stacks after buffering a preprocessor line.
self.stack = []
self.nameStack = []
continue
if (tok.type == 'OPEN_BRACE'):
#Strip it out but keep the linecount the same so line numbers are right
match_str = headerFileStr[locStart:locEnd]
debug_print("Striping out '%s'"%match_str)
num_newlines = len([a for a in match_str if a=="\n"])
headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines)
# NOTE(review): third copy of the fragment; this one additionally skips
# pushing TEMPLATE_NAME tokens onto the stack.
self.braceDepth = 0
lex.lex()
lex.input(headerFileStr)
global curLine
global curChar
curLine = 0
curChar = 0
try:
while True:
tok = lex.token()
if not tok: break
if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]:
self.anon_union_counter[1] -= 1
tok.value = TagStr(tok.value, lineno=tok.lineno)
#debug_print("TOK: %s"%tok)
if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue
if tok.type != 'TEMPLATE_NAME':
self.stack.append( tok.value )
curLine = tok.lineno
curChar = tok.lexpos
if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')):
debug_print("PRECOMP: %s"%tok)
self._precomp_macro_buf.append(tok.value)
self.stack = []
self.nameStack = []
# NOTE(review): fragment cut off here; the loop continues past this chunk.
continue