How to use the ply.lex.lex function in ply

To help you get started, we’ve selected a few ply examples based on popular ways ply.lex.lex is used in public projects.

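The snippets below come from real projects. As a baseline, here is a minimal, self-contained sketch of the lex.lex() workflow they all build on; the token names and rules are illustrative, not taken from any of the projects:

import ply.lex as lex

# Token names must be declared before the rules that produce them.
tokens = ('NUMBER', 'PLUS')

# Simple tokens can be plain regex strings.
t_PLUS = r'\+'

# Characters to skip between tokens.
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)

# lex.lex() collects the tokens list and t_* rules from the calling scope.
lexer = lex.lex()
lexer.input("1 + 22")
for tok in lexer:
    print(tok.type, tok.value)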

Example from echronos/echronos (external_tools/ply_info/example/newclasscalc/calc.py):
# (constructor of the example's parser class; the class statement is not shown)
def __init__(self, **kw):
        self.debug = kw.get('debug', 0)
        self.names = { }
        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
        except:
            modname = "parser"+"_"+self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"
        #print self.debugfile, self.tabmodule

        # Build the lexer and parser
        lex.lex(module=self, debug=self.debug)
        yacc.yacc(module=self,
                  debug=self.debug,
                  debugfile=self.debugfile,
                  tabmodule=self.tabmodule)
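The constructor above is only part of the picture: with module=self, PLY discovers the t_* and p_* rules defined as methods on the same instance. A stripped-down, hypothetical version of the whole class-based pattern looks roughly like this:

import ply.lex as lex
import ply.yacc as yacc

class Calc(object):
    # Illustrative names; the real example defines many more rules.
    tokens = ('NUMBER',)
    t_ignore = ' \t'

    def t_NUMBER(self, t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(self, t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    def p_statement_expr(self, p):
        'statement : NUMBER'
        print(p[1])

    def p_error(self, p):
        print("Syntax error at %s" % p)

    def __init__(self):
        # Rules are looked up on self, so subclasses can add or override them.
        self.lexer = lex.lex(module=self)
        self.parser = yacc.yacc(module=self, write_tables=False, debug=False)

c = Calc()
c.parser.parse("42", lexer=c.lexer)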
Example from ialbert/pyblue-central (pyblue/parser.py):
    # (excerpt begins mid-function, inside a lexer factory; the enclosing
    # def and the preceding token rule are not shown)
        return t

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_NAME(t):
        r'[\w!?+()$@*^#%&`~<>{}\.\-\/]+'
        return t

    def t_error(t):
        print "Illegal character '%s'" % t.value[0]
        t.lexer.skip(1)

    return lex.lex()
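lex.lex() inspects the calling scope, so defining the rules inside a function, as this example does, yields a fresh, independent lexer on each call. A self-contained sketch of the same factory pattern, with made-up names:

import ply.lex as lex

def make_lexer():
    tokens = ('NAME', 'NUMBER')
    t_ignore = ' \t'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_NAME(t):
        r'[A-Za-z_]\w*'
        return t

    def t_error(t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    # Picks up the locals of this function, not the module globals.
    return lex.lex()

lexer = make_lexer()
lexer.input("foo 42")
for tok in lexer:
    print(tok.type, tok.value)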
Example from salspaugh/splparser (splparser/cmdparsers/headlexer.py):
# (excerpt begins inside an earlier token rule; its def line is not shown)
    t.type = type_if_reserved(t, 'NBSTR')
    t.lexer.begin('ipunchecked')
    return t

def t_COLON(t):
    r':'
    t.lexer.begin('ipunchecked')
    return t

def t_error(t):
    badchar = t.value[0]
    t.lexer.skip(1)
    t.lexer.begin('ipunchecked')
    raise SPLSyntaxError("Illegal character in eval lexer '%s'" % badchar)

lexer = ply.lex.lex()

def tokenize(data, debug=False, debuglog=None):
    lexer = ply.lex.lex(debug=debug, debuglog=debuglog)
    lexer.input(data)
    lexer.begin('ipunchecked')
    tokens = []
    while True:
        tok = lexer.token()
        if not tok: break
        tokens.append(tok)
    return tokens

if __name__ == "__main__":
    import sys
    print(tokenize(' '.join(sys.argv[1:])))
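The begin('ipunchecked') calls above switch the lexer among states declared in a module-level states tuple. Here is a minimal, self-contained sketch of how states work; the 'quoted' state and its rules are invented for illustration:

import ply.lex as lex

states = (
    ('quoted', 'exclusive'),  # hypothetical state name
)

tokens = ('QUOTE', 'WORD')

t_ignore = ' \t'
t_quoted_ignore = ' \t'

def t_QUOTE(t):
    r'"'
    t.lexer.begin('quoted')   # enter the exclusive state
    return t

def t_quoted_QUOTE(t):
    r'"'
    t.lexer.begin('INITIAL')  # return to the default state
    return t

def t_WORD(t):
    r'\w+'
    return t

def t_quoted_WORD(t):
    r'[^"\s]+'
    return t

def t_error(t):
    t.lexer.skip(1)

def t_quoted_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('say "hello!" now')
print([(tok.type, tok.value) for tok in lexer])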
Example from xuqix/CppHeaderParser (CppHeaderParser/CppHeaderParser.py):
# (excerpt begins inside the comment-token rule; its def line is not shown)
    global doxygenCommentCache
    if t.value.startswith("/**") or t.value.startswith("/*!"):
        #not sure why, but get double new lines
        v = t.value.replace("\n\n", "\n")
        #strip prefixing whitespace
        v = re.sub(r"\n[\s]+\*", "\n*", v)
        doxygenCommentCache += v
    t.lexer.lineno += len([a for a in t.value if a=="\n"])

def t_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

def t_error(v):
    print(( "Lex error: ", v ))

lex.lex()
# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0

def error_print(arg):
    if print_errors: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))

def warning_print(arg):
    if print_warnings: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))

def debug_print(arg):
    if debug: print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))
Example from aurzenligl/prophy (prophyc/parsers/prophy.py):
def __init__(self):
        self._init_parse_data()
        self.lexer = lex.lex(module=self, debug=0)
        self.yacc = yacc.yacc(module=self, tabmodule='parsetab_prophy', write_tables=0, debug=0)
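Passing write_tables=0 here keeps yacc from writing its parsetab module to disk, and debug=0 suppresses the parser.out debugging file, which matters when the parser is built at import time inside an installed library.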
Example from kofemann/pynfs (xdr/xdrgen.py):
# Comments
def t_comment(t):
    r'/\*(.|\n)*?\*/'
    t.lexer.lineno += t.value.count('\n')

def t_linecomment(t):
    r'%.*\n'
    t.lexer.lineno += 1

def t_error(t):
    print("Illegal character %s at %d type %s" % (repr(t.value[0]), t.lexer.lineno, t.type))
    t.lexer.skip(1)

# Build the lexer
lex.lex(debug=0)


##########################################################################
#                                                                        #
#                          Yacc Parsing Info                             #
#                                                                        #
##########################################################################

def p_specification(t):
    '''specification : definition_list'''

def p_definition_list(t):
    '''definition_list : definition definition_list 
                       | empty'''

def p_definition(t):
    ...  # (excerpt is truncated here; the remaining grammar rules are not shown)
Example from andrewchambers/pycc (mdcompiler/mdcompiler.py):
# (excerpt begins inside an earlier grammar rule; its def line is not shown)
    p[0] = p[1]


# Error handling rule
def t_error(t):
    print "Illegal character '%s'" % t.value[0]
    t.lexer.skip(1)


# Error rule for syntax errors
def p_error(p):
    raise Exception("Syntax error in input! %s" % p)

# Build the lexer and parser
lexer = lex.lex()
parser = yacc.yacc()


# Support functions

def isAsmStr(s):
    if s.startswith("#asm"):
        return True
    if "return" in s:
        return False
    return True


def isValidType(tname):
    return tname in ['I32','I8','Pointer', '_']
Example from ContinuumIO/pykit (pykit/deps/pycparser/c_lexer.py):
def build(self, **kwargs):
        """ Builds the lexer from the specification. Must be
            called after the lexer object is created.

            This method exists separately, because the PLY
            manual warns against calling lex.lex inside
            __init__
        """
        self.lexer = lex.lex(object=self, **kwargs)
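Following that advice, the object is constructed first and the lexer is built in a second step. A self-contained sketch of the same object-based pattern; the class and rule names are hypothetical, not pycparser's:

import ply.lex as lex

class MyLexer(object):
    tokens = ('ID',)
    t_ignore = ' \t'

    def t_ID(self, t):
        r'[A-Za-z_]\w*'
        return t

    def t_error(self, t):
        print("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    def build(self, **kwargs):
        # Deferred so that lex.lex is not called from __init__.
        self.lexer = lex.lex(object=self, **kwargs)

m = MyLexer()
m.build()
m.lexer.input("foo bar")
print([tok.value for tok in m.lexer])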
Example from filvarga/srv6-mobile (src/tools/vppapigen/vppapigen.py):
def __init__(self, debug=False, filename='', logger=None):
        self.lexer = lex.lex(module=VPPAPILexer(filename), debug=debug)
        self.parser = yacc.yacc(module=VPPAPIParser(filename, logger),
                                write_tables=False, debug=debug)
        self.logger = logger
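Note that module= accepts any object whose attributes define the rules, so vppapigen can hand lex.lex a configured VPPAPILexer instance and let the rules close over per-file state. A hypothetical lexer using the same trick:

import ply.lex as lex

class FileLexer(object):
    # Hypothetical class mirroring the VPPAPILexer(filename) pattern above.
    tokens = ('WORD',)
    t_ignore = ' \t\n'

    def __init__(self, filename):
        self.filename = filename

    def t_WORD(self, t):
        r'\w+'
        return t

    def t_error(self, t):
        # Instance state is available inside the rule methods.
        print("%s: illegal character '%s'" % (self.filename, t.value[0]))
        t.lexer.skip(1)

lexer = lex.lex(module=FileLexer('example.api'))
lexer.input("hello ?world")
print([tok.value for tok in lexer])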