def p_inputunit(p):
    '''inputunit : simple_list simple_list_terminator
                 | NEWLINE
                 | error NEWLINE
                 | EOF'''
    # XXX
    if p.lexer._parserstate & flags.parser.CMDSUBST:
        # inside a command substitution: record that the eof token was seen
        p.lexer._parserstate.add(flags.parser.EOFTOKEN)

    if isinstance(p[1], ast.node):
        p[0] = p[1]
        # accept right here in case the input contains more lines that are
        # not part of the current command
        p.accept()
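p_inputunit is a yacc-style production in the convention of PLY: the docstring declares the grammar alternatives and p[i] indexes the matched symbols. Note that p.accept() is not part of stock PLY; bashlex appears to ship a modified copy of PLY's yacc module that supports accepting early. As a minimal standalone sketch of the convention only (this is not bashlex's actual grammar), a toy PLY parser for semicolon-separated words:

import ply.lex as lex
import ply.yacc as yacc

tokens = ('WORD', 'SEMICOLON')

t_WORD = r'[A-Za-z]+'
t_SEMICOLON = r';'
t_ignore = ' '

def t_error(t):
    t.lexer.skip(1)

def p_list(p):
    '''list : list SEMICOLON WORD
            | WORD'''
    # p[0] is the value of the rule; p[1..n] are the matched symbols
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1] + [p[3]]

def p_error(p):
    raise SyntaxError(p)

lexer = lex.lex()
parser = yacc.yacc()
print(parser.parse('echo; ls; pwd'))  # ['echo', 'ls', 'pwd']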
parserstate = lambda: utils.typedset(flags.parser)
"}" : tokentype.RIGHT_CURLY,
"!" : tokentype.BANG,
"[[" : tokentype.COND_START,
"]]" : tokentype.COND_END,
"coproc" : tokentype.COPROC
}
class MatchedPairError(errors.ParsingError):
    def __init__(self, startline, message, tokenizer):
        # TODO use startline?
        super(MatchedPairError, self).__init__(message,
                                               tokenizer.source,
                                               tokenizer._shell_input_line_index - 1)
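MatchedPairError is raised when the tokenizer hits end of input while scanning for the closing half of a paired construct such as a quote or brace. A sketch of how this surfaces through the public API, assuming an unterminated quote triggers it; since it subclasses errors.ParsingError, catching the base class is the safe pattern:

import bashlex
from bashlex import errors

try:
    bashlex.parse('echo "unterminated')
except errors.ParsingError as e:
    print(e)  # carries the message, the source, and the offset where scanning gave up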
wordflags = flags.word
parserflags = flags.parser
class token(object):
    def __init__(self, type_, value, pos=None, flags=None):
        if type_ is not None:
            assert isinstance(type_, tokentype)
        if flags is None:
            flags = set()
        self.ttype = type_
        self.value = value
        if pos is not None:
            # pos is a (start, end) pair of offsets into the source
            self.lexpos = pos[0]
            self.endlexpos = pos[1]
            assert self.lexpos < self.endlexpos, (self.lexpos, self.endlexpos)
        self.flags = flags
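Assuming the tokentype enum referenced by the reserved-word table above, constructing a token might look like this (hypothetical values):

# hypothetical example; pos is the (start, end) offset pair validated above
t = token(tokentype.RIGHT_PAREN, ')', pos=(3, 4))
assert t.ttype is tokentype.RIGHT_PAREN
assert (t.lexpos, t.endlexpos) == (3, 4)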
def _parsedolparen(parserobj, base, sindex):
    # parse the contents of a $(...) command substitution by running a
    # nested parse that stops at the matching right paren
    copiedps = copy.copy(parserobj.parserstate)
    copiedps.add(flags.parser.CMDSUBST)
    copiedps.add(flags.parser.EOFTOKEN)

    string = base[sindex:]

    # seed the nested tokenizer with the outer tokenizer's lookbehind so it
    # picks up exactly where the outer parse left off
    tokenizerargs = {'eoftoken' : tokenizer.token(tokenizer.tokentype.RIGHT_PAREN, ')'),
                     'parserstate' : copiedps,
                     'lastreadtoken' : parserobj.tok._last_read_token,
                     'tokenbeforethat' : parserobj.tok._token_before_that,
                     'twotokensago' : parserobj.tok._two_tokens_ago}

    node, endp = _recursiveparse(parserobj, base, sindex, tokenizerargs)

    # if the nested parse did not stop exactly on ')', back off trailing newlines
    if string[endp] != ')':
        while endp > 0 and string[endp-1] == '\n':
            endp -= 1

    return node, sindex + endp
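End to end, this recursive parse is what lets the public API represent a $() construct as a nested node. For example:

import bashlex

tree = bashlex.parse('echo $(ls -l)')[0]
print(tree.dump())  # the word "$(ls -l)" carries a nested commandsubstitution node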
def p_simple_list(p):
    '''simple_list : simple_list1
                   | simple_list1 AMPERSAND
                   | simple_list1 SEMICOLON'''
    tok = p.lexer
    heredoc.gatherheredocuments(tok)

    if len(p) == 3 or len(p[1]) > 1:
        parts = p[1]
        if len(p) == 3:
            # keep the trailing '&' or ';' as an explicit operator node
            parts.append(ast.node(kind='operator', op=p[2], pos=p.lexspan(2)))
        p[0] = ast.node(kind='list', parts=parts, pos=_partsspan(parts))
    else:
        # a single command with no terminator is returned unwrapped
        assert len(p[1]) == 1
        p[0] = p[1][0]

    if (len(p) == 2 and p.lexer._parserstate & flags.parser.CMDSUBST and
            p.lexer._current_token.nopos() == p.lexer._shell_eof_token):
        # accept the input
        p.accept()
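So a multi-part input becomes a list node whose parts interleave commands with operator nodes, while a lone command is returned as-is. For example:

import bashlex

print(bashlex.parse('echo a; echo b')[0].dump())
# a ListNode containing two CommandNodes separated by an OperatorNode for ';'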