def test_expansion_limit(self):
    cmd = 'a $(b $(c))'
    m = matcher.matcher(cmd, s)
    m.match()

    # the matcher parses with an expansion limit, so only the outer
    # command substitution should end up in the resulting ast
    class depthchecker(bashlex.ast.nodevisitor):
        def __init__(self):
            self.depth = 0
            self.maxdepth = 0
        def visitnode(self, node):
            if 'substitution' in node.kind:
                self.depth += 1
                self.maxdepth = max(self.maxdepth, self.depth)
        def visitendnode(self, node):
            if 'substitution' in node.kind:
                self.depth -= 1

    v = depthchecker()
    v.visit(m.ast)
    self.assertEquals(v.maxdepth, 1)
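
# --- illustrative sketch, not part of the original tests ---
# The same visitor pattern against the public bashlex API: parse() returns a
# list of AST trees, and a bashlex ast.nodevisitor subclass gets visitnode()/
# visitendnode() callbacks before and after each node's children are visited.
from bashlex import ast, parse

class substitutiondepth(ast.nodevisitor):
    def __init__(self):
        self.depth = 0
        self.maxdepth = 0
    def visitnode(self, node):
        # called before descending into the node's children
        if 'substitution' in node.kind:
            self.depth += 1
            self.maxdepth = max(self.maxdepth, self.depth)
    def visitendnode(self, node):
        # called after the node's children have been visited
        if 'substitution' in node.kind:
            self.depth -= 1

visitor = substitutiondepth()
for tree in parse('a $(b $(c))'):
    visitor.visit(tree)
print(visitor.maxdepth)  # 2 here, since parse() expands nested substitutions by default
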
def assertASTEquals(self, s, expected, **parserargs):
    results = parse(s, **parserargs)
    self.assertTrue(len(results) == 1, 'expected one ast from parse(), '
                    'got %d' % len(results))
    result = results[0]

    # make sure our words are not empty
    class nullopvisitor(ast.nodevisitor):
        # '_' takes the visitor instance so that 'self' still refers
        # to the enclosing test case
        def visitword(_, node, word):
            self.assertTrue(word, 'node %r has no word' % node)

    nullopvisitor().visit(result)

    msg = 'ASTs not equal for %r\n\nresult:\n\n%s\n\n!=\n\nexpected:\n\n%s' % (
        s, result.dump(), expected.dump())
    self.assertEquals(result, expected, msg)
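
# --- illustrative sketch, not part of the original tests ---
# Inspecting a parse result the way the assertion message above does: parse()
# returns a list of trees and every node has a dump() helper that renders the
# tree as an indented string.
from bashlex import parse

result = parse('a "b" $c')[0]
print(result.kind)    # 'command'
print(result.dump())  # nested word nodes with their positions
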
def test_expansion_limit(self):
    '''make sure the expansion limit is working by tracking recursive
    parsing count, and also checking that the word isn't expanded'''
    counter = [0]

    class countingparser(parser._parser):
        def __init__(self, *args, **kwargs):
            super(countingparser, self).__init__(*args, **kwargs)
            counter[0] += 1

    old = parser._parser
    parser._parser = countingparser

    try:
        s = 'a $(b $(c $(d $(e))))'
        self.assertASTEquals(s,
            commandnode(s,
                wordnode('a'),
                wordnode('$(b $(c $(d $(e))))', '$(b $(c $(d $(e))))', [
                    comsubnode('$(b $(c $(d $(e))))',
                        commandnode('b $(c $(d $(e)))',
                            wordnode('b'),
                            wordnode('$(c $(d $(e)))')
                        )
                    )
                ])
            ),
            expansionlimit=1)
    finally:
        # restore the real parser class even if the assertion fails
        parser._parser = old
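
# --- illustrative sketch, not part of the original tests ---
# The expansionlimit keyword exercised above (an assumption based on this test:
# assertASTEquals forwards its kwargs to parse()) stops recursive parsing of
# nested $(...) after the given depth; deeper text stays as a plain, unexpanded word.
from bashlex import parse

tree = parse('a $(b $(c $(d $(e))))', expansionlimit=1)[0]
outerword = tree.parts[1]             # word node for '$(b $(c $(d $(e))))'
comsub = outerword.parts[0]           # its commandsubstitution child
innerword = comsub.command.parts[1]   # word node for '$(c $(d $(e)))'
print(innerword.word)                 # '$(c $(d $(e)))'
print(innerword.parts)                # [] -- not expanded any further
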
def test_tokenize(self):
    s = 'bar -x'
    self.assertTokens(s, [
        t(tt.WORD, 'bar', [0, 3]),
        t(tt.WORD, '-x', [4, 6])])

    s = 'wx    y =z '
    self.assertTokens(s, [
        t(tt.WORD, 'wx', [0, 2]),
        t(tt.WORD, 'y', [6, 7]),
        t(tt.WORD, '=z', [8, 10])])
s = "a 'b' c"
self.assertTokens(s, [
t(tt.WORD, 'a', [0, 1]),
t(tt.WORD, "'b'", [2, 5], set([flags.word.QUOTED])),
t(tt.WORD, 'c', [6, 7])])
s = "a 'b ' c"
self.assertTokens(s, [
t(tt.WORD, 'a', [0, 1]),
t(tt.WORD, "'b '", [2, 7], set([flags.word.QUOTED])),
t(tt.WORD, 'c', [8, 9])])
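
# --- illustrative sketch, not part of the original tests ---
# The [start, end] offsets asserted above are also what the parser records on
# each word node's pos attribute, so they can be used to slice the input string.
from bashlex import parse

s = "a 'b' c"
command = parse(s)[0]
for word in command.parts:
    start, end = word.pos
    print(repr(s[start:end]))  # "a", "'b'" (quotes included in the slice), "c"
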
def test_parsematchedpair(self):
    s = '"`foo`"'
    self.assertTokens(s, [
        t(tt.WORD, '"`foo`"', [0, len(s)], set([flags.word.QUOTED]))])

    s = '"${a}"'
    self.assertTokens(s, [
        t(tt.WORD, '"${a}"', [0, len(s)], set([flags.word.HASDOLLAR,
                                               flags.word.QUOTED]))])

    s = '${\'a\'}'
    self.assertTokens(s, [
        t(tt.WORD, '${\'a\'}', [0, len(s)], hasdollarset)])

    s = '${$\'a\'}'
    self.assertTokens(s, [
        t(tt.WORD, '${$\'a\'}', [0, len(s)], hasdollarset)])

    s = "'a\\'"
    self.assertTokens(s, [
        t(tt.WORD, "'a\\'", [0, len(s)], set([flags.word.QUOTED]))])
def test_meta(self):
    s = '!&()<>;&;;&;; |<<-<< <<<>>&&||<&>&<>>|&> &>>|&'
    self.assertTokens(s, [
        t(tt.BANG, '!', [0, 1]),
        t(tt.AMPERSAND, '&', [1, 2]),
        t(tt.LEFT_PAREN, '(', [2, 3]),
        t(tt.RIGHT_PAREN, ')', [3, 4]),
        t(tt.LESS_GREATER, '<>', [4, 6]),
        t(tt.SEMI_AND, ';&', [6, 8]),
        t(tt.SEMI_SEMI_AND, ';;&', [8, 11]),
        t(tt.SEMI_SEMI, ';;', [11, 13]),
        t(tt.BAR, '|', [14, 15]),
        t(tt.LESS_LESS_MINUS, '<<-', [15, 18]),
        t(tt.LESS_LESS, '<<', [18, 20]),
        t(tt.LESS_LESS_LESS, '<<<', [21, 24]),
        t(tt.GREATER_GREATER, '>>', [24, 26]),
        t(tt.AND_AND, '&&', [26, 28]),
        t(tt.OR_OR, '||', [28, 30]),
        t(tt.LESS_AND, '<&', [30, 32]),
        t(tt.GREATER_AND, '>&', [32, 34]),
        t(tt.LESS_GREATER, '<>', [34, 36]),
        t(tt.GREATER_BAR, '>|', [36, 38]),
        t(tt.AND_GREATER, '&>', [38, 40]),
        t(tt.AND_GREATER_GREATER, '&>>', [41, 44]),
        t(tt.BAR_AND, '|&', [44, 46])])

    s = '<&-'
    self.assertTokens(s, [
        t(tt.LESS_AND, '<&', [0, 2]),
        t(tt.DASH, '-', [2, 3])])
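
# --- illustrative sketch, not part of the original tests ---
# Most of the operator tokens asserted above turn into redirect nodes once a
# full command is parsed; printing kind/type shows how they are classified.
from bashlex import parse

command = parse('a >> b 2>&1')[0]
for part in command.parts:
    print(part.kind, getattr(part, 'type', None))
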
s = '$(a ${b})'
self.assertTokens(s, [
    t(tt.WORD, '$(a ${b})', [0, 9], hasdollarset)])

s = '$(a $[b])'
self.assertTokens(s, [
    t(tt.WORD, '$(a $[b])', [0, 9], hasdollarset)])

s = '"$(a)"'
self.assertTokens(s, [
    t(tt.WORD, '"$(a)"', [0, 6], set([flags.word.HASDOLLAR,
                                      flags.word.QUOTED]))])

s = 'a $(! b)'
self.assertTokens(s, [
    t(tt.WORD, 'a', [0, 1]),
    t(tt.WORD, '$(! b)', [2, 8], hasdollarset)])

s = '$(!|!||)'
self.assertTokens(s, [
    t(tt.WORD, '$(!|!||)', [0, 8], hasdollarset)])

s = '$(a <