Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __init__(self, stream: Sequence[tokens.Token]):
    """Initializes the Parser with a stream of tokens and sets the index.

    Args:
        stream: Lexed sequence of tokens to be parsed

    Raises:
        TypeError: If an object in the stream is not a valid token
    """
    # Validate the entire stream up front so later parsing never has to
    # worry about encountering a non-token object.
    for token in stream:
        if isinstance(token, tokens.Token):
            continue
        raise TypeError(
            f"Type {type(token)} for {token} is not a valid token type."
        )

    self.tokens = stream
    self.index: int = 0
    self.progress: int = 0
    self.depth: int = -1
    self.logger: logging.Logger = logging.getLogger(__name__)
    # Cache the debug-enabled flag once; parse steps test this plain bool
    # instead of calling isEnabledFor repeatedly.
    self.log_debug: bool = self.logger.isEnabledFor(logging.DEBUG)
"property_label_key",
"else",
)
# Keys for fields in Looker that have a "name" attribute. lkml reserves the
# key `name` to represent the name of the field itself (e.g. for
# `dimension: dimension_name {`, the `name` key holds the value
# `dimension_name`), so values under these keys are treated specially.
KEYS_WITH_NAME_FIELDS: Tuple[str, ...] = (
    "user_attribute_param",
    "param",
    "form_param",
    "option",
)
# Maps each single-character lexeme to the token class emitted when that
# character is scanned. "\0" is the NUL sentinel appended to the input text,
# so it maps to the end-of-stream token. NOTE(review): a single ";" maps to
# ExpressionBlockEndToken even though that token's id is ";;" — presumably the
# scanner consumes the second ";" itself; confirm against the lexer.
CHARACTER_TO_TOKEN: Dict[str, Type[tokens.Token]] = {
    "\0": tokens.StreamEndToken,
    "{": tokens.BlockStartToken,
    "}": tokens.BlockEndToken,
    "[": tokens.ListStartToken,
    "]": tokens.ListEndToken,
    ",": tokens.CommaToken,
    ":": tokens.ValueToken,
    ";": tokens.ExpressionBlockEndToken,
}
Raises:
TypeError: If one or more of the token_types are not actual token types
Returns:
bool: True if the current token matches one of the token_types
"""
if self.log_debug:
self.logger.debug(
"%sCheck %s == %s",
(1 + self.depth) * DELIMITER,
self.peek(),
" or ".join(t.__name__ for t in token_types),
)
for token_type in token_types:
if not issubclass(token_type, tokens.Token):
raise TypeError(f"{token_type} is not a valid token type.")
if type(self.peek()) in token_types:
return True
else:
return False
>>> token.value = 'A string value'
>>> token.__repr__()
'Token(A string value)'
>>> token.value = 'A very, very, very long string value'
>>> token.__repr__()
'Token(A very, very, very long s ... )'
"""
value = getattr(self, "value", "").strip()
value = (value[:25].rstrip() + " ... ") if len(value) > 25 else value
return f"{self.__class__.__name__}({value})"
class ContentToken(Token):
    """Base class for LookML tokens that contain a string of content."""

    def __init__(self, value: str, line_number: int):
        """Initializes a ContentToken with string content.

        Args:
            value: A string value for the token's content
            line_number: The corresponding line in the text where this token begins
        """
        self.value: str = value
        self.line_number: int = line_number

    def __eq__(self, other):
        """Compare one ContentToken to another by their ids and values."""
        # Returning NotImplemented for non-ContentToken operands (instead of
        # raising AttributeError when `other` lacks `id`/`value`) lets Python
        # fall back to default comparison, so e.g. `token == "foo"` is False
        # rather than an error.
        if not isinstance(other, ContentToken):
            return NotImplemented
        return self.id == other.id and self.value == other.value
"""
self.value: str = value
self.line_number: int = line_number
def __eq__(self, other):
    """Compare one ContentToken to another by their values."""
    # Tokens are equal only when both the class-level `id` and the string
    # content match. NOTE(review): raises AttributeError if `other` lacks
    # `id`/`value` — assumes callers only compare tokens to tokens.
    return self.id == other.id and self.value == other.value
class StreamStartToken(Token):
    """Represents the start of a stream of characters."""

    id = ""  # boundary marker: no printable lexeme
class StreamEndToken(Token):
    """Represents the end of a stream of characters."""

    id = ""  # boundary marker: no printable lexeme
class BlockStartToken(Token):
    """Represents the start of a block."""

    id = "{"  # literal character this token corresponds to
class BlockEndToken(Token):
    """Represents the end of a block."""

    id = "}"  # literal character this token corresponds to
id = ":"
class ExpressionBlockEndToken(Token):
    """Represents the end of an expression block."""

    id = ";;"  # two-character terminator for SQL-style expression blocks
class CommaToken(Token):
    """Separates elements in a list."""

    id = ","  # literal character this token corresponds to
class ListStartToken(Token):
    """Represents the start of a list."""

    id = "["  # literal character this token corresponds to
class ListEndToken(Token):
    """Represents the end of a list."""

    id = "]"  # literal character this token corresponds to
class ExpressionBlockToken(ContentToken):
    """Contains the value of an expression block."""

    id = ""  # content token: its `value`, not a fixed lexeme, carries the text
def __init__(self, text: str):
    """Initializes the Lexer with a LookML string and sets the index.

    Args:
        text: LookML string to be lexed
    """
    # Append a NUL sentinel so the scanner can detect end-of-input without
    # bounds-checking on every character read.
    self.text: str = text + "\0"
    self.line_number: int = 1
    self.index: int = 0
    self.tokens: List[tokens.Token] = []
id = ";;"
class CommaToken(Token):
    """Separates elements in a list."""

    id = ","  # literal character this token corresponds to
class ListStartToken(Token):
    """Represents the start of a list."""

    id = "["  # literal character this token corresponds to
class ListEndToken(Token):
    """Represents the end of a list."""

    id = "]"  # literal character this token corresponds to
class ExpressionBlockToken(ContentToken):
    """Contains the value of an expression block."""

    id = ""  # content token: its `value`, not a fixed lexeme, carries the text
class LiteralToken(ContentToken):
    """Contains the value of an unquoted literal."""

    id = ""  # content token: its `value`, not a fixed lexeme, carries the text
"""Initializes a ContentToken with string content.
Args:
value: A string value for the token's content
line_number: The corresponding line in the text where this token begins
"""
self.value: str = value
self.line_number: int = line_number
def __eq__(self, other):
    """Compare one ContentToken to another by their values."""
    # Tokens are equal only when both the class-level `id` and the string
    # content match. NOTE(review): raises AttributeError if `other` lacks
    # `id`/`value` — assumes callers only compare tokens to tokens.
    return self.id == other.id and self.value == other.value
class StreamStartToken(Token):
    """Represents the start of a stream of characters."""

    id = ""  # boundary marker: no printable lexeme
class StreamEndToken(Token):
    """Represents the end of a stream of characters."""

    id = ""  # boundary marker: no printable lexeme
class BlockStartToken(Token):
    """Represents the start of a block."""

    id = "{"  # literal character this token corresponds to
id = ""
class StreamEndToken(Token):
    """Represents the end of a stream of characters."""

    id = ""  # boundary marker: no printable lexeme
class BlockStartToken(Token):
    """Represents the start of a block."""

    id = "{"  # literal character this token corresponds to
class BlockEndToken(Token):
    """Represents the end of a block."""

    id = "}"  # literal character this token corresponds to
class ValueToken(Token):
    """Separates a key from a value."""

    id = ":"  # literal character this token corresponds to
class ExpressionBlockEndToken(Token):
    """Represents the end of an expression block."""

    id = ";;"  # two-character terminator for SQL-style expression blocks