# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def __call__(self, stream):
    """Serialize the given event stream to markup text.

    The stream is first run through the configured filters; each event
    is then rendered to a `Markup` chunk, using a cache keyed on
    ``(kind, data)`` to avoid re-serializing identical events.

    :param stream: the event stream to serialize
    :return: an iterator yielding `Markup` strings
    """
    # NOTE(review): have_decl/have_doctype/in_cdata look like state for
    # XML-decl/DOCTYPE/CDATA handling that is not visible in this chunk;
    # kept as-is to avoid breaking a possibly truncated method.
    have_decl = have_doctype = False
    in_cdata = False
    _emit, _get = self._prepare_cache()
    for filter_ in self.filters:
        stream = filter_(stream)
    for kind, data, pos in stream:
        # Already-serialized markup passes through verbatim and is
        # deliberately not cached.
        if kind is TEXT and isinstance(data, Markup):
            yield data
            continue
        cached = _get((kind, data))
        if cached is not None:
            yield cached
        elif kind is START or kind is EMPTY:
            tag, attrib = data
            buf = ['<', tag]
            for attr, value in attrib:
                buf += [' ', attr, '="', escape(value), '"']
            # Self-closing form for EMPTY events.
            buf.append('/>' if kind is EMPTY else '>')
            yield _emit(kind, data, Markup(''.join(buf)))
        elif kind is END:
            # BUG FIX: format string was '' (raises TypeError: not all
            # arguments converted); must render a closing tag.
            yield _emit(kind, data, Markup('</%s>' % data))
        elif kind is TEXT:
            if in_cdata:
                # Text inside a CDATA section must not be escaped.
                yield _emit(kind, data, data)
            else:
                yield _emit(kind, data, escape(data, quotes=False))
def mark_text(self, pos, text, tag):
    """Append *text* wrapped in a start/end event pair for *tag*.

    Any leading whitespace is emitted as a separate text event before
    the start tag so it stays outside the wrapping element.
    """
    leading, body = self.cut_leading_space(text)
    qtag = QName(tag)
    if leading:
        self.append(TEXT, leading, pos)
    for kind, data in ((START, (qtag, Attrs())), (TEXT, body), (END, qtag)):
        self.append(kind, data, pos)
def _strip_accesskeys(self, stream, ctxt=None):
    """Stream filter that removes ``accesskey`` attributes from start tags."""
    for kind, data, pos in stream:
        if kind is START and 'accesskey' in data[1]:
            tag, attrs = data
            kept = [item for item in attrs if item[0] != 'accesskey']
            data = tag, Attrs(kept)
        yield kind, data, pos
def _generate(self):
    """Yield this element's start event, its children's events (via the
    Fragment base implementation), and finally its end event.
    """
    no_pos = (None, -1, -1)
    yield START, (self.tag, self.attrib), no_pos
    for event in Fragment._generate(self):
        yield event
    yield END, self.tag, no_pos
def __init__(self, qname="strong", between="..."):
"""
:param qname: the QName for the tag to wrap around matched terms.
:param between: the text to add between fragments.
"""
self.qname = qname
self.between = between
from genshi.core import START, END, TEXT # @UnresolvedImport
from genshi.core import Attrs, Stream # @UnresolvedImport
self.START, self.END, self.TEXT = START, END, TEXT
self.Attrs, self.Stream = Attrs, Stream
def block_process(self, events):
    """Process a sequence of markup events at block level.

    START/END events enter/leave nested scopes; non-whitespace text that
    occurs while a context tag is active is wrapped in start/end events
    for that tag. Everything else is appended unchanged.

    :param events: iterable of ``(kind, data, pos)`` tuples
    """
    # Unpack directly in the loop header; also avoid shadowing the
    # builtin ``type`` (the original bound it as a local name).
    for kind, data, pos in events:
        if kind == START:
            self.enter(pos, *data)
        elif kind == END:
            self.leave(pos, data)
        elif kind == TEXT:
            if self._context is not None and data.strip():
                # FIX: the original wrapped the tag twice —
                # QName(QName(...)) — which was redundant.
                tag = QName(self._context)
                self.append(START, (tag, Attrs()), pos)
                self.append(kind, data, pos)
                self.append(END, tag, pos)
            else:
                self.append(kind, data, pos)
        else:
            self.append(kind, data, pos)
# NOTE(review): this span is the interior of a larger i18n translation
# filter whose enclosing ``def`` is outside this view; code is left
# untouched, comments only.
if ctxt:
    # Expose the translation functions to template expressions.
    ctxt['_i18n.gettext'] = gettext
    ctxt['_i18n.ngettext'] = ngettext
    ctxt['_i18n.dgettext'] = dgettext
    ctxt['_i18n.dngettext'] = dngettext
if ctxt and ctxt.get('_i18n.domain'):
    # TODO: This can cause infinite recursion if dgettext is defined
    # via the AttributeError case above!
    gettext = lambda msg: dgettext(ctxt.get('_i18n.domain'), msg)
for kind, data, pos in stream:
    # skip chunks that should not be localized
    if skip:
        # Track nesting depth so the whole ignored subtree passes
        # through unmodified.
        if kind is START:
            skip += 1
        elif kind is END:
            skip -= 1
        yield kind, data, pos
        continue
    # handle different events that can be localized
    if kind is START:
        tag, attrs = data
        # Elements in ignore_tags, or carrying an explicit xml:lang,
        # are passed through (and begin an ignored subtree).
        if tag in self.ignore_tags or \
                isinstance(attrs.get(xml_lang), basestring):
            skip += 1
            yield kind, data, pos
            continue
        new_attrs = []
def handle_starttag(self, tag, attrib):
    """Handle a start tag reported by the HTML parser.

    Attribute values are normalized to unicode (minimized attributes
    take their own name as value), entities are stripped, and a START
    event is enqueued. Known empty elements are closed immediately;
    all others are recorded as open.
    """
    def _normalize(name, value):
        # Minimized attribute: value defaults to the attribute name.
        if value is None:
            return unicode(name)
        if isinstance(value, unicode):
            return value
        return value.decode(self.encoding, 'replace')

    attrs = Attrs([(QName(name), stripentities(_normalize(name, value)))
                   for name, value in attrib])
    self._enqueue(START, (QName(tag), attrs))
    if tag in self._EMPTY_ELEMS:
        self._enqueue(END, QName(tag))
    else:
        self._open_tags.append(tag)
def __iter__(self):
    # Walk the wrapped Genshi stream (with XHTML flattened into the
    # default namespace) and translate event pairs into walker tokens.
    depth = 0
    # Depth marker: while set, events below/at this depth are the
    # children of an element already serialized as an empty tag and
    # must be suppressed.
    ignore_until = None
    previous = None
    for event in NamespaceFlattener(prefixes={
        'http://www.w3.org/1999/xhtml': ''
    })(self.tree):
        if previous is not None:
            if previous[0] == START:
                depth += 1
            # NOTE(review): relies on Python 2 semantics where
            # ``None <= int`` is True — confirm before porting to 3.
            if ignore_until <= depth:
                ignore_until = None
            if ignore_until is None:
                # tokens() needs one event of lookahead, so tokens are
                # emitted for the *previous* event with the current
                # event as context.
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = depth
            if previous[0] == END:
                depth -= 1
        previous = event
    # Flush the final buffered event (no lookahead available).
    if previous is not None:
        if ignore_until is None or ignore_until <= depth:
            for token in self.tokens(previous, None):
                yield token
elif ignore_until is not None:
# NOTE(review): interior of a walker-tokens-to-genshi-events adapter;
# its ``def`` header and the initialization of ``text`` are outside
# this view, and the final yield is truncated. Code left untouched.
for token in walker:
    type = token["type"]
    if type in ("Characters", "SpaceCharacters"):
        # Buffer adjacent character tokens into one TEXT event.
        text.append(token["data"])
    elif text:
        # A non-character token flushes any buffered text first.
        yield TEXT, "".join(text), (None, -1, -1)
        text = []
    if type in ("StartTag", "EmptyTag"):
        if token["namespace"]:
            # Clark notation: {namespace}localname
            name = "{%s}%s" % (token["namespace"], token["name"])
        else:
            name = token["name"]
        attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
                       for attr, value in token["data"].items()])
        yield (START, (QName(name), attrs), (None, -1, -1))
        if type == "EmptyTag":
            # An empty tag is a start immediately followed by an end.
            type = "EndTag"
    if type == "EndTag":
        if token["namespace"]:
            name = "{%s}%s" % (token["namespace"], token["name"])
        else:
            name = token["name"]
        yield END, QName(name), (None, -1, -1)
    elif type == "Comment":
        yield COMMENT, token["data"], (None, -1, -1)
    elif type == "Doctype":
        # NOTE(review): statement truncated by the chunk boundary.
        yield DOCTYPE, (token["name"], token["publicId"],