argument if present), and meta is the document's metadata.
If the function returns None, the object to which it applies
will remain unchanged. If it returns an object, the object will
be replaced. If it returns a list, the list will be spliced in to
the list to which the target object belongs. (So, returning an
empty list deletes the object.)
"""
doc = pf.json.loads(pf.sys.stdin.read())
if len(pf.sys.argv) > 1:
    format = pf.sys.argv[1]
else:
    format = ""
if type(actions) is type(toJSONFilter):
    altered = pf.walk(doc, actions, format, doc[0]['unMeta'])
elif type(actions) is list:
    altered = doc
    for action in actions:
        altered = pf.walk(altered, action, format, doc[0]['unMeta'])
pf.json.dump(altered, pf.sys.stdout)
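# The docstring above spells out the three return conventions for an action
# (None / object / list). A minimal, self-contained illustration, loosely
# based on the standard pandocfilters "caps" example and not part of the
# project excerpted here, exercises all three:

from pandocfilters import toJSONFilter, Str

def caps(key, value, format, meta):
    if key == 'Str':
        return Str(value.upper())   # return an object: replace the element
    if key == 'HorizontalRule':
        return []                   # return a list: splice in nothing, i.e. delete
    return None                     # return None: leave the element unchanged

if __name__ == "__main__":
    toJSONFilter(caps)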
    format = sys.argv[1]
else:
    format = ''
if 'meta' in document:  # new API
    metadata = document['meta']
elif document[0]:  # old API
    metadata = document[0]['unMeta']
if 'draft' in metadata:
    DRAFT = metadata['draft']['c']
else:
    DRAFT = False
newDocument = document
newDocument = walk(newDocument, handle_comments, format, metadata)
# Need to ensure the LaTeX/beamer template knows if `mdframed` package is
# required (when `` has been used).
if (format == 'latex' or format == 'beamer') and USED_BOX:
    MetaList = elt('MetaList', 1)
    MetaInlines = elt('MetaInlines', 1)
    rawinlines = [MetaInlines([RawInline('tex',
                                         '\\RequirePackage{mdframed}')])]
    if 'header-includes' in metadata:
        headerIncludes = metadata['header-includes']
        if headerIncludes['t'] == 'MetaList':
            rawinlines += headerIncludes['c']
        else:  # headerIncludes['t'] == 'MetaInlines'
            rawinlines += [headerIncludes]
    metadata['header-includes'] = MetaList(rawinlines)
newDocument['meta'] = metadata
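# For reference, once the branch above has run, metadata['header-includes']
# is a MetaList whose first entry is the injected raw TeX. In pandoc's JSON
# AST the value looks roughly like this (shape only; any pre-existing
# header-includes entries follow the injected one):
#
#   {'t': 'MetaList',
#    'c': [{'t': 'MetaInlines',
#           'c': [{'t': 'RawInline',
#                  'c': ['tex', '\\RequirePackage{mdframed}']}]},
#          ...]}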
altered = functools.reduce(lambda x, action: walk(x, action, fmt, meta),
                           [repair_refs, process_refs, replace_refs,
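# The truncated reduce() call above folds a list of filter actions over the
# document, one walk() per action. A self-contained sketch of the same
# pattern (the helper and action names here are placeholders, not the
# project's own):

import functools
from pandocfilters import walk

def apply_all(doc, actions, fmt, meta):
    # Walk each action over the result of the previous one.
    return functools.reduce(
        lambda tree, action: walk(tree, action, fmt, meta), actions, doc)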
def main():
    doc = pf.json.loads(pf.sys.stdin.read())
    if len(pf.sys.argv) > 1:
        format = pf.sys.argv[1]
    else:
        format = ""
    metadata = doc[0]['unMeta']
    args = {k: v['c'] for k, v in metadata.items()}
    autoref = args.get('autoref', True)
    refmanager = ReferenceManager(autoref=autoref)
    altered = doc
    for action in refmanager.reference_filter:
        altered = pf.walk(altered, action, format, metadata)
    pf.json.dump(altered, pf.sys.stdout)
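# For illustration: under the old pandoc JSON API every metadata value is a
# {'t': <type>, 'c': <content>} pair, so a document with `autoref: false` in
# its YAML block yields roughly
#
#   metadata == {'autoref': {'t': 'MetaBool', 'c': False}}
#
# and the comprehension above reduces that to args == {'autoref': False},
# so args.get('autoref', True) returns False.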
def filter_json(self, json_doc):
    new_doc = walk(json_doc[1], self.parse_extensions, 'md', json_doc[0])
    return [json_doc[0], new_doc]
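# This method assumes the pre-pandoc-1.18 JSON shape, where the document is a
# two-element list. A hedged usage sketch (the `ext_filter` name is made up):
#
#   doc = json.loads(sys.stdin.read())     # [{'unMeta': {...}}, [blocks...]]
#   json.dump(ext_filter.filter_json(doc), sys.stdout)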
                              meta)
            return [html(REVEALJS_TEXT[""])] + newContent + \
                   [html(REVEALJS_TEXT[""])]
        elif docFormat == 'docx':
            newContent = walk(content, handle_comments, docFormat,
                              meta)
            return [docx(DOCX_TEXT[""])] + newContent + \
                   [docx(DOCX_TEXT[""])]
        else:
            return content
    else:
        return content
elif "smcaps" in classes:
    # Always show this---don't worry about draft status!
    if docFormat in ['latex', 'beamer']:
        newContent = walk(content, handle_comments, docFormat, meta)
        return [latex(LATEX_TEXT[""])] + newContent + \
               [latex(LATEX_TEXT[""])]
    elif docFormat in ['html', 'html5']:
        newContent = walk(content, handle_comments, docFormat, meta)
        return [html(HTML_TEXT[""])] + newContent + \
               [html(HTML_TEXT[""])]
    elif docFormat == 'revealjs':
        newContent = walk(content, handle_comments, docFormat, meta)
        return [html(REVEALJS_TEXT[""])] + newContent + \
               [html(REVEALJS_TEXT[""])]
    elif docFormat == 'docx':
        return docx(DOCX_TEXT[tag])
    else:
        # FIXME: I should run this through a filter that capitalizes
        # all strings in `content`.
        return content
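# A hedged sketch of the filter the FIXME above asks for (the helper name is
# an assumption, not code from the excerpted project): walk `content` and
# upper-case every Str element before returning it.
#
#   from pandocfilters import walk, Str
#
#   def capitalize_strings(key, value, format, meta):
#       if key == 'Str':
#           return Str(value.upper())
#
#   return walk(content, capitalize_strings, docFormat, meta)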
def raw(x):
    result = []

    def flatten(key, val, format, meta):
        if val is not None:
            if isinstance(val[1], unicode):
                # pprint.pprint(val[1], sys.stderr)
                result.append(val[1])
            if isinstance(val[1], dict):
                # pprint.pprint(val, sys.stderr)
                result.append(" ")

    walk(x, flatten, "", {})
    return ''.join(result)
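# Quick illustration of raw() on a hypothetical fragment (not from the
# excerpt): a RawInline's string payload sits at val[1], so
#
#   raw([{'t': 'RawInline', 'c': ['html', '<br/>']}])
#
# returns '<br/>'. The isinstance(val[1], unicode) test also marks this as
# Python 2 code; on Python 3 the analogous check would be against str.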
# the only input format; the most generic way should be implemented
# if key == "Header":
#     level, classes, internal = value
#
#     # record top level
#     if self.heading_top_level == 0:
#         self.heading_top_level = level
#
#     # ensure we start from h1 in output
#     if level > self.heading_top_level:
#         level -= self.heading_top_level
#
#     return pandocfilters.Header(level, classes, internal)
doc = json.loads(instring)
altered = pandocfilters.walk(doc, _filter, self.format, doc[0]["unMeta"])
return json.dumps(altered)
    (headingLevel, dummy) = get_value(keyvals, "heading-level")
    if not headingLevel:
        headingLevel = 0
    contents = get_contents_of_file(l, headingLevel)
    doc = json.loads(str_to_json(contents))
    if 'meta' in doc:  # new API
        meta = doc['meta']
    elif doc[0]:  # old API
        meta = doc[0]['unMeta']
    else:
        meta = {}
    altered = walk(doc, include, format, meta)
    rv.append(altered['blocks'])
elif l == '':
    # Empty lines are harmless
    pass
else:
    sys.stderr.write("WARNING: Can't read file '" + l + "'. Skipping.\n")
# Return a flattened list using nested list comprehension
#
# The following is equivalent to:
#
#     flattened = []
#     for sublist in rv:
#         for item in sublist:
#             flattened.append(item)
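# The elided line the comment refers to is presumably the comprehension it
# describes, i.e. something like:
#
#   return [item for sublist in rv for item in sublist]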