def content_object_init(instance):
    # Skip static files and anything that already carries an explicit summary.
    if isinstance(instance, contents.Static):
        return
    if 'summary' in instance.metadata:
        return
    if not hasattr(instance, '_summary') and instance._content is not None:
        content = instance._content
        firstP = FirstParagraphParser()
        firstP.feed(content)
        # Cut the summary at a sentence end: either the CJK full stop '。' or
        # the ASCII '.', whichever str.find() locates later (a missing
        # character yields -1, so max() prefers the one that was found),
        # capped at _MAX_SUMMARY_POS characters.
        endCharA = '。'
        endCharB = '.'
        endPosA = firstP.data.find(endCharA)
        endPosB = firstP.data.find(endCharB)
        endPos = min(max(endPosA, endPosB), _MAX_SUMMARY_POS)
        instance._summary = firstP.data[:endPos + 1 if endPos > 0 else None]
        if endPos == _MAX_SUMMARY_POS:
            instance._summary += ' …'
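# FirstParagraphParser is used above but not shown in this excerpt. A minimal
# sketch of what it plausibly looks like, assuming it is an HTMLParser
# subclass that accumulates the text of the first <p> element into .data
# (only the class name comes from the snippet; the body is an illustration,
# not the plugin's actual implementation):
from html.parser import HTMLParser

class FirstParagraphParser(HTMLParser):
    def __init__(self):
        super().__init__()
        self.data = ''          # text collected from the first paragraph
        self._in_p = False      # currently inside the first <p>?
        self._done = False      # first paragraph fully consumed?

    def handle_starttag(self, tag, attrs):
        if tag == 'p' and not self._done:
            self._in_p = True

    def handle_endtag(self, tag):
        if tag == 'p' and self._in_p:
            self._in_p = False
            self._done = True

    def handle_data(self, data):
        if self._in_p:
            self.data += data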
def write_url(self, page, fd):
    if not page.save_as:
        return
    page_path = os.path.join(self.output_path, page.save_as)
    if not os.path.exists(page_path):
        return
    # Prefer an explicit modification date; fall back to the page date.
    lastdate = getattr(page, 'date', self.now)
    try:
        lastdate = self.get_date_modified(page, lastdate)
    except ValueError:
        warning("sitemap plugin: " + page.save_as + " has invalid modification date,")
        warning("sitemap plugin: using date value as lastmod.")
    lastmod = format_date(lastdate)
    if isinstance(page, contents.Article):
        pri = self.priorities['articles']
        chfreq = self.changefreqs['articles']
    elif isinstance(page, contents.Page):
        pri = self.priorities['pages']
        chfreq = self.changefreqs['pages']
    else:
        pri = self.priorities['indexes']
        chfreq = self.changefreqs['indexes']
    pageurl = '' if page.url == 'index.html' else page.url
    # Exclude URLs from the sitemap:
    sitemapExclude = []
    if self.format == 'xml':
        if pageurl not in sitemapExclude:
            fd.write(XML_URL.format(self.siteurl, pageurl, lastmod, chfreq, pri))
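# get_date_modified() is called above but not shown. A plausible sketch,
# assuming it prefers a 'modified' metadata field and falls back to the
# supplied default (illustrative; the plugin's own helper may differ):
from datetime import datetime
from pelican.utils import get_date

def get_date_modified(self, page, default):
    if hasattr(page, 'modified'):
        if isinstance(page.modified, datetime):
            return page.modified
        return get_date(page.modified)  # may raise ValueError, caught above
    return default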
def bootstrapify(content):
    if isinstance(content, contents.Static):
        return
    # Map of CSS selectors to the Bootstrap classes that should be added.
    replacements = content.settings[BOOTSTRAPIFY_KEY]
    soup = BeautifulSoup(content._content, 'html.parser')
    for selector, classes in replacements.items():
        replace_in_with(selector, soup, classes)
    content._content = soup.decode()
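# replace_in_with() is used above but not defined in this excerpt. A minimal
# sketch, assuming it merges the configured classes into every element that
# matches the selector (illustrative; the plugin's own helper may differ):
def replace_in_with(selector, soup, classes):
    for element in soup.select(selector):
        existing = element.get('class', [])
        # Avoid duplicating classes the element already carries.
        element['class'] = existing + [c for c in classes if c not in existing]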
def write_url(self, page, fd):
    if getattr(page, 'status', 'published') != 'published':
        return
    page_path = os.path.join(self.output_path, page.url)
    if not os.path.exists(page_path):
        return
    # Use the update date when present, otherwise the publication date.
    lastmod = format_date(getattr(page, 'updatedate', getattr(page, 'date', self.now)))
    if isinstance(page, contents.Article):
        pri = self.priorities['articles']
        chfreq = self.changefreqs['articles']
    elif isinstance(page, contents.Page):
        pri = self.priorities['pages']
        chfreq = self.changefreqs['pages']
    else:
        pri = self.priorities['indexes']
        chfreq = self.changefreqs['indexes']
    if self.format == 'xml':
        fd.write(XML_URL.format(self.siteurl, page.url, lastmod, chfreq, pri))
    else:
        # Plain-text sitemap: one absolute URL per line. 'loc' was undefined
        # in the original; page.url is the value the XML branch uses.
        fd.write(self.siteurl + '/' + page.url + '\n')
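# XML_URL is the per-entry template that both write_url() variants format.
# A plausible definition, assuming the five positional fields used above
# (siteurl, url, lastmod, changefreq, priority); the plugin's actual
# template may differ in whitespace:
XML_URL = """
<url>
<loc>{0}/{1}</loc>
<lastmod>{2}</lastmod>
<changefreq>{3}</changefreq>
<priority>{4}</priority>
</url>
"""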
def generate_toc(content):
    if isinstance(content, contents.Static):
        return
    # Per-page metadata can override the global TOC settings.
    _toc_run = content.metadata.get(
        'toc_run',
        content.settings[TOC_KEY]['TOC_RUN'])
    if _toc_run != 'true':
        return
    _toc_include_title = content.metadata.get(
        'toc_include_title',
        content.settings[TOC_KEY]['TOC_INCLUDE_TITLE']) == 'true'
    all_ids = set()
    title = content.metadata.get('title', 'Title')
    # 'h0' makes the page title the root above any real h1-h6 headings.
    tree = node = HtmlTreeNode(None, title, 'h0', '', _toc_include_title)
    soup = BeautifulSoup(content._content, 'html.parser')
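    # The excerpt is cut off above. A sketch of the likely continuation,
    # assuming HtmlTreeNode.add(header, all_ids) files the heading under the
    # right parent and returns the new current node plus a header rewritten
    # with an anchor id (illustrative, not the plugin's verbatim code):
    import re
    for header in soup.findAll(re.compile('^h[1-6]')):
        node, new_header = node.add(header, all_ids)
        header.replaceWith(new_header)
    if tree.children:
        content.toc = str(tree)          # rendered nested-list TOC
    content._content = soup.decode()     # body now carries the anchor ids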
# Body of the generator's read loop; the excerpt omits the enclosing
# 'for f in ...' over the source files.
try:
    entity_or_draft = self.readers.read_file(
        base_path=self.path, path=f, content_class=entity_class,
        context=self.context,
        preread_signal=entity_subgenerator_preread,
        preread_sender=self,
        context_signal=entity_subgenerator_context,
        context_sender=self)
except Exception as e:
    logger.error('Could not process %s\n%s', f, e,
                 exc_info=self.settings.get('DEBUG', False))
    self._add_failed_source_path(f)
    continue
if not contents.is_valid_content(entity_or_draft, f):
    self._add_failed_source_path(f)
    continue
known_statuses = ("published", "draft")
if entity_or_draft.status.lower() not in known_statuses:
    logger.warning("Unknown status '%s' for file %s, skipping it.",
                   entity_or_draft.status, f)
    self._add_failed_source_path(f)
    continue
self.cache_data(f, entity_or_draft)
if entity_or_draft.status.lower() == "published":
    all_entities.append(entity_or_draft)
else:
    all_drafts.append(entity_or_draft)  # assumed: the cut-off branch collects drafts
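# None of these handlers run by themselves: a Pelican plugin exposes a
# register() hook that connects them to signals. A minimal sketch, assuming
# the summary, bootstrapify, and TOC handlers above all listen on
# content_object_init, the usual pattern for content-rewriting plugins
# (the exact wiring varies per plugin):
from pelican import signals

def register():
    signals.content_object_init.connect(content_object_init)
    signals.content_object_init.connect(bootstrapify)
    signals.content_object_init.connect(generate_toc)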