ctx = {'lang': self.lang,
       'description': self.description,
       'contacts': self.contacts,
       'wikiproject_name': self.wikiproject_name,
       'campaign_start_date': self.campaign_start_date.isoformat(),
       'campaign_end_date': self.campaign_end_date.isoformat(),
       'date_created': self.date_created.isoformat(),
       'date_updated': datetime.datetime.utcnow().strftime(UPDATED_DT_FORMAT),
       'article_count': len(self.article_title_list),
       'all_results': all_results,
       'goals': [{'name': 'Article', 'slug': 'title'}] + sorted(self.goals, key=lambda s: s['name'])}
campaign_static_path = STATIC_PATH + 'campaigns/%s/' % self.id
article_list_html = ASHES_ENV.render('articles.html', ctx)
article_list_path = campaign_static_path + 'articles.html'
article_list_json_path = campaign_static_path + 'articles.json'
mkdir_p(os.path.split(article_list_path)[0])
with atomic_save(article_list_path) as html_f, \
        atomic_save(article_list_json_path) as json_f:
    html_f.write(article_list_html.encode('utf-8'))
    json.dump(ctx, json_f, indent=2, sort_keys=True)
return
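# The atomic_save() calls above come from boltons.fileutils: each yields a file
# backed by a temporary ".part" path that is only renamed over the destination
# when the with-block exits cleanly, so readers never see a half-written
# articles.html or articles.json. A minimal sketch of the same pattern
# (hypothetical path and data; atomic_save opens in binary mode by default):

import json
from boltons.fileutils import atomic_save

def save_json_atomically(path, data):
    with atomic_save(path) as f:
        f.write(json.dumps(data, indent=2, sort_keys=True).encode('utf-8'))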
@face_middleware(provides=['work_dir', 'repo_dir', 'metrics_dir'], optional=True)
def mw_ensure_work_dir(next_):
    work_dir = os.path.expanduser('~/.apatite/')
    repo_dir = work_dir + 'repos/'
    mkdir_p(repo_dir)
    # touch an empty marker file so the repo dir is recognizable later
    open(repo_dir + '.apatite_repo_dir', 'w').close()
    metrics_dir = work_dir + 'metrics/'
    mkdir_p(metrics_dir)
    return next_(work_dir=work_dir, repo_dir=repo_dir, metrics_dir=metrics_dir)
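# mkdir_p() here (and throughout these snippets) mirrors `mkdir -p`: create the
# directory and any missing parents, and succeed silently if it already exists.
# A rough stdlib-only equivalent (the boltons implementation differs in detail):

import errno
import os

def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            return  # already present, which is the desired end state
        raise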
                          year=query_date.year,
                          month=query_date.month,
                          day=query_date.day),
       'meta': {'fetched': datetime.utcnow().isoformat()}}
outfile_name = DATA_PATH_TMPL.format(lang=lang,
                                     project=project,
                                     year=query_date.year,
                                     month=query_date.month,
                                     day=query_date.day)
with tlog.critical('saving_single_day_stats') as rec:
    rec['out_file'] = os.path.abspath(outfile_name)
    try:
        out_file = codecs.open(outfile_name, 'w')
    except IOError:
        # parent directory probably doesn't exist yet; create it and retry
        mkdir_p(os.path.dirname(outfile_name))
        out_file = codecs.open(outfile_name, 'w')
    with out_file:
        data_bytes = json.dumps(ret, indent=2, sort_keys=True)
        rec['len_bytes'] = len(data_bytes)
        out_file.write(data_bytes)
    rec.success('wrote {len_bytes} bytes to {out_file}')
return
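# The try/except/mkdir_p/retry above is the EAFP version of "ensure the parent
# directory exists before writing": attempt the common case first and only
# create directories on failure. As a reusable sketch (hypothetical helper name):

import codecs
import os
from boltons.fileutils import mkdir_p

def open_for_write(path, encoding='utf-8'):
    try:
        return codecs.open(path, 'w', encoding=encoding)
    except IOError:
        mkdir_p(os.path.dirname(path))  # first write into this directory
        return codecs.open(path, 'w', encoding=encoding)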
    index_content = self.entries[0].entry_html
else:
    index_content = 'No entries yet!'
fal.write(index_path, index_content)
archive_path = pjoin(output_path, ('archive' + EXPORT_HTML_EXT))
fal.write(archive_path, self.entries.rendered_html)
# output feeds
rss_path = pjoin(output_path, RSS_FEED_FILENAME)
fal.write(rss_path, self.entries.rendered_rss_feed)
atom_path = pjoin(output_path, ATOM_FEED_FILENAME)
fal.write(atom_path, self.entries.rendered_atom_feed)
# per-tag archives and feeds, one directory per tag
for tag, entry_list in self.tag_map.items():
    tag_path = pjoin(output_path, entry_list.path_part)
    mkdir_p(tag_path)
    rss_path = pjoin(tag_path, RSS_FEED_FILENAME)
    atom_path = pjoin(tag_path, ATOM_FEED_FILENAME)
    archive_path = pjoin(tag_path, 'index.html')
    fal.write(rss_path, entry_list.rendered_rss_feed)
    fal.write(atom_path, entry_list.rendered_atom_feed)
    fal.write(archive_path, entry_list.rendered_html)
# copy assets, i.e., all directories under the theme path
for sdn in get_subdirectories(self.theme_path):
    cur_src = pjoin(self.theme_path, sdn)
    cur_dest = pjoin(output_path, sdn)
    with chlog.critical('copy assets', src=cur_src, dest=cur_dest):
        copytree(cur_src, cur_dest)
# optionally symlink the uploads directory. this is an
# important step for sites with uploads because Chert's
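# get_subdirectories() above only needs the names of directories directly under
# the theme path. A minimal stdlib sketch of that helper (assumed behavior; the
# real helper may filter or sort differently):

import os

def get_subdirectories(path):
    return [name for name in os.listdir(path)
            if os.path.isdir(os.path.join(path, name))]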
def copy_file(self, from_, to, log):
    to_directory = to.parent
    log.debug("copy.mkdir", dest=to_directory)
    fileutils.mkdir_p(to_directory)
    # We don't want to risk partially-copied files left on disk, so we copy to
    # a tmp name, then atomically rename into place.
    tmp_name = tempfile.mktemp(prefix='.dea-mv-', dir=to_directory)
    try:
        log.info("copy.put", src=from_, tmp_dest=tmp_name)
        shutil.copy(from_, tmp_name)
        log.debug("copy.put.done")
        os.rename(tmp_name, to)
    finally:
        log.debug('tmp_file.rm', tmp_file=tmp_name)
        with suppress(FileNotFoundError):
            os.remove(tmp_name)
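# Creating the temp file in the destination directory is what makes os.rename()
# safe here: rename is only atomic within a single filesystem. A variant that
# avoids the name-reservation race in tempfile.mktemp() by using mkstemp()
# instead (a sketch, not the project's code; str paths rather than pathlib):

import os
import shutil
import tempfile

def atomic_copy(src, dest):
    fd, tmp_name = tempfile.mkstemp(prefix='.copy-', dir=os.path.dirname(dest))
    os.close(fd)  # shutil.copy reopens the path by name
    try:
        shutil.copy(src, tmp_name)   # copy data (and mode bits) to the tmp file
        os.rename(tmp_name, dest)    # atomic within the same filesystem
    finally:
        if os.path.exists(tmp_name):  # only true if the rename didn't happen
            os.remove(tmp_name)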