import os
import sys

import pysat

# reload lives in importlib under Python 3; Python 2 provides it as a builtin
if sys.version_info[0] >= 3:
    from importlib import reload as re_load
else:
    re_load = reload

if os.path.isdir(path):
    if store:
        # remember the chosen directory between sessions
        with open(os.path.join(os.path.expanduser('~'), '.pysat',
                               'data_path.txt'), 'w') as f:
            f.write(path)
    pysat.data_dir = path
    pysat._files = re_load(pysat._files)
    pysat._instrument = re_load(pysat._instrument)
else:
    raise ValueError('Path %s does not lead to a valid directory.' % path)
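
The block above reads like the body of a data-directory setter that receives a `path` and an optional `store` flag. A minimal, self-contained wrapper is sketched below; the function name and signature are assumptions based on the snippet, and the module reloads are omitted for brevity.

import os


def set_data_dir(path, store=True):
    """Sketch: point pysat at a new top-level data directory."""
    import pysat

    if not os.path.isdir(path):
        raise ValueError('Path %s does not lead to a valid directory.' % path)

    if store:
        # persist the choice in ~/.pysat/data_path.txt for future sessions
        settings_file = os.path.join(os.path.expanduser('~'), '.pysat',
                                     'data_path.txt')
        with open(settings_file, 'w') as f:
            f.write(path)

    # update the running session; pysat consults data_dir when building
    # local data paths
    pysat.data_dir = path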
# arguments for padding
# (a standalone illustration of the dict/DateOffset equivalence follows
#  this fragment)
if isinstance(pad, pds.DateOffset):
    self.pad = pad
elif isinstance(pad, dict):
    self.pad = pds.DateOffset(**pad)
elif pad is None:
    self.pad = None
else:
    estr = 'pad must be a dictionary or a pandas.DateOffset instance.'
    raise ValueError(estr)
# instantiate Files class
manual_org = False if manual_org is None else manual_org
# a temporary (in-memory) file list means the list is not written to disk
temporary_file_list = not temporary_file_list
self.files = _files.Files(self, manual_org=manual_org,
                          directory_format=self.directory_format,
                          update_files=update_files,
                          file_format=self.file_format,
                          write_to_disk=temporary_file_list,
                          ignore_empty_files=ignore_empty_files)

# set bounds for iteration
# self.bounds requires the Files class
# setting (None, None) loads default bounds
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
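
The padding logic near the top of this fragment accepts either a ready-made pandas.DateOffset or a plain dict of keyword arguments that is expanded into one, while the (None, None) bounds simply fall back to the default iteration limits derived from the available files. A short, standalone illustration of the pad equivalence; the variable names here are illustrative only.

import pandas as pds

# a one-hour pad expressed both ways; the dict form mirrors what
# DateOffset(**pad) receives
pad_offset = pds.DateOffset(hours=1)
pad_dict = {'hours': 1}
assert pds.DateOffset(**pad_dict) == pad_offset

# applied to a timestamp, the offset widens a load window by one hour
start = pds.Timestamp('2010-01-01')
print(start - pad_offset)  # 2009-12-31 23:00:00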
if level < n_layers:
    # If there is room to go down, look for directories
    if link['href'].count('/') == 1:
        remote_dirs[level+1].append(link['href'])
else:
    # If at the endpoint, add matching files to list
    add_file = True
    for target in targets:
        if link['href'].count(target) == 0:
            add_file = False
    if add_file:
        full_files.append(link['href'])

# parse remote filenames to get date information
if delimiter is None:
    stored = pysat._files.parse_fixed_width_filenames(full_files,
                                                      format_str)
else:
    stored = pysat._files.parse_delimited_filenames(full_files,
                                                    format_str, delimiter)

# process the parsed filenames and return a properly formatted Series
stored_list = pysat._files.process_parsed_filenames(stored,
                                                    two_digit_year_break)

# Downselect to user-specified dates, if needed
if year is not None:
    mask = (stored_list.index.year == year)
    if month is not None:
        mask = mask & (stored_list.index.month == month)
    if day is not None:
        mask = mask & (stored_list.index.day == day)
    return stored_list[mask]

# no year filter supplied, so there is no mask to apply; return everything
return stored_list
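
The downselection above relies on the parsed filenames coming back as a pandas Series indexed by datetime. The same masking pattern in isolation, using made-up data:

import pandas as pds

# a made-up file list indexed by date, standing in for the parsed filenames
files = pds.Series(['a.cdf', 'b.cdf', 'c.cdf'],
                   index=pds.to_datetime(['2009-12-31', '2010-01-01',
                                          '2010-01-02']))

mask = (files.index.year == 2010)
mask = mask & (files.index.month == 1)
print(files[mask])  # keeps only the two January 2010 entries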
# NOTE: this snippet begins mid-statement; only the closing arguments of the
# truncated call (presumably the one that builds `exp_list`, ending with an
# end-of-day cutoff) survive below.
#     now.year, now.month, now.day,
#     23, 59, 59)
# iterate over experiments to grab files for each one
files = []
print("Grabbing filenames for each experiment")
print("A total of", len(exp_list), "experiments were found")
for exp in exp_list:
    # web_data is an open Madrigal connection (see the sketch after this
    # snippet for how one is typically created)
    file_list = web_data.getExperimentFiles(exp.id)
    files.extend(file_list)
# parse these filenames to grab out the ones we want
print("Parsing filenames")
stored = pysat._files.parse_fixed_width_filenames(files, format_str)
# process the parsed filenames and return a properly formatted Series
print("Processing filenames")
return pysat._files.process_parsed_filenames(stored, two_digit_year_break)
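
The loop above assumes an already-open `web_data` connection from the madrigalWeb package. A hedged sketch of how such a connection is usually created; the URL is only an example, and this setup step is not shown in the snippet itself.

# sketch only: requires the madrigalWeb package
import madrigalWeb.madrigalWeb

madrigal_url = 'http://cedar.openmadrigal.org'
web_data = madrigalWeb.madrigalWeb.MadrigalData(madrigal_url)

# experiment objects returned by the connection carry an `id` attribute,
# which is what getExperimentFiles() above is given for each experiment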