# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def download_orbit(granuleName):
    '''
    Grab the Sentinel-1 precise orbit (POEORB) file for a granule from ASF.

    Parameters
    ----------
    granuleName : str
        Sentinel-1 granule name; characters 0-3 give the platform
        (e.g. 'S1A'/'S1B') and characters 17-25 the acquisition date
        as YYYYMMDD.

    Notes
    -----
    Downloads into the directory named by the POEORB environment variable.
    Precise orbit files are published after acquisition, so the file whose
    embedded start time matches the day *before* the scene date is selected.
    Raises IndexError if no matching orbit file is listed (scene too recent).
    '''
    cwd = os.getcwd()
    os.chdir(os.environ['POEORB'])
    try:
        sat = granuleName[:3]
        date = granuleName[17:25]
        print('downloading orbit for {}, {}'.format(sat, date))
        url = 'https://s1qc.asf.alaska.edu/aux_poeorb'
        r = requests.get(url)
        webpage = html.fromstring(r.content)
        orbits = webpage.xpath('//a/@href')
        # keep only links for the right platform (s1A or s1B)
        df = gpd.pd.DataFrame(dict(orbit=orbits))
        # .copy() so the startTime assignment below does not trigger a
        # pandas SettingWithCopyWarning on a view of `df`
        dfSat = df[df.orbit.str.startswith(sat)].copy()
        dayBefore = gpd.pd.to_datetime(date) - gpd.pd.to_timedelta(1, unit='d')
        dayBeforeStr = dayBefore.strftime('%Y%m%d')
        # orbit filenames embed the validity start time at a fixed offset
        dfSat['startTime'] = dfSat.orbit.str[42:50]
        match = dfSat.loc[dfSat.startTime == dayBeforeStr, 'orbit'].values[0]
        # Fetch with requests instead of os.system('wget ...'):
        # avoids shell injection via the remotely-derived filename and the
        # external wget dependency. The exists() check keeps wget's old
        # -nc (no-clobber) behavior.
        if not os.path.exists(match):
            print('downloading {}/{}'.format(url, match))
            resp = requests.get('{}/{}'.format(url, match))
            with open(match, 'wb') as f:
                f.write(resp.content)
    finally:
        # always restore the caller's working directory, even on error
        os.chdir(cwd)
# NOTE(review): everything between the triple quotes below is an older copy
# of the download routine preserved as a bare string literal -- it never
# executes. Dead code like this should be deleted and left to version
# control rather than kept inline.
'''
cwd = os.getcwd()
try:
os.chdir(os.environ['POEORB'])
sat = granuleName[:3]
date = granuleName[17:25]
print('downloading orbit for {}, {}'.format(sat,date))
url = 'https://s1qc.asf.alaska.edu/aux_poeorb'
r = requests.get(url)
webpage = html.fromstring(r.content)
orbits = webpage.xpath('//a/@href')
# get s1A or s1B
df = gpd.pd.DataFrame(dict(orbit=orbits))
dfSat = df[df.orbit.str.startswith(sat)]
dayBefore = gpd.pd.to_datetime(date) - gpd.pd.to_timedelta(1, unit='d')
dayBeforeStr = dayBefore.strftime('%Y%m%d')
# get matching orbit file
dfSat['startTime'] = dfSat.orbit.str[42:50]
match = dfSat.loc[dfSat.startTime == dayBeforeStr, 'orbit'].values[0]
cmd = 'wget -q -nc {}/{}'.format(url,match) #-nc means no clobber
print(cmd)
os.system(cmd)
except Exception as e:
print('Trouble downloading POEORB... maybe scene is too recent?')
print(e)
pass
os.chdir(cwd) #NOTE: best to specifiy download dir instead of jumping cwd around...
Grab orbit files from ASF
'''
# NOTE(review): this run of statements repeats the orbit-download logic that
# appears earlier in the file verbatim -- apparently a merge/duplication
# artifact. It references `granuleName`, so it presumably belongs inside a
# function body whose structure was lost; confirm which copy is intended
# before removing either.
cwd = os.getcwd()
os.chdir(os.environ['POEORB'])
sat = granuleName[:3]
date = granuleName[17:25]
print('downloading orbit for {}, {}'.format(sat,date))
url = 'https://s1qc.asf.alaska.edu/aux_poeorb'
# fetch the directory listing and pull every anchor href (orbit filenames)
r = requests.get(url)
webpage = html.fromstring(r.content)
orbits = webpage.xpath('//a/@href')
# get s1A or s1B
df = gpd.pd.DataFrame(dict(orbit=orbits))
dfSat = df[df.orbit.str.startswith(sat)]
dayBefore = gpd.pd.to_datetime(date) - gpd.pd.to_timedelta(1, unit='d')
dayBeforeStr = dayBefore.strftime('%Y%m%d')
# get matching orbit file
dfSat['startTime'] = dfSat.orbit.str[42:50]
match = dfSat.loc[dfSat.startTime == dayBeforeStr, 'orbit'].values[0]
# WARNING(review): os.system() runs a shell command containing a filename
# scraped from a remote page -- injection risk; prefer requests/subprocess.
cmd = 'wget -q -nc {}/{}'.format(url,match) #-nc means no clobber
print(cmd)
os.system(cmd)
# restore the working directory changed above
os.chdir(cwd)
def load_inventory(vectorFile):
    '''
    Load a merged inventory vector file into a GeoDataFrame.

    Parameters
    ----------
    vectorFile : str
        Path to a vector file readable by geopandas, with `sceneDate`
        (format 'YYYY-MM-DD HH:MM:SS') and `relativeOrbit` attributes.

    Returns
    -------
    GeoDataFrame with added convenience columns: timeStamp (parsed
    sceneDate), sceneDateString/dateStamp (date only), utc (time of day),
    and orbitCode (integer code per relative orbit).
    '''
    gf = gpd.read_file(vectorFile)
    gf['timeStamp'] = gpd.pd.to_datetime(gf.sceneDate, format='%Y-%m-%d %H:%M:%S')
    # vectorized .dt.strftime replaces the per-row .apply(lambda ...) calls
    gf['sceneDateString'] = gf.timeStamp.dt.strftime('%Y-%m-%d')
    gf['dateStamp'] = gpd.pd.to_datetime(gf.sceneDateString)
    gf['utc'] = gf.timeStamp.dt.strftime('%H:%M:%S')
    # category codes give a compact integer id per relative orbit
    gf['orbitCode'] = gf.relativeOrbit.astype('category').cat.codes
    return gf
def load_inventory(vectorFile):
    '''
    Read the merged inventory vector file and return it as a GeoDataFrame
    with convenience date/time columns (timeStamp, sceneDateString,
    dateStamp, utc, orbitCode) added.
    '''
    inventory = gpd.read_file(vectorFile)
    # parse scene acquisition timestamps once and derive the rest from them
    stamps = gpd.pd.to_datetime(inventory.sceneDate, format='%Y-%m-%d %H:%M:%S')
    inventory['timeStamp'] = stamps
    inventory['sceneDateString'] = stamps.apply(lambda ts: ts.strftime('%Y-%m-%d'))
    inventory['dateStamp'] = gpd.pd.to_datetime(inventory['sceneDateString'])
    inventory['utc'] = stamps.apply(lambda ts: ts.strftime('%H:%M:%S'))
    # compact integer id per relative orbit
    inventory['orbitCode'] = inventory.relativeOrbit.astype('category').cat.codes
    return inventory
# NOTE(review): duplicate definition -- load_inventory is declared multiple
# times in this file; in Python the last definition wins at import time.
def load_inventory(vectorFile):
    '''
    load merged inventory. easy!

    Reads `vectorFile` with geopandas and adds convenience columns:
    timeStamp (parsed sceneDate), sceneDateString/dateStamp (date only),
    utc (time of day), and orbitCode (integer code per relativeOrbit).
    '''
    gf = gpd.read_file(vectorFile)
    # parse 'YYYY-MM-DD HH:MM:SS' sceneDate strings into Timestamps
    gf['timeStamp'] = gpd.pd.to_datetime(gf.sceneDate, format='%Y-%m-%d %H:%M:%S')
    gf['sceneDateString'] = gf.timeStamp.apply(lambda x: x.strftime('%Y-%m-%d'))
    gf['dateStamp'] = gpd.pd.to_datetime(gf.sceneDateString)
    gf['utc'] = gf.timeStamp.apply(lambda x: x.strftime('%H:%M:%S'))
    # category codes give a compact integer id per relative orbit
    gf['orbitCode'] = gf.relativeOrbit.astype('category').cat.codes
    return gf
# NOTE(review): duplicate definition -- this is the last of several identical
# load_inventory definitions in the file, so it is the one actually bound at
# import time. The earlier copies are dead and should be removed.
def load_inventory(vectorFile):
    '''
    load merged inventory. easy!

    Reads `vectorFile` with geopandas and adds convenience columns:
    timeStamp (parsed sceneDate), sceneDateString/dateStamp (date only),
    utc (time of day), and orbitCode (integer code per relativeOrbit).
    '''
    gf = gpd.read_file(vectorFile)
    # parse 'YYYY-MM-DD HH:MM:SS' sceneDate strings into Timestamps
    gf['timeStamp'] = gpd.pd.to_datetime(gf.sceneDate, format='%Y-%m-%d %H:%M:%S')
    gf['sceneDateString'] = gf.timeStamp.apply(lambda x: x.strftime('%Y-%m-%d'))
    gf['dateStamp'] = gpd.pd.to_datetime(gf.sceneDateString)
    gf['utc'] = gf.timeStamp.apply(lambda x: x.strftime('%H:%M:%S'))
    # category codes give a compact integer id per relative orbit
    gf['orbitCode'] = gf.relativeOrbit.astype('category').cat.codes
    return gf
# NOTE(review): fragment of a method body -- it uses `self.data_dir` and
# `self.merged_path`, but the enclosing `def` is not visible in this chunk.
# `us`, `logger`, and StateBlockGroupsDownloader2018 are presumably
# imported/defined elsewhere in the file -- confirm before refactoring.
# Loop through all the states and download the shapes
path_list = []
for state in us.STATES:
logger.debug(f"Downloading {state}")
# each downloader's run() appears to return the path of the fetched shapefile
shp_path = StateBlockGroupsDownloader2018(
state.abbr,
data_dir=self.data_dir
).run()
path_list.append(shp_path)
# Open all the shapes
df_list = [gpd.read_file(p) for p in path_list]
# Concatenate them together
df = gpd.pd.concat(df_list)
logger.debug(f"Writing file with {len(df)} blocks groups to {self.merged_path}")
df.to_file(self.merged_path, index=False)