from collections import namedtuple
from json import loads
# `cache_file` is the package's cached-HTTP helper, imported at module
# level in the original source.

def get_coauthors(self):
    """Retrieves basic information about co-authors as a list of
    namedtuples in the form
    (surname, given_name, id, areas, affiliation_id, name, city, country),
    where areas is a list of subject area codes joined by "; ".

    Note: This information will not be cached, and retrieval is slow for
    large coauthor groups.
    """
    # Get the number of coauthors to search for
    res = cache_file(url=self.coauthor_link)
    data = loads(res.text)['search-results']
    N = int(data.get('opensearch:totalResults', 0))
    # Store information in namedtuples
    fields = 'surname given_name id areas affiliation_id name city country'
    coauth = namedtuple('Coauthor', fields)
    coauthors = []
    # Iterate over search results in chunks of 25 results
    count = 0
    while count < N:
        params = {'start': count, 'count': 25}
        res = cache_file(url=self.coauthor_link, params=params, accept='json')
        data = loads(res.text)['search-results'].get('entry', [])
        # Extract information for each coauthor
        for entry in data:
            aff = entry.get('affiliation-current', {})
            try:
                areas = [a['$'] for a in entry.get('subject-area', [])]
            except TypeError:  # Only one subject area given
                areas = [entry['subject-area']['$']]
            new = coauth(surname=entry['preferred-name']['surname'],
                         given_name=entry['preferred-name'].get('given-name'),
                         id=entry['dc:identifier'].split(':')[-1],
                         areas='; '.join(areas),
                         name=aff.get('affiliation-name'),
                         affiliation_id=aff.get('affiliation-id'),
                         city=aff.get('affiliation-city'),
                         country=aff.get('affiliation-country'))
            coauthors.append(new)
        count += 25
    return coauthors
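
# Usage sketch (illustrative, not part of the source): assuming this method
# is defined on an AuthorRetrieval-style class that sets `self.coauthor_link`,
# a caller might iterate the returned namedtuples like so:
#
#     au = AuthorRetrieval("7004212771")  # placeholder author ID
#     for co in au.get_coauthors():
#         print(co.surname, co.given_name, co.id, co.country)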
# Fragment from a separate search-download helper. A hypothetical signature
# is added here so the snippet parses; in the original source this loop sits
# inside a routine that has already fetched and parsed the first page of
# results into `res` (a dict), and `SEARCH_URL` and `print_progress` are
# module-level imports.
def _download_remaining(res, params, n, start, api, cursor=False,
                        verbose=False, **kwds):
    _json = res.get('search-results', {}).get('entry', [])
    if verbose:
        chunk = 1
        # Round up, plus 1 for the final iteration
        chunks = int(n/params['count']) + (n % params['count'] > 0) + 1
        print('Downloading results for query "{}":'.format(params['query']))
        print_progress(chunk, chunks)
    # Download the remaining information in chunks
    while n > 0:
        n -= params["count"]
        if cursor:
            # Cursor-based pagination: follow the '@next' token
            pointer = res['search-results']['cursor'].get('@next')
            params.update({'cursor': pointer})
        else:
            # Offset-based pagination: advance the 'start' index
            start += params["count"]
            params.update({'start': start})
        res = cache_file(url=SEARCH_URL[api], params=params, **kwds).json()
        _json.extend(res.get('search-results', {}).get('entry', []))
        if verbose:
            chunk += 1
            print_progress(chunk, chunks)
    return _json
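
# Illustration (assumed, not taken from the source) of the two pagination
# modes the loop above supports, following the Scopus Search API:
#
#     # Cursor-based: the service returns an opaque '@next' token per page
#     params = {'query': 'AU-ID(12345)', 'count': 25, 'cursor': '*'}
#
#     # Offset-based: the client advances the 'start' offset itself
#     params = {'query': 'AU-ID(12345)', 'count': 25, 'start': 0}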