Notes
-----
The files are cached in ~/.scopus/author_retrieval/ENHANCED/{author_id}
(without the optional leading '9-s2.0-' part).
"""
# Load the ENHANCED view of the author profile from Scopus (or from cache).
view = "ENHANCED" # In case Scopus adds different views in future
# Accept either a plain Scopus Author ID or an EID ('9-s2.0-nnnn'): keep
# only the trailing numeric part; int() validates that it is numeric.
self._id = str(int(str(author_id).split('-')[-1]))
Retrieval.__init__(self, identifier=self._id, api='AuthorRetrieval',
refresh=refresh, view=view)
# Unwrap the top-level envelope of the API response.
self._json = self._json['author-retrieval-response']
# A normal response is a one-element list; unwrap it.  A merged profile
# instead yields a dict without index 0, which raises KeyError.
try:
self._json = self._json[0]
except KeyError: # Incomplete forward
# The profile was merged into another one: the response only carries
# alias URLs.  Extract the alias IDs and warn the user; the object
# keeps the reduced JSON, so most properties will be unavailable.
alias_json = listify(self._json['alias']['prism:url'])
alias = ', '.join([d['$'].split(':')[-1] for d in alias_json])
text = 'Author profile with ID {} has been merged and the main '\
'profile is now one of {}. Please update your records '\
'manually. Functionality of this object is '\
'reduced.'.format(author_id, alias)
warn(text, UserWarning)
aff_id : str or int
The Scopus Affiliation ID. Optionally expressed
as an Elsevier EID (i.e., in the form 10-s2.0-nnnnnnnn).
refresh : bool (optional, default=False)
Whether to refresh the cached file if it exists or not.
Notes
-----
The files are cached in ~/.scopus/affiliation_retrieval/STANDARD/{aff_id}.
"""
# Load the STANDARD view of the affiliation profile from Scopus (or cache).
view = "STANDARD" # In case Scopus adds different views in future
# Accept either a plain Affiliation ID or an EID ('10-s2.0-nnnn'): keep
# only the trailing numeric part; int() validates that it is numeric.
aff_id = str(int(str(aff_id).split('-')[-1]))
Retrieval.__init__(self, identifier=aff_id, view=view, refresh=refresh,
api='ContentAffiliationRetrieval')
# Unwrap the top-level envelope of the API response.
self._json = self._json['affiliation-retrieval-response']
from collections import namedtuple
from datetime import datetime
from pybliometrics.scopus.classes import Retrieval
# Citation overview of a document, built on the Retrieval base class.
class CitationOverview(Retrieval):
@property
def authors(self):
"""A list of namedtuples storing author information,
where each namedtuple corresponds to one author.
The information in each namedtuple is (name surname initials id url).
All entries are strings.
"""
out = []
order = 'name surname initials id url'
auth = namedtuple('Author', order)
for author in self._citeInfoMatrix.get('author'):
# Drop XML-namespace prefixes from the keys (e.g. 'ce:initials').
author = {k.split(":", 1)[-1]: v for k, v in author.items()}
new = auth(name=author.get('index-name'), id=author.get('authid'),
surname=author.get('surname'),
initials=author.get('initials'),
url=author.get('author-url'))
# NOTE(review): `new` is built but never appended to `out` and the
# property has no return statement -- the body appears truncated
# here; compare with the upstream source before relying on it.
author_id : str or int
The ID of the author to search for. Optionally expressed
as an Elsevier EID (i.e., in the form 9-s2.0-nnnnnnnn).
refresh : bool (optional, default=False)
Whether to refresh the cached file (if it exists) or not.
Notes
-----
The files are cached in ~/.scopus/author_retrieval/ENHANCED/{author_id}
(without the optional leading '9-s2.0-' part).
"""
# Load the ENHANCED view of the author profile from Scopus (or from cache).
view = "ENHANCED" # In case Scopus adds different views in future
# Accept either a plain Scopus Author ID or an EID ('9-s2.0-nnnn'): keep
# only the trailing numeric part; int() validates that it is numeric.
self._id = str(int(str(author_id).split('-')[-1]))
Retrieval.__init__(self, identifier=self._id, api='AuthorRetrieval',
refresh=refresh, view=view)
# Unwrap the top-level envelope of the API response.
self._json = self._json['author-retrieval-response']
# A normal response is a one-element list; unwrap it.  A merged profile
# instead yields a dict without index 0, which raises KeyError.
try:
self._json = self._json[0]
except KeyError: # Incomplete forward
# The profile was merged into another one: the response only carries
# alias URLs.  Extract the alias IDs and warn the user; the object
# keeps the reduced JSON, so most properties will be unavailable.
alias_json = listify(self._json['alias']['prism:url'])
alias = ', '.join([d['$'].split(':')[-1] for d in alias_json])
text = 'Author profile with ID {} has been merged and the main '\
'profile is now one of {}. Please update your records '\
'manually. Functionality of this object is '\
'reduced.'.format(author_id, alias)
warn(text, UserWarning)
Note: This information is not cached, and retrieval is slow for large
coauthor groups.
"""
# Retrieve all coauthors via the Scopus Author Search API, paging
# through the search results 25 entries at a time.
# First request only establishes the total number of coauthors.
res = cache_file(url=self.coauthor_link)
data = loads(res.text)['search-results']
N = int(data.get('opensearch:totalResults', 0))
# One namedtuple per coauthor.
fields = 'surname given_name id areas affiliation_id name city country'
coauth = namedtuple('Coauthor', fields)
coauthors = []
# Iterate over search results in chunks of 25 results
count = 0
while count < N:
params = {'start': count, 'count': 25}
res = cache_file(url=self.coauthor_link, params=params, accept='json')
data = loads(res.text)['search-results'].get('entry', [])
# Extract information for each coauthor
for entry in data:
aff = entry.get('affiliation-current', {})
try:
# Multiple subject areas arrive as a list of dicts.
areas = [a['$'] for a in entry.get('subject-area', [])]
except TypeError: # Only one subject area given
areas = [entry['subject-area']['$']]
# 'dc:identifier' has the form 'AUTHOR_ID:nnnn'; keep the number.
new = coauth(surname=entry['preferred-name']['surname'],
given_name=entry['preferred-name'].get('given-name'),
id=entry['dc:identifier'].split(':')[-1],
areas='; '.join(areas), name=aff.get('affiliation-name'),
affiliation_id=aff.get('affiliation-id'),
city=aff.get('affiliation-city'),
country=aff.get('affiliation-country'))
coauthors.append(new)
# NOTE(review): `count` is never incremented inside the loop (expected
# `count += 25`) and there is no return of `coauthors` -- the fragment
# appears truncated here; verify against the upstream source.
def source_id(self):
    """Scopus source ID of the document."""
    path = ['coredata', 'source-id']
    return chained_get(self._json, path)
def journal_history(self):
    """List of named tuples of authored publications in the form
    (sourcetitle, abbreviation, type, issn). issn is only given
    for journals. abbreviation and issn may be None.
    """
    journal = namedtuple('Journal', 'sourcetitle abbreviation type issn')
    path = ['author-profile', 'journal-history', 'journal']
    history = []
    for pub in listify(chained_get(self._json, path, [])):
        record = journal(sourcetitle=pub.get('sourcetitle'),
                         abbreviation=pub.get('sourcetitle-abbrev'),
                         type=pub.get('@type'),
                         issn=pub.get('issn'))
        history.append(record)
    # Preserve the convention of returning None instead of an empty list.
    if history:
        return history
    return None
def given_name(self):
    """Author's preferred given name."""
    return chained_get(self._json,
                       ['author-profile', 'preferred-name', 'given-name'])
def status(self):
    """The status of the author profile."""
    path = ["author-profile", "status"]
    return chained_get(self._json, path)
def publicationName(self):
    """Name of source the document is published in."""
    path = ['coredata', 'prism:publicationName']
    return chained_get(self._json, path)