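# Shared imports assumed by the snippets below (module paths follow the nhentai package layout;
# `request`, `_get_csrf_token`, `_get_title_and_id` and `page_range_parser` are helper
# functions defined elsewhere in nhentai/parser.py).
import os
import sys
import time

from bs4 import BeautifulSoup

from nhentai import constant
from nhentai.logger import logger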

def __api_suspended_doujinshi_parser(id_):
    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)

    i = 0
    while i < 5:
        try:
            response = request('get', url).json()
        except Exception as e:
            i += 1
            if i >= 5:
                logger.critical(str(e))
                exit(1)
            continue
        break

    doujinshi['name'] = response['title']['english']
    doujinshi['subtitle'] = response['title']['japanese']
    doujinshi['img_id'] = response['media_id']
    doujinshi['ext'] = ''.join([i['t'] for i in response['images']['pages']])
    doujinshi['pages'] = len(response['images']['pages'])

    # gather the remaining metadata of the doujinshi
    needed_fields = ['character', 'artist', 'language', 'tag', 'parody', 'group', 'category']
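    # A minimal sketch of the remaining field collection, assuming the API response
    # carries a 'tags' list whose entries expose 'type' and 'name' keys:
    for tag in response['tags']:
        tag_type = tag['type']
        if tag_type not in needed_fields:
            continue
        if tag_type not in doujinshi:
            doujinshi[tag_type] = tag['name']
        else:
            doujinshi[tag_type] += ', ' + tag['name']

    return doujinshi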

def doujinshi_parser(id_):
    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)

    try:
        response = request('get', url)
        if response.status_code in (200, ):
            response = response.content
        else:
            logger.debug('Slow down and retry ({}) ...'.format(id_))
            time.sleep(1)
            return doujinshi_parser(str(id_))
    except Exception as e:
        logger.warn('Error: {}, ignored'.format(str(e)))
        return None

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_info = html.find('div', attrs={'id': 'info'})

    title = doujinshi_info.find('h1').text
    subtitle = doujinshi_info.find('h2')
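    # A minimal sketch of the remaining extraction (the full parser also pulls the media id,
    # file extensions, page count and tags out of the page before returning):
    doujinshi['name'] = title
    doujinshi['subtitle'] = subtitle.text if subtitle else ''

    return doujinshi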

def login(username, password):
    if os.getenv('DEBUG'):
        logger.info('Getting CSRF token ...')

    csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
    if os.getenv('DEBUG'):
        logger.info('CSRF token is {}'.format(csrf_token))

    login_dict = {
        'csrfmiddlewaretoken': csrf_token,
        'username_or_email': username,
        'password': password,
    }
    resp = request('post', url=constant.LOGIN_URL, data=login_dict)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        csrf_token = _get_csrf_token(resp.text)
        resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})

    if 'Invalid username/email or password' in resp.text:
        logger.error('Login failed, please check your username and password')
        exit(1)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        logger.error('Use nhentai --cookie \'YOUR_COOKIE_HERE\' to save your cookie.')
        exit(2)

def __api_suspended_search_parser(keyword, sorting, page):
    logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
    result = []
    i = 0
    while i < 5:
        try:
            response = request('get', url=constant.SEARCH_URL,
                               params={'query': keyword, 'page': page, 'sort': sorting}).json()
        except Exception as e:
            i += 1
            if i >= 5:
                logger.critical(str(e))
                logger.warn('If you are in China, please configure a proxy to bypass the GFW.')
                exit(1)
            continue
        break

    if 'result' not in response:
        raise Exception('No result in response')

    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})

    return result

def favorites_parser(page_range=''):
    result = []
    html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
    count = html.find('span', attrs={'class': 'count'})
    if not count:
        logger.error("Can't get your number of favorited doujins. Did the login fail?")
        return []

    count = int(count.text.strip('(').strip(')').replace(',', ''))
    if count == 0:
        logger.warning('No favorites found')
        return []

    pages = int(count / 25)
    if pages:
        pages += 1 if count % (25 * pages) else 0
    else:
        pages = 1

    logger.info('You have %d favorites in %d pages.' % (count, pages))

    if os.getenv('DEBUG'):
        pages = 1

    page_range_list = range(1, pages + 1)
    if page_range:
        logger.info('page range is {0}'.format(page_range))
        page_range_list = page_range_parser(page_range, pages)

    for page in page_range_list:
        try:
            logger.info('Getting doujinshi ids of page %d' % page)
            resp = request('get', constant.FAV_URL + '?page=%d' % page).content
            result.extend(_get_title_and_id(resp))
        except Exception as e:
            logger.error('Error: %s, continue', str(e))

    return result

def search_parser(keyword, sorting='date', page=1):
    logger.debug('Searching doujinshis with keyword {0}'.format(keyword))
    response = request('get', url=constant.SEARCH_URL,
                       params={'q': keyword, 'page': page, 'sort': sorting}).content

    result = _get_title_and_id(response)
    if not result:
        logger.warn('Nothing found for keyword {}'.format(keyword))

    return result

def tag_parser(tag_name, sorting, max_page=1, index=0):
    # NOTE: the function header and the first two lines below are inferred from the body's
    # use of result, tag_name, sorting, max_page and index; the upstream code may differ.
    result = []
    if sorting == 'date':
        sorting = ''

    for p in range(1, max_page + 1):
        if sys.version_info >= (3, 0, 0):
            unicode_ = str
        else:
            unicode_ = unicode

        if isinstance(tag_name, (str, unicode_)):
            logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
            response = request('get', url='%s/%s/%s?page=%d' % (constant.TAG_URL[index], tag_name, sorting, p)).content
            result += _get_title_and_id(response)
        else:
            for i in tag_name:
                logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, i))
                response = request('get',
                                   url='%s/%s/%s?page=%d' % (constant.TAG_URL[index], i, sorting, p)).content
                result += _get_title_and_id(response)

        if not result:
            logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
            return

    if not result:
        logger.warn('No results for tag \'{}\''.format(tag_name))

    return result
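
# A minimal usage sketch (the id and keyword below are illustrative only):
if __name__ == '__main__':
    print(doujinshi_parser('32271'))
    print(search_parser('full color', sorting='date', page=1))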