if self.loginNeededCheck(data):
    # always login if not already to avoid lots of headaches
    self.performLogin(url,data)
    # refresh website after logging in
    data = self._fetchUrl(url,usecache=False)
soup = self.make_soup(data)

# subscription check
# logger.debug(soup)
subCheck = soup.find('div',{'class':'click-to-read-full'})
if subCheck and self.getConfig("auto_sub"):
    subSoup = self.doStorySubscribe(url,soup)
    if subSoup:
        soup = subSoup
    else:
        raise exceptions.FailedToDownload("Error when subscribing to story. This usually means a change in the website code.")
elif subCheck and not self.getConfig("auto_sub"):
    raise exceptions.FailedToDownload("This story is only available to subscribers. You can subscribe manually on the web site, or set auto_sub:true in personal.ini.")

## Title
a = soup.find('h1', {'id': 'story-title'})
self.story.setMetadata('title',stripHTML(a))

# Find authorid and URL from... author url.
mainmeta = soup.find('footer', {'class': 'main-meta'})
alist = mainmeta.find('span', text='Author(s)')
alist = alist.parent.findAll('a', href=re.compile(r"/profile/view/\d+"))
for a in alist:
    self.story.addToList('authorId',a['href'].split('/')[-1])
    self.story.addToList('authorUrl','https://'+self.host+a['href'])
    self.story.addToList('author',a.text)
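
# --- Illustrative sketch, not part of the adapter source --------------------
# A minimal, self-contained check of how the author links above get parsed:
# each href is matched against /profile/view/<number>, the numeric id is the
# last path segment, and the absolute URL is rebuilt from the site host.
# The href and host values below are made-up placeholders.
import re

href = '/profile/view/12345'                        # hypothetical example href
assert re.compile(r"/profile/view/\d+").search(href)
author_id = href.split('/')[-1]                     # -> '12345'
author_url = 'https://' + 'www.example.com' + href  # host is a placeholder
print(author_id, author_url)
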
def getChapterText(self, url):
    logger.debug('Getting chapter text from: %s' % url)
    soup = self.make_soup(self._fetchUrl(url))
    div = soup.find('div', {'id' : 'all'})
    if None == div:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    # removing the headers
    for tag in div.findAll('h1') + div.findAll('h2'):
        tag.extract()
    # removing the info paragraph
    for tag in div.findAll("p",{'id':'info'}):
        tag.extract()
    # removing the aright paragraph.
    #<p class="aright">
    for tag in div.findAll("p",{'class':'aright'}):
        tag.extract()
    # removing the first link, which is a link to the main page of the site.
    tag = div.find('a')
    if tag is not None:
        tag.extract()
    return self.utf8FromSoup(url,div)
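
# --- Illustrative sketch, not part of the adapter source --------------------
# Self-contained demonstration of the cleanup pattern used above: calling
# extract() on a BeautifulSoup tag detaches it from the parse tree, so the
# headers and boilerplate paragraphs no longer appear in the chapter HTML
# that gets returned.  The sample markup is invented for the demo.
from bs4 import BeautifulSoup

html = '<div id="all"><h1>Title</h1><p id="info">by Someone</p><p>Actual text.</p></div>'
div = BeautifulSoup(html, 'html.parser').find('div', {'id': 'all'})
for tag in div.findAll('h1') + div.findAll('p', {'id': 'info'}):
    tag.extract()
print(div)   # -> <div id="all"><p>Actual text.</p></div>
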
def getChapterText(self, url):
    logger.debug('Getting chapter text from: %s' % url)
    soup = self.make_soup(self._fetchUrl(url))
    # hardly a great identifier, I know, but whofic really doesn't
    # give us anything better to work with.
    span = soup.find('span', {'style' : 'font-size: 100%;'})
    if None == span:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    # chapter select at end of page included in span.
    for form in span.find_all('form'):
        form.extract()
    span.name='div'
    return self.utf8FromSoup(url,span)
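
# --- Illustrative sketch, not part of the adapter source --------------------
# Why the "span.name='div'" line above works: assigning to a BeautifulSoup
# tag's .name renames the tag in place, so the chapter wrapper is serialized
# as a <div> instead of a <span>.  The markup here is a made-up example.
from bs4 import BeautifulSoup

soup = BeautifulSoup('<span style="font-size: 100%;">chapter text</span>', 'html.parser')
span = soup.find('span', {'style': 'font-size: 100%;'})
span.name = 'div'
print(span)   # -> <div style="font-size: 100%;">chapter text</div>
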
def getChapterText(self, url):
    logger.debug('Getting chapter text from: %s' % url)
    soup = self.make_soup(self._fetchUrl(url))
    div = soup.find('div', {'id' : 'story'})
    if None == div:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    return self.utf8FromSoup(url,div)
def getChapterText(self, url):
    logger.debug('Getting chapter text from: %s' % url)
    #chapter=self.make_soup('<div class="story"></div>')
    data = self._fetchUrl(url)
    soup = self.make_soup(data)
    chapter = soup.find("div", "entry_content")
    if None == chapter:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    return self.utf8FromSoup(url,chapter)
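
# --- Illustrative sketch, not part of the adapter source --------------------
# The two-argument find() above relies on BeautifulSoup treating a bare string
# second argument as a CSS class filter, so soup.find("div", "entry_content")
# matches <div class="entry_content">.  The sample markup is invented.
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div class="entry_content"><p>Chapter body.</p></div>', 'html.parser')
print(soup.find("div", "entry_content"))   # -> the entry_content div and its contents
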
def getChapterText(self, url):
    logger.debug('Getting chapter text from: %s' % url)
    soup = self.make_soup(self._fetchUrl(url))
    div = soup.find('div', {'id' : 'story1'})
    if None == div:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    return self.utf8FromSoup(url,div)
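
# --- Illustrative sketch, not part of the adapter source --------------------
# The getChapterText() variants above differ only in the id of the container
# holding the chapter ('all', 'story', 'story1', ...).  A hedged sketch of how
# a shared helper could parameterize that; the helper name and its placement
# are assumptions, not something the adapters actually define.
def _chapterTextById(self, url, div_id):
    logger.debug('Getting chapter text from: %s' % url)
    soup = self.make_soup(self._fetchUrl(url))
    div = soup.find('div', {'id': div_id})
    if div is None:
        raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
    return self.utf8FromSoup(url, div)

# An adapter could then implement, for example:
#     def getChapterText(self, url):
#         return self._chapterTextById(url, 'story1')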