# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
import regex
from urlextract import URLExtract
# Matches e-mail addresses, allowing Unicode letters (\p{L}) in both the local
# part and the domain.  The pattern requires at least two characters in the
# local part ([...]+ then [...]* then [...]+) and a dotted, letters-only TLD.
# Uses the third-party `regex` module because `re` lacks \p{L} support.
EMAIL_REGEX = regex.compile(
    r'[\p{L}0-9]+[\p{L}0-9_.+-]*[\p{L}0-9_+-]+@[\p{L}0-9]+[\p{L}0-9.-]*\.\p{L}+' # noqa
)
# Characters treated as punctuation (includes Unicode quotes, ellipses,
# guillemets, and bracket variants).
PUNCTUATION_SIGNS = set('.,;:¡!¿?…⋯&‹›«»\"“”[]()⟨⟩}{/|\\')
# Shared module-level extractor instance, created once at import time and
# reused by callers.
url_extractor = URLExtract()
def clean_text(text, allowed_chars='- '):
    """Normalize *text* for comparison/tokenization.

    Lower-cases the input, collapses every run of whitespace to a single
    space, then drops all characters that are neither alphanumeric nor
    listed in *allowed_chars* (hyphen and space by default).
    """
    normalized = ' '.join(text.lower().split())
    kept = [char for char in normalized
            if char.isalnum() or char in allowed_chars]
    return ''.join(kept)
def contains_letters(word):
    """Return True if *word* contains at least one alphabetic character."""
    for character in word:
        if character.isalpha():
            return True
    return False
def contains_numbers(word):
    """Return True if *word* contains at least one decimal digit."""
    return any(map(str.isdigit, word))
def _get_extractor(syntax: Syntax):
    """Build a URLExtract instance tuned for the given markup *syntax*."""
    from urlextract import URLExtract  # type: ignore

    extractor = URLExtract()
    # Workaround for https://github.com/lipoja/URLExtract/issues/13:
    # make square brackets terminate URLs so bracket-delimited links
    # are not swallowed into the extracted URL.
    if syntax in {'org', 'orgmode', 'org-mode'}:  # TODO remove hardcoding..
        brackets = {'[', ']'}
        extractor._stop_chars_right |= brackets
        extractor._stop_chars_left |= brackets
    elif syntax in {'md', 'markdown'}:
        # Markdown currently needs no extra stop characters.
        # extractor._stop_chars_right |= {','}
        # extractor._stop_chars_left |= {','}
        pass
    return extractor
def findHttpUrls(searchRootDirectory):
    # NOTE(review): this function appears truncated in this chunk — the outer
    # `try` below has no matching `except`/`finally`, and the three url stores
    # are never filled or returned; the remainder is presumably missing.
    # NOTE(review): relies on `os` and `pathlib`, which are not imported in
    # the visible part of this file — confirm they are imported elsewhere.
    # NOTE(review): camelCase naming is inconsistent with the snake_case
    # helpers above.
    alterableUrlsStore = {}
    nonAlterableUrlsStore = {}
    invalidUrlsStore = {}
    extractor = URLExtract()
    # Length of the first walked root; used below to derive relative paths.
    lengthOfOriginalRootPath = -1
    for root, _, files in os.walk(searchRootDirectory, onerror=None):
        if lengthOfOriginalRootPath == -1:
            lengthOfOriginalRootPath = len(root)
        for filename in files:
            # Skip Visual Studio project/key files and anything in a path
            # containing ".git".
            if pathlib.Path(filename).suffix in ['.props', '.pyproj', '.vcxproj', '.snk'] or '.git' in root:
                continue
            absoluteFilePath = os.path.join(root, filename)
            relativeFilePath = '.' + absoluteFilePath[lengthOfOriginalRootPath:]
            try:
                # Read as bytes, then attempt a UTF-8 decode; undecodable
                # files are only reported, not skipped or re-tried.
                with open(absoluteFilePath, "rb") as f:
                    data = f.read()
                    try:
                        data = data.decode("utf-8")
                    except Exception as e:
                        print("Unable to decodefile: {} in UTF-8 Encoding.".format(relativeFilePath))
def run(self, params={}):
    """Parse a Proofpoint TAP alert (HTML) into structured result data.

    Feeds the alert HTML to HTMLTableParser, normalizes the parsed tables
    via TAP, then scans the raw HTML for the threat-details URL — it lives
    in an <a> link rather than a table cell, so the table parser misses it.

    :param params: plugin input dict; reads Input.TAP_ALERT (the alert HTML).
    :return: {Output.RESULTS: <normalized alert data>}
    """
    raw_alert = params.get(Input.TAP_ALERT)
    parser = HTMLTableParser()
    parser.feed(raw_alert)
    clean_data = TAP(parser.tables).data

    # Get the Threat details URL, which is NOT an HTML table element but the
    # <a> link of the table element.
    extractor = URLExtract()
    # BUG FIX: str.replace returns a new string; the original call discarded
    # the result, so newlines were never actually removed before extraction.
    cleaned_input_for_extractor = raw_alert.replace('\n', '')
    urls_from_input = extractor.find_urls(cleaned_input_for_extractor)
    threat_details_urls = [
        u for u in urls_from_input
        if r'threat/email' in u and r'threatinsight.proofpoint.com' in u[:40]
    ]
    if threat_details_urls:
        clean_data['threat']['threat_details_url'] = threat_details_urls[0]
    return {Output.RESULTS: clean_data}
# </a>  (stray HTML fragment left over from the source scrape)