def checkuser(web):
    print(GR + ' [*] Loading module...')
    time.sleep(0.6)
    #print(R+'\n =======================')
    #print(R+'   C H E C K   A L I A S')
    #print(R+' =======================\n')
    from core.methods.print import posintpas
    posintpas("check alias")
    print(GR + ' [*] Parsing Url...')
    # Keep only the first label of the registered domain as the alias to check
    web0 = tld.get_fld(web).split('.', 1)[0]
    print(G + ' [+] Alias Set : ' + web0 + C + color.TR2 + C)
    print(O + ' [*] Setting services...' + C)
    time.sleep(0.7)
    global services
    services = ['YouTube', 'Hypemachine', 'Yahoo', 'Linkagogo', 'Coolspotters', 'Wikipedia', 'Twitter', 'gdgt',
                'BlogMarks', 'LinkedIn', 'Ebay', 'Tumblr', 'Pinterest', 'yotify', 'Blogger', 'Flickr',
                'FortyThreeMarks', 'Moof', 'HuffingtonPost', 'Wordpress', 'DailyMotion', 'LiveJournal', 'vimeo',
                'DeviantArt', 'reddit', 'StumbleUpon', 'Answers', 'Sourceforge', 'Wikia', 'ArmChairGM',
                'Photobucket', 'MySpace', 'Etsy', 'SlideShare', 'Fiverr', 'scribd', 'Squidoo', 'ImageShack',
                'ThemeForest', 'SoundCloud', 'Tagged', 'Hulu', 'Typepad', 'Hubpages', 'weebly', 'Zimbio',
                'github', 'TMZ', 'WikiHow', 'Delicious', 'zillow', 'Jimdo', 'goodreads', 'Segnalo', 'Netlog',
                'Issuu', 'ForumNokia', 'UStream', 'Gamespot', 'MetaCafe', 'askfm', 'hi5', 'JustinTV', 'Blekko',
                'Skyrock', 'Cracked', 'foursquare', 'LastFM', 'posterous', 'steam', 'Opera', 'Dreamstime',
                'Fixya', 'UltimateGuitar', 'docstoc', 'FanPop', 'Break', 'tinyurl', 'Kongregate', 'Disqus',
                'Armorgames', 'Behance', 'ChaCha', 'CafeMom', 'Liveleak', 'Topix', 'lonelyplanet', 'Stardoll',
                'Instructables', 'Polyvore', 'Proboards', 'Weheartit', 'Diigo', 'Gawker', 'FriendFeed',
                'Videobash', 'Technorati', 'Gravatar', 'Dribbble', 'formspringme', 'myfitnesspal', '500px',
                'Newgrounds', 'GrindTV', 'smugmug', 'ibibo', 'ReverbNation', 'Netvibes', 'Slashdot', 'Fool',
                'Plurk', 'zedge', 'Discogs', 'YardBarker', 'Ebaumsworld', 'sparkpeople', 'Sharethis', 'Xmarks',
                'Crunchbase', 'FunnyOrDie', 'Suite101', 'OVGuide', 'Veoh', 'Yuku', 'Experienceproject',
                'Fotolog', 'Hotklix', 'Epinions', 'Hyves', 'Sodahead', 'Stylebistro', 'fark', 'AboutMe',
                'Metacritic', 'Toluna', 'Mobypicture', 'Gather', 'Datpiff', 'mouthshut', 'blogtalkradio',
                'Dzone', 'APSense', 'Bigstockphoto', 'n4g', 'Newsvine', 'ColourLovers', 'Icanhazcheezburger',
                'Xanga', 'InsaneJournal', 'redbubble', 'Kaboodle', 'Folkd', 'Bebo', 'Getsatisfaction',
                'WebShots', 'threadless', 'Active', 'GetGlue', 'Shockwave', 'Pbase']
    print(C + ' [!] Loaded ' + str(len(services)) + ' services...')
    check0x00(web0, web)
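# Standalone sketch of the alias extraction above, assuming only the tld package
# (the URL value is illustrative):
from tld import get_fld

url = 'https://www.example.com/some/path'
fld = get_fld(url)            # 'example.com' - the registered (first-level) domain
alias = fld.split('.', 1)[0]  # 'example' - the alias probed against each service
print(alias)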
def domain_sanity_check(domain):  # Verify the domain name sanity
    if domain:
        try:
            domain = get_fld(domain, fix_protocol=True)
            return domain
        except Exception:
            print(colored("[!] Incorrect domain format. Please follow this format: example.com, http(s)://example.com, www.example.com", "red"))
            sys.exit(1)
    else:
        pass
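# Usage sketch of the underlying call in domain_sanity_check; values are illustrative:
from tld import get_fld
from tld.exceptions import TldDomainNotFound

print(get_fld('www.example.com', fix_protocol=True))  # 'example.com'
try:
    get_fld('no-valid-tld', fix_protocol=True)
except TldDomainNotFound:
    print('rejected')  # the branch domain_sanity_check answers with sys.exit(1)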
        sublist.append(a)
except IOError:
    print(R + ' [-] Wordlist not found!')
global found
# Normalize the scheme before extracting the registered domain
if 'http://' in web:
    web = web.replace('http://', '')
elif 'https://' in web:
    web = web.replace('https://', '')
else:
    pass
web = 'http://' + web
tld0 = get_fld(web)
if len(sublist) > 0:
    for m in sublist:
        furl = str(m) + '.' + str(tld0)
        flist.append(furl)
if flist:
    time.sleep(0.5)
    print(R + '\n B R U T E F O R C E R')
    print(R + ' =======================\n')
    print(GR + ' [*] Bruteforcing for possible subdomains...')
    for url in flist:
        if 'http://' in url:
            url = url.replace('http://', '')
        elif 'https://' in url:
            url = url.replace('https://', '')
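# Self-contained sketch of the candidate-building step above; the wordlist entries
# are hypothetical stand-ins for what the file normally provides:
from tld import get_fld

web = 'http://example.com'
tld0 = get_fld(web)                                 # 'example.com'
sublist = ['www', 'mail', 'dev']                    # normally read from the wordlist
flist = ['{}.{}'.format(m, tld0) for m in sublist]
print(flist)  # ['www.example.com', 'mail.example.com', 'dev.example.com']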
def get_fld_from_value(value, zone):
    """
    Get the First Level Domain (FLD) for the provided value.
    """
    res = get_fld(value, fix_protocol=True, fail_silently=True)
    if res is None:
        return zone
    return res
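# Usage sketch: fail_silently=True makes get_fld return None instead of raising,
# so the zone acts as a fallback for unparsable values:
print(get_fld_from_value('host.example.com', 'example.com'))  # 'example.com'
print(get_fld_from_value('not a domain', 'example.com'))      # 'example.com' (fallback)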
def posting_to_slack(result, dns_resolve, dns_output):  # sending result to slack workplace
    global domain_to_monitor
    global new_subdomains
    if dns_resolve:
        dns_result = dns_output
        if dns_result:
            dns_result = {k: v for k, v in dns_result.items() if v}  # filters non-resolving subdomains
            rev_url = []
            print(colored("\n[!] Exporting result to Slack. Please do not interrupt!", "red"))
            for url in dns_result:
                url = url.replace('*.', '')
                url = url.replace('+ ', '')
                rev_url.append(get_fld(url, fix_protocol=True))
            unique_list = list(set(new_subdomains) & set(dns_result.keys()))  # filters non-resolving subdomains from new_subdomains list
            for subdomain in unique_list:
                data = "{}:new: {}".format(at_channel(), subdomain)
                slack(data)
                try:
                    if dns_result[subdomain]["A"]:
                        for i in dns_result[subdomain]["A"]:
                            data = "```A : {}```".format(i)
                            slack(data)
                except KeyError:
                    pass
                try:
                    if dns_result[subdomain]['CNAME']:
                        for i in dns_result[subdomain]['CNAME']:
                            data = "```CNAME : {}```".format(i)
                            slack(data)
                except KeyError:
                    pass
def host(string):
    if string and '*' not in string:
        return tld.get_fld(string, fix_protocol=True, fail_silently=True)
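# Usage sketch: wildcard names are skipped (host() falls through and returns None),
# everything else is reduced to its first-level domain:
print(host('sub.example.com'))  # 'example.com'
print(host('*.example.com'))    # None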
def lookup(self, domain, wildcard=True):
    try:
        # connecting to crt.sh postgres database to retrieve subdomains.
        unique_domains = set()
        domain = domain.replace('%25.', '')
        conn = psycopg2.connect("dbname={0} user={1} host={2}".format(DB_NAME, DB_USER, DB_HOST))
        conn.autocommit = True
        cursor = conn.cursor()
        # Parameterized query; the leading '%' lets the reversed LIKE match any subdomain
        cursor.execute(
            "SELECT ci.NAME_VALUE NAME_VALUE FROM certificate_identity ci "
            "WHERE ci.NAME_TYPE = 'dNSName' "
            "AND reverse(lower(ci.NAME_VALUE)) LIKE reverse(lower(%s));",
            ('%' + domain,))
        for result in cursor.fetchall():
            matches = re.findall(r"\'(.+?)\'", str(result))
            for subdomain in matches:
                try:
                    if get_fld("https://" + subdomain) == domain:
                        unique_domains.add(subdomain.lower())
                except Exception:
                    pass
        return sorted(unique_domains)
    except Exception:
        # Fall back to the crt.sh web API when the Postgres interface is unreachable
        base_url = "https://crt.sh/?q={}&output=json"
        if wildcard:
            domain = "%25.{}".format(domain)
        url = base_url.format(domain)
        subdomains = set()
        user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0'
        req = requests.get(url, headers={'User-Agent': user_agent}, timeout=30, verify=False)  # times out after 30 seconds waiting (mainly for large datasets)
        if req.status_code == 200:
            content = req.content.decode('utf-8')
            data = json.loads(content)
            for subdomain in data:
                # crt.sh may pack several SANs into one name_value, newline-separated
                for entry in subdomain["name_value"].split('\n'):
                    subdomains.add(entry.lower())
        return sorted(subdomains)
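# Why the get_fld equality check in lookup() matters: the reversed LIKE match can
# also return look-alike names such as 'notexample.com' for 'example.com';
# comparing first-level domains discards them:
from tld import get_fld

print(get_fld('https://api.example.com'))  # 'example.com'    -> kept
print(get_fld('https://notexample.com'))   # 'notexample.com' -> filtered out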