How to use the mechanize._http.HTTPRefreshProcessor function in mechanize

To help you get started, we've selected a few mechanize examples based on popular ways the library is used in public projects.

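All of the examples below follow the same recipe: build a mechanize.Browser, attach a cookie jar, switch the relevant handlers on or off, and register an HTTPRefreshProcessor so that Refresh headers and meta tags are followed without the browser sleeping through long refresh delays. Here is a minimal, self-contained sketch of that recipe (Python 2, like every project quoted below; the URL is a placeholder):

import cookielib
import mechanize

browser = mechanize.Browser()
browser.set_cookiejar(cookielib.LWPCookieJar())  # keep cookies between requests
browser.set_handle_equiv(True)    # honour <meta http-equiv="..."> headers
browser.set_handle_redirect(True)
browser.set_handle_referer(True)
browser.set_handle_robots(False)  # don't consult robots.txt
# Follow Refresh directives, but give up on delays longer than one second
browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
browser.addheaders = [('User-Agent', 'Mozilla/5.0')]

response = browser.open('http://example.com')  # placeholder URL
print response.geturl(), len(response.read())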

github lehins / django-wepay / djwepay / tests.py (View on GitHub)
    @staticmethod
    def browser_create():
        browser = mechanize.Browser()
        cj = cookielib.LWPCookieJar()
        browser.set_cookiejar(cj)
        browser.set_handle_equiv(True)
        # browser.set_handle_gzip(True)
        browser.set_handle_redirect(True)
        browser.set_handle_referer(True)
        browser.set_handle_robots(False)
        browser.set_handle_refresh(
            mechanize._http.HTTPRefreshProcessor(), max_time=1)
        # debugging stuff
        #browser.set_debug_redirects(True)
        #browser.set_debug_responses(True)
        #browser.set_debug_http(True)
        browser.addheaders = [
            ('User-Agent',
             "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/28.0.1500.71 Chrome/28.0.1500.71 Safari/537.36")
        ]
        return browser
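A note on the set_handle_refresh() call above: mechanize's public signature is set_handle_refresh(handle, max_time=None, honor_time=True), where handle is effectively a boolean switch, so the HTTPRefreshProcessor instance passed here only serves as a truthy value and the browser builds its own processor from the max_time keyword. If that reading is right, br.set_handle_refresh(True, max_time=1) is equivalent; we are inferring this from mechanize's API rather than from anything the snippet's author states.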
github lehins / django-wepay / django_wepay / tests.py (View on GitHub)
def browser_create():
    browser = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    browser.set_cookiejar(cj)
    browser.set_handle_equiv(True)
    # browser.set_handle_gzip(True)
    browser.set_handle_redirect(True)
    browser.set_handle_referer(True)
    browser.set_handle_robots(False)
    browser.set_handle_refresh(
        mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # debugging stuff
    #browser.set_debug_redirects(True)
    #browser.set_debug_responses(True)
    #browser.set_debug_http(True)
    browser.addheaders = [
        ('User-Agent',
         "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.4 (KHTML, like Gecko)"
         " Chrome/22.0.1229.94 Safari/537.4")]
    return browser
github KANG-NEWBIE / SpamSms / src / gratis.py (View on GitHub)
def send(self):
		br = mechanize.Browser()
		br.set_handle_equiv(True)
		br.set_handle_gzip(True)
		br.set_handle_redirect(True)
		br.set_handle_referer(True)
		br.set_handle_robots(False)
		br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
		br.addheaders = [("User-Agent",random.choice(ua))]
		def add(x, y):
			return x + y
		def subtract(x, y):
			return x - y
		def multiply(x, y):
			return x * y
		def divide(x, y):
			return x / y
		o = []
		bs = BS(br.open(self.url), features="html.parser")
		for x in bs.find_all("b"):
			o.append(x.text)
		ja = o[1].split(' ')
		a = int(ja[0])
		x = ja[1]
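Two things worth noting in this excerpt: br.open() returns a file-like response object, which BeautifulSoup accepts directly, so BS(br.open(self.url), features="html.parser") parses the page without an explicit .read(); and the add/subtract/multiply/divide helpers, which look unused here, appear to exist to solve the page's arithmetic challenge, whose operand and operator are parsed out of the second <b> tag at the end of the excerpt (our reading of the truncated source).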
github LoganDing / Coursera.org-Downloader / coursera_downloader.py (View on GitHub)
def initialize_browser(course, email, password):
    # Use mechanize to handle cookies
    print
    print 'Initializing browsing session...'
    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    #br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=0)
    auth_url = 'https://class.coursera.org/****/auth/auth_redirector?type=login&subtype=normal&email'.replace('****', course)
    br.open(auth_url)

    br.select_form(nr=0)
    br.form['email'] = email
    br.form['password'] = password
    br.submit()
    print 'Logging in and resolving resources to download may take a few seconds...\n'

    # Check whether the email and password were submitted correctly
    if 'https://class.coursera.org/****/auth/login_receiver?data='.replace('****', course) not in br.geturl():
        print 'Failed to log in, exiting...'
        sys.exit(1)

    video_lectures = 'https://class.coursera.org/****/lecture/index'.replace('****', course)
    br.open(video_lectures)
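Two details distinguish this example: max_time=0 should mean only Refresh directives with a zero-second delay are followed, which keeps the login redirect chain from stalling (our interpretation of the handler's max_time contract), and select_form(nr=0) selects the first form on the page by index so that its email and password fields can then be filled like dictionary entries.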
github corydolphin / mailman-downloader / mailman_downloader.py (View on GitHub)
A main entry point for this module. Downloads all of the archives for the
    specified mailing list, decoding each archive and storing it as a .mbox
    file in the dest directory under a subdirectory for that mailing list.
    It is also the only function which holds any state, in the form of a
    mechanize.Browser instance.
    '''

    br = mechanize.Browser()
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)
    br.set_handle_equiv(True)
    br.set_handle_gzip(True) #we get gzip decompression for free!
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    br.addheaders = [('User-agent', ('Mozilla/5.0 (X11; U; Linux i686; en-US;'
                    ' rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox'
                    '/3.0.1')
                    )]

    listName = getListNameFromUrl(mailingListUrl)
    if password is not None and username is not None: # Authentication required
        try:
            authenticate(br, mailingListUrl, username, password) #first, authenticate with the server
        except AuthorizationException, ae:
            print >> sys.stderr, "Failed to authenticate with List:'%s', check username and password" % listName
            raise
        except Exception, e:
            print >> sys.stderr, "Failed to authenticate with List:'%s'" % listName
            traceback.print_exc()
            raise
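Every example on this page builds an LWPCookieJar but keeps it in memory only, so the session dies with the process. If you need cookies to survive between runs, the jar can be persisted to disk. A minimal sketch, with cookies.txt as our placeholder filename:

import cookielib
import mechanize

cj = cookielib.LWPCookieJar('cookies.txt')  # placeholder filename
try:
    cj.load(ignore_discard=True)  # reuse cookies saved by a previous run
except IOError:
    pass  # first run: no cookie file yet

br = mechanize.Browser()
br.set_cookiejar(cj)
br.open('http://example.com')  # placeholder URL
cj.save(ignore_discard=True)  # write session cookies back to disk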
github 0xInfection / TIDoS-Framework / modules / ScanEnum / crawler1.py (View on GitHub)
from urllib2 import urlparse
import time
from time import sleep
from colors import *

br = mechanize.Browser()

cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)

br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)

br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [
    ('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

linksall = []
cis = []
crawled = []

def crawler10x00(web):

	time.sleep(0.5)
	print R+'\n    ==========================='
	print R+'     C R A W L E R  (Depth 1)'
	print R+'    ===========================\n'
	print O+' [This module will fetch all links'
	print O+'  from an online API and then crawl'
	print O+'          them one by one]'
github 0xInfection / TIDoS-Framework / modules / VulnLysis / SerioBugs / errorsqlsearch.py (View on GitHub)
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)

params = []

br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)

class UserAgent(FancyURLopener):
	version = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0'

useragent = UserAgent()
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

ctr = 0
path_list = []
payloads = []

def errorsqlsearch(web):

	os.system('clear')
	print R+'\n    ======================================'
	print R+'     S Q L i   H U N T E R (Auto Awesome)'
	print R+'    ======================================'
	print R+'  [It is recommended to run ScanEnum/Crawlers  '
	print R+'          before using this module] \n'
	
	with open('files/payload-db/errorsql_payloads.lst', 'r') as pay:
github s0md3v / Blazy / blazy.py (View on GitHub)
from re import search, findall
from urllib import urlopen
# Stuff related to the mechanize browser module
br = mechanize.Browser() # Shortening the call by assigning it to a variable "br"
# set cookies
cookies = cookielib.LWPCookieJar()
br.set_cookiejar(cookies)
# Mechanize settings
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
br.set_debug_http(False)
br.set_debug_responses(False)
br.set_debug_redirects(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept-Encoding','br')]
# Banner
print """\033[1;37m    ____   _                    
   |  _ \ | |              
   | |_) || |  __ _  ____ _   _ 
   |  _ < | | / _` ||_  /| | | |
   | |_) || || (_| | / / | |_| |
   |____/ |_| \__,_|/___| \__, |
                           __/ |
    Made with \033[91m<3\033[37m By D3V\033[1;37m   |___/ 
    \033[0m"""
url = raw_input('\033[1;34m[?]\033[0m Enter target URL: ') #takes input from user
if 'http://' in url:
    pass
elif 'https://' in url:
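One caveat about the headers in this snippet: it advertises Accept-Encoding: br, but mechanize has no Brotli decoder (the only content-encoding support we are aware of is the experimental gzip handling behind set_handle_gzip), so a server that honours the header would return compressed bytes the script cannot parse. Dropping that header, or advertising identity only, is the safer choice.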
github 0xInfection / TIDoS-Framework / modules / ScanEnum / osdetect.py (View on GitHub)
# Browser
br = mechanize.Browser()

# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)

# Browser options
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)

# Follows refresh 0 but doesn't hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [
    ('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]

def getos0x00(web):

    global flag
    flag = 0x00
    ip_addr = socket.gethostbyname(web)
    print GR+' [*] Getting IP address...'
    time.sleep(0.7)
    print G+' [+] Website IP : ' +O+ str(ip_addr)
    time.sleep(0.5)
    print GR+' [*] Trying to identify operating system...'
    time.sleep(0.5)
    print O+' [!] Configuring requests...'
    result = br.open('https://www.censys.io/ipv4/%s/raw' % ip_addr).read()
github BenDoan / Infinite-Campus-Grade-Scraper / scraper.py (View on GitHub)
def setup():
    """general setup commands"""
    # Cookie Jar
    cj = cookielib.LWPCookieJar()
    br.set_cookiejar(cj)

    # Browser options
    br.set_handle_equiv(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(True)
    br.set_handle_robots(False)

    # Follows refresh 0 but doesn't hang on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)

    if options.verbose:
        br.set_debug_http(True)

    # User-Agent
    br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
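The set_debug_http(True) call in this last example (and the debug calls commented out in earlier ones) only produces visible output once the mechanize logger is routed somewhere. The mechanize documentation suggests wiring it to stdout roughly like this; the INFO level is what the docs use, and this setup is ours rather than part of the original script:

import logging
import sys

logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)  # surfaces the HTTP header debug output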