Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a pycurl garbage-collection regression test.
# Python 2 syntax (`print` statement, `xrange`); `gc`, `opts`, `c`, and
# `Curl` are bound earlier in the original file and are not visible in this
# chunk. Indentation of the `if`/`for` bodies appears stripped by extraction;
# code left byte-identical.
gc.collect()
##print gc.get_referrers(c)
##print gc.get_objects()
# Report how many objects the collector tracks (verbose mode only), before
# and after deleting the handle, to show the expected drop.
if opts.verbose >= 1:
print "Tracked objects:", len(gc.get_objects())
# The `del' below should delete these 4 objects:
# Curl + internal dict, CurlMulti + internal dict
del c
gc.collect()
if opts.verbose >= 1:
print "Tracked objects:", len(gc.get_objects())
if 1:
# Ensure that the refcounting error in "reset" is fixed:
# each reset() used to leak a reference, so 100000 iterations make a
# regression visible as unbounded object growth.
for i in xrange(100000):
c = Curl()
c.reset()
# /***********************************************************************
# // done
# ************************************************************************/
print "All tests passed."
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
# $Id$
import pycurl

# Drive two easy handles concurrently through one CurlMulti handle.
m = pycurl.CurlMulti()
# Keep Python-level references to the easy handles so they are not
# garbage-collected while libcurl still uses them.
m.handles = []
c1 = pycurl.Curl()
c2 = pycurl.Curl()
c1.setopt(c1.URL, 'http://curl.haxx.se')
c2.setopt(c2.URL, 'http://cnn.com')
c2.setopt(c2.FOLLOWLOCATION, 1)
m.add_handle(c1)
m.add_handle(c2)
m.handles.append(c1)
m.handles.append(c2)

# Standard multi loop: call perform() until libcurl stops asking to be
# called again immediately, then block in select() for socket activity.
# num_handles drops to 0 once every transfer has finished.
num_handles = len(m.handles)
while num_handles:
    while 1:
        ret, num_handles = m.perform()
        if ret != pycurl.E_CALL_MULTI_PERFORM:
            break
    m.select(1.0)

# FIX: release libcurl resources explicitly; the original leaked both the
# easy handles and the multi handle.
for handle in m.handles:
    m.remove_handle(handle)
    handle.close()
m.close()
def send_REST_request(ip, port, object, payload):
    """Send *payload* as JSON to http://<ip>:<port>/<object> via HTTP PUT.

    :param ip: host or IP address of the REST endpoint
    :param port: TCP port of the endpoint
    :param object: URL path component (name kept for caller compatibility,
        although it shadows the builtin ``object``)
    :param payload: JSON-serializable request body
    :return: response body as a string, or ``None`` on any failure
        (best-effort contract preserved from the original)
    """
    conn = None
    try:
        response = StringIO()
        headers = ["Content-Type:application/json"]
        url = "http://%s:%s/%s" % (ip, port, object)
        conn = pycurl.Curl()
        conn.setopt(pycurl.URL, url)
        conn.setopt(pycurl.HTTPHEADER, headers)
        conn.setopt(pycurl.POST, 1)
        conn.setopt(pycurl.POSTFIELDS, json.dumps(payload))
        # CUSTOMREQUEST overrides the verb, so the request goes out as PUT
        # while keeping the POST-style body set above.
        conn.setopt(pycurl.CUSTOMREQUEST, "PUT")
        conn.setopt(pycurl.WRITEFUNCTION, response.write)
        conn.perform()
        return response.getvalue()
    except Exception:
        # FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt. Failure still maps to None.
        return None
    finally:
        # FIX: close the curl handle; the original leaked it on every call.
        if conn is not None:
            conn.close()
def get_request(url, outfpath=None):
    """Fetch *url* and return the response body.

    When the module-level PYCURL flag is set, the request is made with
    pycurl (honoring a netrc file, a cookie jar, and redirects) and the
    raw bytes are returned; otherwise it falls back to ``requests`` and
    returns the decoded text.
    """
    global PYCURL
    if not PYCURL:
        resp = requests.get(url)
        return resp.text
    # outfpath must be set
    import pycurl
    from io import BytesIO
    body = BytesIO()
    handle = pycurl.Curl()
    handle.setopt(handle.URL, url)
    handle.setopt(handle.WRITEDATA, body)
    handle.setopt(handle.COOKIEJAR, '/tmp/cookie.jar')
    handle.setopt(handle.NETRC, True)
    handle.setopt(handle.FOLLOWLOCATION, True)
    #handle.setopt(handle.REMOTE_NAME, outfpath)
    handle.perform()
    handle.close()
    return body.getvalue()
# NOTE(review): truncated fragment — the body of the final `for` loop and the
# function's return value are missing from this chunk, so behavior past the
# loop header cannot be determined here. Python 2 syntax (`print` statement);
# indentation appears stripped. Code left byte-identical.
def search(query):
print query
c = pycurl.Curl()
data = BytesIO()
# Query a local Solr instance for JSON results. `query` is interpolated
# unescaped — presumably pre-encoded by the caller (TODO confirm; otherwise
# this needs URL quoting).
Q = str('http://0.0.0.0:8983/solr/select?q='+query+'&wt=json&indent=true')
#Q = "http://0.0.0.0:8983/solr/firenotes/select?wt=json&indent=true&q="+query
#print 'RESPONSE ' + Q
c.setopt(c.URL, Q)
c.setopt(c.WRITEFUNCTION, data.write)
c.perform()
try:
# Solr's JSON response keeps the matching documents under response.docs.
di = json.loads(data.getvalue())
#print di
ans = di["response"]["docs"]
#print ans
books = []
for i in ans:
#print i
def _create_and_configure_curl(self):
"""
Instantiates and configures the curl instance. This will drive the
bulk of the behavior of how the download progresses. The values in
this call should be tweaked or pulled out as repository-level
configuration as the download process is enhanced.
:return: curl instance to use for the download
:rtype: pycurl.Curl
"""
curl = pycurl.Curl()
# Eventually, add here support for:
# - callback on bytes downloaded
# - bandwidth limitations
# - SSL verification for hosts on SSL
# - client SSL certificate
# - proxy support
# - callback support for resuming partial downloads
# Keep libcurl's own debug output off.
curl.setopt(pycurl.VERBOSE, 0)
# TODO: Add in reference to is cancelled hook to be able to abort the download
# Close out the connection on our end in the event the remote host
# stops responding. This is interpretted as "If less than 1000 bytes are
# sent in a 5 minute interval, abort the connection."
# NOTE(review): chunk is truncated here — the LOW_SPEED_* options implied by
# the comment above and the `return curl` promised by the docstring are not
# visible in this view.
def geturl(url):
    """Fetch *url* with pycurl, streaming the body to the module-level
    ``read_cb`` callback.

    Follows redirects; NOSIGNAL disables libcurl's signal use (required
    when running with threads). Debug output is routed to the
    module-level ``dbg_cb`` callback.
    """
    c = pycurl.Curl()
    try:
        c.setopt(pycurl.URL, url)
        #c.setopt(pycurl.VERBOSE, 1)
        c.setopt(pycurl.WRITEFUNCTION, read_cb)
        c.setopt(pycurl.FOLLOWLOCATION, 1)
        c.setopt(pycurl.NOSIGNAL, 1)
        c.setopt(pycurl.DEBUGFUNCTION, dbg_cb)
        c.perform()
    finally:
        # FIX: close the handle even when perform() raises pycurl.error;
        # the original leaked the handle on a failed transfer.
        c.close()
# NOTE(review): truncated — the function continues past this chunk (the GET
# branch only rebuilds `url`; no request is issued in the visible lines).
# Python 2 (cStringIO, urllib.urlencode); indentation appears stripped.
# Code left byte-identical.
def _curl(url, params=None, post=False, username=None, password=None, header=None, body=None):
global pycurl, StringIO
# Lazy imports cached in module globals: pycurl plus a StringIO
# implementation (cStringIO preferred, pure-Python fallback).
if not pycurl:
import pycurl
try: import cStringIO as StringIO
except: import StringIO
curl = pycurl.Curl()
# Route through the user-configured HTTP proxy when enabled in prefs.
if get_prefs('use_http_proxy'):
HTTP_PROXY = '%s:%s' % (get_prefs('http_proxy_host'), get_prefs('http_proxy_port'))
curl.setopt(pycurl.PROXY, HTTP_PROXY)
# Custom headers arrive as a dict and are flattened to "k:v" strings.
if header:
curl.setopt(pycurl.HTTPHEADER, [str(k) + ':' + str(v) for k, v in header.items()])
if post:
curl.setopt(pycurl.POST, 1)
# For POST the params become the urlencoded body; for GET they are
# appended to the query string.
if params:
if post:
curl.setopt(pycurl.POSTFIELDS, urllib.urlencode(params))
else:
url = "?".join((url, urllib.urlencode(params)))
# NOTE(review): fragment cut at BOTH ends — it begins mid-expression (the
# call whose argument list ends with `self.MAXPERHOST)`) and stops before the
# handle is used. It appears to be part of a download-scheduler method that
# manages a pool of pycurl handles keyed by (user, host, port) — TODO confirm
# against the full source. Code left byte-identical.
self.MAXPERHOST)
if (len(hostactive) < maxactive and
self.changeActiveDownloads(+1)):
del self._queue[i]
userhost = (url.user, url.host, url.port)
# Prefer reusing an inactive handle previously bound to the same
# user/host/port (presumably to keep its connection alive — confirm).
for handle in self._inactive:
if self._inactive[handle] == userhost:
del self._inactive[handle]
self._active[handle] = schemehost
break
else:
# No reusable handle found: evict one entry when the idle pool is
# over its cap, then create a fresh pycurl handle.
if len(self._inactive) > self.MAXINACTIVE:
del self._inactive[handle]
handle = pycurl.Curl()
self._active[handle] = schemehost
localpath = self.getLocalPath(item)
localpathpart = localpath+".part"
size = item.getInfo("size")
# Resume from a .part file only when it exists and is smaller than the
# expected size; otherwise restart from byte 0.
if os.path.isfile(localpathpart):
partsize = os.path.getsize(localpathpart)
if size and partsize >= size:
partsize = 0
else:
partsize = 0
handle.partsize = partsize
# partsize > 0 means append to the existing partial file.
if partsize:
openmode = "a"
# NOTE(review): truncated — perform(), cleanup of `f`/`c`, any use of
# `outfile`, and the return are not visible in this chunk. Python 2 syntax
# (`print` statement); indentation appears stripped. Code left byte-identical.
def convert(self, filename, outfile):
# Upload a local PDF to the PDFX service (module-level PDFX_URL) as the
# raw body of an HTTP POST.
c = pycurl.Curl()
pdfsize = os.path.getsize(filename)
header = ["Content-Type: application/pdf",
"Content-length: " + str(pdfsize)]
f = open(filename, 'rb')
c.setopt(pycurl.URL, PDFX_URL)
c.setopt(pycurl.HTTPHEADER, header)
c.setopt(pycurl.FOLLOWLOCATION, True)
c.setopt(pycurl.POST, 1)
# Stream the open file as the upload body; INFILESIZE tells libcurl how
# many bytes to send.
c.setopt(pycurl.INFILE, f)
c.setopt(pycurl.INFILESIZE, pdfsize)
print "Uploading %s..." % filename