import json

import pyquery
# eq_ is the nose-style equality assertion these test suites typically use.
from nose.tools import eq_

# OPINION_RATING, RATING_USAGE and RATING_CHOICES are application-level
# feedback constants (not shown in this snippet).


def test_rating(self):
    """Submit rating form with and without AJAX."""
    # Empty POST: count the validation errors.
    for ajax in True, False:
        r = self.post_feedback({'type': OPINION_RATING}, ajax=ajax)
        if not ajax:
            # Non-AJAX: errors are rendered into the HTML form.
            doc = pyquery.PyQuery(r.content)
            eq_(doc('article#rate form .errorlist').length, len(RATING_USAGE))
        else:
            # AJAX: errors come back as JSON keyed by question.
            eq_(r.status_code, 400)
            errors = json.loads(r.content)
            eq_(len(errors), len(RATING_USAGE))
            for question in RATING_USAGE:
                assert question.short in errors

    # Submit an actual rating.
    data = {'type': OPINION_RATING}
    for type in RATING_USAGE:
        data[type.short] = RATING_CHOICES[type.id % len(RATING_CHOICES)][0]
    for ajax in True, False:
        r = self.post_feedback(data, follow=False, ajax=ajax)
        if not ajax:
            # The original snippet is truncated here; the non-AJAX branch
            # would typically assert a redirect after a successful submit.
            pass
from pyquery import PyQuery as pq


def test_unicode(self):
    xml = pq(u"<p>é</p>")
    # Under Python 3, .html() and str() both return text, not bytes.
    self.assertEqual(type(xml.html()), str)
    self.assertEqual(str(xml), '<p>é</p>')
    self.assertEqual(str(xml('p:contains("é")')), '<p>é</p>')
def test_xhtml_namespace(self):
    # self.xhtml is a namespaced XHTML fixture defined elsewhere in the class.
    expected = 'What'
    d = pq(self.xhtml.encode('utf8'), parser='xml')
    d.xhtml_to_html()
    val = d('div').text()
    self.assertEqual(repr(val), repr(expected))
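For context, a minimal self-contained sketch of what `xhtml_to_html()` does; the `self.xhtml` fixture above presumably holds a similarly namespaced document:

from pyquery import PyQuery as pq

# XHTML documents carry the XHTML namespace, which makes plain CSS
# selectors like 'div' miss when parsed with the xml parser.
doc = pq('<div xmlns="http://www.w3.org/1999/xhtml"><p>What</p></div>',
         parser='xml')
doc.xhtml_to_html()  # strips the namespace so ordinary selectors match
print(doc('div').text())  # -> 'What'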
from pyquery import PyQuery


def extract_upload_errors(html):
    """Collect the red, centered error paragraphs from an upload page."""
    pq = PyQuery(html)
    result = []
    for e in pq.find('.thin > p[style="color: red; text-align: center;"]'):
        result.append(PyQuery(e).text())
    return result
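A quick sanity check of the helper above on hand-written HTML; the markup is invented for illustration, but real pages would match the same selector:

sample = """
<div class="thin">
  <p style="color: red; text-align: center;">File too large.</p>
  <p style="color: red; text-align: center;">Unsupported format.</p>
  <p>Upload log follows.</p>
</div>
"""
print(extract_upload_errors(sample))
# -> ['File too large.', 'Unsupported format.']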
def pq(self):
    """Parse this object's stored HTML on demand."""
    return PyQuery(self.html)
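A minimal, self-contained sketch of the pattern this accessor implements; the `Page` class and its `html` attribute are invented for illustration:

from pyquery import PyQuery


class Page:
    def __init__(self, html):
        self.html = html

    @property
    def pq(self):
        # Re-parse on each access; cache the result if parsing cost matters.
        return PyQuery(self.html)


page = Page('<h1>Hello</h1>')
print(page.pq('h1').text())  # -> 'Hello'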
"data": [],
"more": False,
"title": title,
"total": 0,
"type": "collection",
"caption": "优酷视频全集"
}
last_num = 1
while True:
new_url = ep.format(list_id, last_num)
json_data = await get_url_service.get_url_async(new_url)[14:-2]
info = json.loads(json_data)
if info.get("error", None) == 1 and info.get("message", None) == "success":
new_html = info.get("html", None)
if new_html:
new_html = PyQuery(new_html)
items = new_html("a[target='video'][data-from='2-1']")
for item in items:
item = PyQuery(item)
url = "http:" + item.attr("href")
title = item.attr("title")
info = {
"name": title,
"no": title,
"subtitle": title,
"url": url
}
data["data"].append(info)
last_num += 1
else:
break
else:
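The fixed `[14:-2]` slice above is brittle if the callback name changes length; a hedged sketch of a more tolerant unwrap (the helper name is invented):

import json
import re


def strip_jsonp(payload):
    # Grab the outermost parenthesized body of 'callback({...});',
    # falling back to parsing the payload as bare JSON.
    m = re.match(r'^[^(]*\((.*)\)\s*;?\s*$', payload, re.S)
    return json.loads(m.group(1) if m else payload)


strip_jsonp('cb({"error": 1, "message": "success"});')
# -> {'error': 1, 'message': 'success'}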
import re

from pyquery import PyQuery


async def Parse_le(self, input_text):
    html = PyQuery(await get_url_service.get_url_async(input_text))
    items = html('dt.d_tit')
    title = "LETV"
    i = 0
    data = {
        "data": [],
        "more": False,
        "title": title,
        "total": i,
        "type": "collection"
    }
    for item in items:
        a = PyQuery(item).children('a')
        name = a.text()
        no = a.text()
        subtitle = a.text()
        url = a.attr('href')
        if url is None:
            continue
        # Raw string avoids the invalid-escape warning on '\.'.
        if not re.match(r'^http://www\.le\.com/.+\.html', url):
            continue
        info = {
            "name": name,
            "no": no,
            "subtitle": subtitle,
            "url": url,
            "caption": "首页地址列表"  # "homepage link list"
        }
        data["data"].append(info)
    # The original snippet ends here; returning the collected mapping
    # is the natural completion.
    return data