    yield specificity, check_style_attribute(
        element, 'text-align:%s' % align)
elif element.tag == 'col':
    if element.get('width'):
        style_attribute = 'width:%s' % element.get('width')
        if element.get('width').isdigit():
            style_attribute += 'px'
        yield specificity, check_style_attribute(
            element, style_attribute)
elif element.tag == 'hr':
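    # Map the legacy <hr> presentational attributes (size, color,
    # noshade, width) onto equivalent CSS declarations.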
    size = 0
    if element.get('size'):
        try:
            size = int(element.get('size'))
        except ValueError:
            LOGGER.warning('Invalid value for size: %s', element.get('size'))
    if (element.get('color'), element.get('noshade')) != (None, None):
        if size >= 1:
            yield specificity, check_style_attribute(
                element, 'border-width:%spx' % (size / 2))
    elif size == 1:
        yield specificity, check_style_attribute(
            element, 'border-bottom-width:0')
    elif size > 1:
        yield specificity, check_style_attribute(
            element, 'height:%spx' % (size - 2))
    if element.get('width'):
        style_attribute = 'width:%s' % element.get('width')
        if element.get('width').isdigit():
            style_attribute += 'px'
        yield specificity, check_style_attribute(
            element, style_attribute)
        base_url=base_url, url_fetcher=url_fetcher,
        check_css_mime_type=check_css_mime_type,
        # Use str() to work around http://bugs.python.org/issue4978
        # See https://github.com/Kozea/WeasyPrint/issues/97
        **{str(type_): guess})
    with result as result:
        yield result
elif nones == [True, False, True, True, True, True]:
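    # Only a filename was given: derive the base URL from the path and
    # open the file ourselves.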
    if base_url is None:
        base_url = path2url(filename)
    with open(filename, 'rb') as file_obj:
        yield 'file_obj', file_obj, base_url, None
elif nones == [True, True, False, True, True, True]:
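    # Only a URL was given: fetch it, and fall back to an empty stylesheet
    # when the MIME type check is enabled and the type is not text/css.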
    with fetch(url_fetcher, url) as result:
        if check_css_mime_type and result['mime_type'] != 'text/css':
            LOGGER.warning(
                'Unsupported stylesheet type %s for %s',
                result['mime_type'], result['redirected_url'])
            yield 'string', '', base_url, None
        else:
            proto_encoding = result.get('encoding')
            if base_url is None:
                base_url = result.get('redirected_url', url)
            if 'string' in result:
                yield 'string', result['string'], base_url, proto_encoding
            else:
                yield (
                    'file_obj', result['file_obj'], base_url,
                    proto_encoding)
elif nones == [True, True, True, False, True, True]:
    if base_url is None:
        # filesystem file-like objects have a 'name' attribute.
'target-counter()', 'target-counters()', 'target-text()'):
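    # The first token of a target-*() value names the anchor; resolve
    # attr() references here so the anchor can be collected below.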
    anchor_token = value[1][0]
    if anchor_token[0] == 'attr()':
        attr = compute_attr_function(computer, anchor_token)
        if attr is None:
            computed_value = None
        else:
            computed_value = (value[0], (
                (attr,) + value[1][1:]))
    else:
        computed_value = value
    if computer['target_collector'] and computed_value:
        computer['target_collector'].collect_computed_target(
            computed_value[1][0])
if computed_value is None:
    LOGGER.warning('Unable to compute %s\'s value for content: %s' % (
        computer['element'], ', '.join(str(item) for item in value)))
else:
    computed_values.append(computed_value)
return tuple(computed_values)
def w3c_date_to_pdf(string, attr_name):
    """Transform a W3C date string into PDF date format.

    The result looks like ``YYYYMMDDHHmmSSOHH'mm'``, keeping only the
    components present in the source string.
    """
    if string is None:
        return None
    match = W3C_DATE_RE.match(string)
    if match is None:
        LOGGER.warning('Invalid %s date: %r', attr_name, string)
        return None
    groups = match.groupdict()
    pdf_date = (groups['year'] +
                (groups['month'] or '') +
                (groups['day'] or '') +
                (groups['hour'] or '') +
                (groups['minute'] or '') +
                (groups['second'] or ''))
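    # When a time is present, pad missing seconds with '00' before
    # appending the time zone offset.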
    if groups['hour']:
        assert groups['minute']
        if not groups['second']:
            pdf_date += '00'
        if groups['tz_hour']:
            assert groups['tz_hour'].startswith(('+', '-'))
            assert groups['tz_minute']
            pdf_date += "%s'%s'" % (groups['tz_hour'], groups['tz_minute'])
    return pdf_date
            rule.source_line, rule.source_column)
        continue
    ignore_imports = True
    if not media_queries.evaluate_media_query(
            media, device_media_type):
        continue
    content_rules = tinycss2.parse_rule_list(rule.content)
    preprocess_stylesheet(
        device_media_type, base_url, content_rules, url_fetcher,
        matcher, page_rules, fonts, font_config, ignore_imports=True)
elif rule.type == 'at-rule' and rule.lower_at_keyword == 'page':
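    # Parse the @page prelude into page types; each one keeps its own
    # specificity.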
    data = parse_page_selectors(rule)
    if data is None:
        LOGGER.warning(
            'Unsupported @page selector "%s", '
            'the whole @page rule was ignored at %s:%s.',
            tinycss2.serialize(rule.prelude),
            rule.source_line, rule.source_column)
        continue
    ignore_imports = True
    for page_type in data:
        specificity = page_type.pop('specificity')
        page_type = PageType(**page_type)
        content = tinycss2.parse_declaration_list(rule.content)
        declarations = list(preprocess_declarations(base_url, content))
        if declarations:
            selector_list = [(specificity, None, page_type)]
            page_rules.append((rule, selector_list, declarations))
try:
    # Attachments from document links like <link> or <a> can only be URLs.
    # They're passed in as tuples.
    if isinstance(attachment, tuple):
        url, description = attachment
        attachment = Attachment(
            url=url, url_fetcher=url_fetcher, description=description)
    elif not isinstance(attachment, Attachment):
        attachment = Attachment(guess=attachment, url_fetcher=url_fetcher)
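    # Read the attachment source and wrap it in a compressed PDF file
    # stream object.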
    with attachment.source as (source_type, source, url, _):
        if isinstance(source, bytes):
            source = io.BytesIO(source)
        pdf_file_object = _create_compressed_file_object(source)
except URLFetchingError as exc:
    LOGGER.error('Failed to load attachment: %s', exc)
    return None
# TODO: Use the result object from a URL fetch operation to provide more
# details on the possible filename
return PdfDict(
    Type=PdfName('Filespec'), F=PdfString.encode(''),
    UF=PdfString.encode(_get_filename_from_result(url, None)),
    EF=PdfDict(F=pdf_file_object),
    Desc=PdfString.encode(attachment.description or ''))
def collect_anchor(self, anchor_name):
    """Store ``anchor_name`` in ``existing_anchors``."""
    if anchor_name and isinstance(anchor_name, str):
        if anchor_name in self.existing_anchors:
            LOGGER.warning('Anchor defined twice: %s', anchor_name)
        else:
            self.existing_anchors.append(anchor_name)
        continue
    if not media_queries.evaluate_media_query(
            media, device_media_type):
        continue
    url = url_join(
        base_url, url, allow_relative=False,
        context='@import at %s:%s',
        context_args=(rule.source_line, rule.source_column))
    if url is not None:
        try:
            CSS(
                url=url, url_fetcher=url_fetcher,
                media_type=device_media_type, font_config=font_config,
                matcher=matcher, page_rules=page_rules)
        except URLFetchingError as exc:
            LOGGER.error(
                'Failed to load stylesheet at %s : %s', url, exc)
elif rule.type == 'at-rule' and rule.lower_at_keyword == 'media':
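    # Rules nested in a matching @media block are preprocessed recursively.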
    media = media_queries.parse_media_query(rule.prelude)
    if media is None:
        LOGGER.warning('Invalid media type "%s", '
                       'the whole @media rule was ignored at %s:%s.',
                       tinycss2.serialize(rule.prelude),
                       rule.source_line, rule.source_column)
        continue
    ignore_imports = True
    if not media_queries.evaluate_media_query(
            media, device_media_type):
        continue
    content_rules = tinycss2.parse_rule_list(rule.content)
    preprocess_stylesheet(
def draw(self, context, concrete_width, concrete_height, _image_rendering):
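    # Parse the stored SVG data with CairoSVG and paint it on the Cairo
    # context, scaled to the concrete width and height.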
    try:
        svg = ScaledSVGSurface(
            cairosvg.parser.Tree(
                bytestring=self._svg_data, url=self._base_url,
                url_fetcher=self._cairosvg_url_fetcher),
            output=None, dpi=96, output_width=concrete_width,
            output_height=concrete_height)
        if svg.width and svg.height:
            context.scale(
                concrete_width / svg.width, concrete_height / svg.height)
            context.set_source_surface(svg.cairo)
            context.paint()
    except Exception as e:
        LOGGER.error(
            'Failed to draw an SVG image at %s : %s', self._base_url, e)