Kajiki-0.7.1/ 0000755 0000765 0000024 00000000000 13155475106 013003 5 ustar amol staff 0000000 0000000 Kajiki-0.7.1/CHANGES.rst 0000644 0000765 0000024 00000011674 13155474337 014623 0 ustar amol staff 0000000 0000000 CHANGES
=======
0.7.1 (2017-09-11)
------------------
* Allow to replace ``gettext`` function by providing it in the template context or through ``base_globals`` in Loader
0.7.0 (2017-06-27)
------------------
* Text for i18n is now extracted ignoring the empty spaces surrounding the text itself. Empty text will always be treated as non translatable nodes for performance reasons.
* ``extract_python`` option will now report syntax errors when extracting text for translation.
0.6.3 (2017-05-25)
------------------
* Added ``extract_python`` option to babel message extractor, this allows extracting gettext calls in ``${}`` expressions
0.6.1 (2016-11-28)
------------------
* Actually report 0.6 in kajiki/version.py
* Expose ``strip_text`` option in loader
0.6.0 (2016-11-27)
------------------
* Fixed ``py:switch`` error message wrongly mentioning ``py:with``
* Support for multiline ``${}`` expressions
* Subsequent text nodes are now squashed into a single text node. This allows translating whole paragraphs instead of single sentences.
* Allow code and function calls inside tag attributes
* Added ``strip_text`` option to XMLTemplate and i18n collector to ensure leading and trailing spaces are stripped by text nodes (also leads to minified HTML)
* Some HTML nodes that do not require being closed but is commonly considered best practice to close them are now emitted with ending tag (IE:
)
* Generally improved code documentation to lower entry barrier for contributors
0.5.5 (2016-06-08)
------------------
* ``py:attrs`` will now emit the attribute name itself or will omit the attribute at all in case of
``bool`` values for 'checked', 'disabled', 'readonly', 'multiple', 'selected', 'nohref',
'ismap', 'declare' and 'defer',
0.5.4 (2016-06-04)
------------------
* ``py:switch`` now correctly supports multiple ``py:case`` statements.
* text inside ``
This is not necessary when you are writing HTML because HTML defines that the
content of ``'.format(style)
perform(src, ''.format(style),
mode='xml')
perform(src, ''.format(style), mode='html')
def test_script_variable(self):
'''Interpolate variables inside '
perform(src, '', mode='xml')
perform(src, '', mode='html')
def test_CDATA_disabled(self):
src = ''
perform(src, '', mode='xml', cdata_scripts=False)
perform(src, '', mode='html', cdata_scripts=False)
def test_CDATA_escaping(self):
src = ''''''
perform(src, '', mode='xml')
perform(src, '', mode='html')
def test_CDATA_escaping_mixed(self):
src = ''' >'''
perform(src, ' >', mode='xml')
perform(src, ' >', mode='html')
def test_script_commented_CDATA(self):
script = 'if (1 < 2) { doc.write("
')
def test_expr_multiline_cdata(self):
perform("""""",
'')
def test_jquery_call_is_not_expr(self):
'''Ensure we handle '$(' as a text literal, since it cannot be a
valid variable sequence. This simplifies, for example,
templates containing inline scripts with jQuery calls
which otherwise have to be written '$$(...'
'''
js = "$(function () { alert('.ready()'); });"
src = "
" + js + "
"
out = "
" + js + "
"
perform(src, out)
def test_jquery_shortcut_is_not_expr(self):
'''Ensure we handle '$.' as a text literal in script blocks'''
js = "$.extend({}, {foo: 'bar'})"
src = "
'
assert chr(32) in output # normal space
assert chr(160) in output # non breaking space
perform(source, output)
class TestSwitch(TestCase):
def test_switch(self):
perform('''
')
except XMLTemplateCompileError as e:
self.assertTrue(
'py:switch directive can only contain py:case and py:else nodes' in str(e)
)
else:
self.assertTrue(False, msg='Should have raised XMLTemplateParseError')
class TestElse(TestCase):
def test_pyif_pyelse(self):
try:
tpl = perform('''
True
False
''', '''
False
''')
except XMLTemplateCompileError as e:
self.assertTrue(
'py:else directive must be inside a py:switch or directly after py:if' in str(e)
)
else:
self.assertTrue(False, msg='Should have raised XMLTemplateParseError')
def test_pyiftag_pyelse_continuation(self):
tpl = perform(
'''
''', rsp
def test_include(self):
'''Must NOT result in: NameError: global name 'name' is not defined'''
loader = MockLoader({
'included.html': XMLTemplate('
The included template must also '
'access Kajiki globals and the template context: '
'${value_of("name")}
")
def test_literal(self):
'''Escape by default; literal() marks as safe.'''
context = dict(albatross="Albatross!!!")
expected_output = "
Albatross!!!
"
perform("
${literal(albatross)}
", expected_output, context)
perform("
${Markup(albatross)}
", expected_output, context)
perform("
$albatross
",
"
<em>Albatross!!!</em>
", context)
from kajiki.util import literal
markup = '"&"'
assert ''.join(list(literal(markup))) == markup
class TestTranslation(TestCase):
def test_scripts_non_translatable(self):
src = '
Hi
'
doc = _Parser('', src).parse()
for n in _Compiler('', doc).compile():
text = getattr(n, 'text', '')
if text in ('hello world', 'hello style'):
self.assertFalse(isinstance(n, TranslatableTextNode))
for n in _Compiler('', doc, cdata_scripts=False).compile():
text = getattr(n, 'text', '')
if text in ('hello world', 'hello style'):
self.assertFalse(isinstance(n, TranslatableTextNode))
def test_extract_translate(self):
src = '''
Hi
Hello
World
'''
expected = {
False: '''
TRANSLATED(Hi)
\n\n TRANSLATED(Hello
World)
''',
True: '''
TRANSLATED(Hi)
TRANSLATED(Hello
World)
'''
}
for strip_text in (False, True):
# Build translation table
messages = {}
for _, _, msgid, _ in i18n.extract(BytesIO(src.encode('utf-8')), None, None, {
'strip_text': strip_text
}):
messages[msgid] = 'TRANSLATED(%s)' % msgid
# Provide a fake translation function
default_gettext = i18n.gettext
i18n.gettext = lambda s: messages[s]
try:
perform(src, expected[strip_text], strip_text=strip_text)
finally:
i18n.gettext = default_gettext
def test_extract_python_inside(self):
src = '''
''', rsp
def test_without_substituting_gettext_with_lambda_extending_file(self):
# this should use i18n.gettext
loader = FileLoader(path=os.path.join(os.path.dirname(__file__),
'data'))
tpl = loader.import_('file_child.html')
rsp = tpl(dict()).render()
assert rsp == '''
parent
child
''', rsp
class TestDOMTransformations(TestCase):
def test_empty_text_extraction(self):
doc = kajiki.xml_template._Parser('', ''' text ''').parse()
doc = kajiki.xml_template._DomTransformer(doc, strip_text=False).transform()
text_data = [n.data for n in doc.firstChild.childNodes]
self.assertEqual([' ', 'text', ' '], text_data)
def test_empty_text_extraction_lineno(self):
doc = kajiki.xml_template._Parser('', '''
text
''').parse()
doc = kajiki.xml_template._DomTransformer(doc, strip_text=False).transform()
linenos = [n.lineno for n in doc.firstChild.childNodes]
self.assertEqual([1, 3, 3], linenos) # Last node starts on same line as it starts with \n
class TestErrorReporting(TestCase):
def test_syntax_error(self):
for strip_text in (False, True):
try:
perform('
${i}
', '', strip_text=strip_text)
except KajikiSyntaxError as exc:
assert '--> for i i range(1, 2):' in str(exc), exc
else:
assert False
def test_code_error(self):
for strip_text in (False, True):
try:
child = FileLoader(
os.path.join(os.path.dirname(__file__), 'data')
).load('error.html', strip_text=strip_text)
child().render()
except ZeroDivisionError as exc:
import traceback, sys
l = traceback.format_exception(*sys.exc_info())
last_line = l[-2]
assert '${3/0}' in last_line, last_line
else:
assert False
if __name__ == '__main__':
main()
Kajiki-0.7.1/kajiki/text.py 0000644 0000765 0000024 00000026177 13003162475 015612 0 ustar amol staff 0000000 0000000 # -*- coding: utf-8 -*-
'''Text template compiler.
Notable in this module are:
* TextTemplate - function building a template from text string or filename.
* _pattern - the regex used to find the beginnings of tags and expressions.
* _Scanner - scans text and generates a stream of tokens.
* _Parser - parses a stream of tokens into the internal representation (IR)
tree.
* _Parser._parse_ - consumes the body of a tag and returns an ir.Node.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import codecs
import re
from .ddict import defaultdict
from itertools import chain
from nine import iteritems, str
from shlex import split as shlex_split # Prior to Python 2.7.3, the
from sys import version_info # *shlex* module did not support
if version_info < (2, 7, 3): # Unicode input. Work around:
_shlex_split = shlex_split
shlex_split = lambda txt: _shlex_split(txt.encode('utf-8'))
del version_info
import kajiki
from . import ir
_pattern = r'''
\$(?:
(?P\$) | # Escape $$
(?P[_a-z][_a-z0-9.]*) | # $foo.bar
{(?P) | # ${....
(?P)
) |
^\s*%(?:
(?P[a-z]+) | # %for, %end, etc.
(?P)
)|
^\s*{%-(?P[a-z]+)| # {%-for, {%-end, etc.
{%(?:
(?P[a-z]+) | # {%for, {%end, etc.
(?P)
)
'''
_re_pattern = re.compile(_pattern, re.VERBOSE | re.IGNORECASE | re.MULTILINE)
def TextTemplate(source=None, filename=None, autoescape=False,
                 encoding='utf-8'):
    """Build a text (non-XML) template class from a string or a file.

    Exactly one of *source* (a unicode string) or *filename* must be
    given; when only *filename* is given, the file is read using
    *encoding*.  With ``autoescape=True`` every expression is escaped
    unless explicitly marked safe.

    Returns the template class produced by ``kajiki.template.from_ir``.
    """
    assert source or filename, "You must either provide a *source* argument " \
        "or a *filename* argument to TextTemplate()."
    if source is None:
        with codecs.open(filename, encoding=encoding) as f:
            source = f.read()
    if filename is None:
        # NOTE(review): this default looks truncated by archive extraction;
        # upstream Kajiki uses the placeholder '<string>' here -- confirm.
        filename = ''
    assert isinstance(source, str), \
        "*source* must be a unicode string, not a {}".format(type(source))
    scanner = _Scanner(filename, source)
    tree = _Parser(scanner, autoescape).parse()
    tree.filename = filename
    return kajiki.template.from_ir(tree)
class _Scanner(object):
    """Scan template source and yield a stream of _Text/_Expr/_Tag tokens.

    Iterating over a _Scanner walks the matches of ``_re_pattern`` and
    emits the plain text between matches plus one token per match.
    """

    def __init__(self, filename, source):
        self.filename = filename  # used to annotate tokens for error reports
        self.source = source
        self.lineno = 1           # running line counter, advanced by text()/expr()
        self.pos = 0              # current scan offset into *source*

    def __iter__(self):
        """Yield tokens in document order.

        ``self.pos`` only ever moves forward (enforced by the property
        below); text between the previous position and the next regex
        match is emitted as a plain-text token first.
        """
        source = self.source
        for mo in _re_pattern.finditer(source):
            start = mo.start()
            if start > self.pos:
                yield self.text(source[self.pos:start])
                self.pos = start
            groups = mo.groupdict()
            # NOTE(review): the group names tested below do not appear in
            # the _pattern literal above -- the pattern seems to have lost
            # its ``(?P<name>...)`` group names during archive extraction;
            # restore them from upstream Kajiki.
            if groups['expr_braced'] is not None:
                self.pos = mo.end()
                yield self._get_braced_expr()
            elif groups['expr_named'] is not None:
                self.pos = mo.end()
                yield self.expr(groups['expr_named'])
            elif groups['expr_escaped'] is not None:
                # '$$' collapses to a literal '$'
                self.pos = mo.end()
                yield self.text('$')
            elif groups['tag_bare'] is not None:
                # '%tag' style: the body runs to the end of the line
                self.pos = mo.end()
                yield self._get_tag_bare(groups['tag_bare'])
            elif groups['tag_begin'] is not None:
                # '{%tag ... %}' style
                self.pos = mo.end()
                yield self._get_tag(groups['tag_begin'])
            elif groups['tag_begin_ljust'] is not None:
                # '{%-tag ... %}' style (leading-whitespace-stripping form)
                self.pos = mo.end()
                yield self._get_tag(groups['tag_begin_ljust'])
            elif groups['tag_bare_invalid'] is not None:
                continue
            else:  # pragma no cover -- regex and dispatch out of sync
                msg = 'Syntax error %s:%s' % (self.filename, self.lineno)
                for i, line in enumerate(self.source.splitlines()):
                    print('%3d %s' % (i + 1, line))
                print(msg)
                assert False, groups
        if self.pos != len(source):
            # Trailing text after the last match.
            yield self.text(source[self.pos:])

    def _get_pos(self):
        return self._pos

    def _set_pos(self, value):
        # The scanner must never move backwards over the source.
        assert value >= getattr(self, '_pos', 0)
        self._pos = value
    pos = property(_get_pos, _set_pos)

    def text(self, text):
        """Wrap *text* in a _Text token, advancing the line counter."""
        self.lineno += text.count('\n')
        return _Text(self.filename, self.lineno, text)

    def expr(self, text):
        """Wrap *text* in an _Expr token, advancing the line counter."""
        self.lineno += text.count('\n')
        return _Expr(self.filename, self.lineno, text)

    def tag(self, tagname, body):
        """Build a _Tag token; the line counter advances past its text."""
        tag = _Tag(self.filename, self.lineno, tagname, body)
        self.lineno += tag.text.count('\n')
        return tag

    def _get_tag_bare(self, tagname):
        # A bare %tag's body extends to the end of the current line.
        end = self.source.find('\n', self.pos)
        if end == -1:
            end = len(self.source)
        body = self.source[self.pos:end]
        self.lineno += 1
        self.pos = end + 1  # skip past the newline as well
        return self.tag(tagname, body)

    def _get_tag(self, tagname):
        # A {%tag body%} extends to the closing '%}'.
        end = self.source.find('%}', self.pos)
        assert end > 0
        body = self.source[self.pos:end]
        self.pos = end + 2
        if body.endswith('-'):
            # '{%tag ... -%}' also consumes whitespace after the tag.
            body = body[:-1]
            while self.source[self.pos] in ' \t':
                self.pos += 1
        return self.tag(tagname, body)

    def _get_braced_expr(self):
        # Find the end of a ${...} expression by compiling the remainder
        # of the source: the SyntaxError position of the first failure
        # points just past the expression's closing brace.
        try:
            compile(self.source[self.pos:], '', 'eval')
        except SyntaxError as se:
            end = self.pos + sum([se.offset] + [len(line) + 1
                                 for idx, line in enumerate(self.source[self.pos:].splitlines())
                                 if idx < se.lineno - 1])
            text = self.source[self.pos:end - 1]
            self.pos = end
            return self.expr(text)
        # NOTE(review): if the remainder of the source happens to compile
        # cleanly, no SyntaxError is raised and this returns None --
        # confirm that cannot occur in practice.
class _Parser(object):
    """Parse the token stream produced by _Scanner into an IR tree."""

    def __init__(self, tokenizer, autoescape=False):
        self.tokenizer = tokenizer
        self.functions = defaultdict(list)  # function decl -> body IR nodes
        self.functions['__main__()'] = []
        self.mod_py = []  # module-level python blocks
        self.iterator = iter(self.tokenizer)
        self.autoescape = autoescape
        self._in_def = False    # currently inside a %def block?
        self._is_child = False  # has an %extends tag been seen?

    def parse(self):
        """Consume all tokens and return an ir.TemplateNode."""
        body = list(self._parse_body())
        # _parse_body always yields a trailing sentinel (None at EOF);
        # body[:-1] strips it.
        self.functions['__main__()'] = body[:-1]
        defs = [ir.DefNode(k, *v) for k, v in iteritems(self.functions)]
        return ir.TemplateNode(self.mod_py, defs)

    def text(self, token):
        """Convert a _Text token into an ir.TextNode (backslash-unescaped)."""
        text = ''.join(_unescape_newlines(token.text))
        node = ir.TextNode(text)
        node.filename = token.filename
        node.lineno = token.lineno
        return node

    def expr(self, token):
        """Convert an _Expr token into an ir.ExprNode."""
        node = ir.ExprNode(token.text, safe=not self.autoescape)
        node.filename = token.filename
        node.lineno = token.lineno
        return node

    def push_tok(self, token):
        # Push a token back so the next _parse_body call sees it first.
        self.iterator = chain([token], self.iterator)

    def _parse_body(self, *stoptags):
        """Yield child IR nodes until one of *stoptags* (or EOF).

        The terminating token -- or None at end of input -- is yielded
        last, so callers slice it off with body[:-1] and may inspect
        body[-1] to learn which stop tag ended the body.
        """
        while True:
            try:
                token = next(self.iterator)
                if isinstance(token, _Text):
                    yield self.text(token)
                elif isinstance(token, _Expr):
                    yield self.expr(token)
                elif isinstance(token, _Tag):
                    if token.tagname in stoptags:
                        yield token
                        break
                    # Dispatch to the matching _parse_<tagname> method.
                    parser = getattr(self, '_parse_%s' % token.tagname)
                    yield parser(token)
                else:  # pragma no cover
                    msg = 'Parse error: %r unexpected' % token
                    assert False, msg
            except StopIteration:
                yield None
                break

    def _parse_def(self, token):
        """%def -- named function; nested defs become InnerDefNode."""
        old_in_def, self._in_def = self._in_def, True
        body = list(self._parse_body('end'))
        self._in_def = old_in_def
        if self._in_def:
            return ir.InnerDefNode(token.body, *body[:-1])
        else:
            # Top-level defs are registered as template functions and
            # emit nothing inline.
            self.functions[token.body.strip()] = body[:-1]
            return None

    def _parse_call(self, token):
        """%call -- invoke a def, passing the enclosed body as $caller."""
        b = token.body.find('(')
        e = token.body.find(')', b)
        assert e > b > -1
        arglist = token.body[b:e + 1]
        call = token.body[e + 1:].strip()
        body = list(self._parse_body('end'))
        return ir.CallNode(
            '$caller%s' % arglist,
            call.replace('%caller', '$caller'),
            *body[:-1])

    def _parse_if(self, token):
        """%if -- a trailing %else is pushed back to be parsed next."""
        body = list(self._parse_body('end', 'else'))
        stoptok = body[-1]
        if stoptok.tagname == 'else':
            self.push_tok(stoptok)
        return ir.IfNode(token.body, *body[:-1])

    def _parse_for(self, token):
        body = list(self._parse_body('end'))
        return ir.ForNode(token.body, *body[:-1])

    def _parse_switch(self, token):
        body = list(self._parse_body('end'))
        return ir.SwitchNode(token.body, *body[:-1])

    def _parse_case(self, token):
        # The stop token (next case/else/end) is pushed back for the
        # enclosing switch to consume.
        body = list(self._parse_body('case', 'else', 'end'))
        stoptok = body[-1]
        self.push_tok(stoptok)
        return ir.CaseNode(token.body, *body[:-1])

    def _parse_else(self, token):
        body = list(self._parse_body('end'))
        return ir.ElseNode(*body[:-1])

    def _parse_extends(self, token):
        parts = shlex_split(token.body)
        fn = parts[0]
        assert len(parts) == 1
        self._is_child = True  # enables parent_block injection in %block
        return ir.ExtendNode(fn)

    def _parse_import(self, token):
        parts = shlex_split(token.body)
        fn = parts[0]
        if len(parts) > 1:
            # '%import <file> as <alias>'
            assert parts[1] == 'as'
            return ir.ImportNode(fn, parts[2])
        else:
            return ir.ImportNode(fn)

    def _parse_include(self, token):
        parts = shlex_split(token.body)
        fn = parts[0]
        assert len(parts) == 1
        return ir.IncludeNode(fn)

    def _parse_py(self, token):
        """%py -- python code, inline (on the tag line) or as a block."""
        body = token.body.strip()
        if body:
            # Inline form: wrap the code plus a sentinel, mirroring the
            # shape returned by _parse_body.
            body = [ir.TextNode(body), None]
        else:
            body = list(self._parse_body('end'))
        node = ir.PythonNode(*body[:-1])
        if node.module_level:
            self.mod_py.append(node)
            return None
        else:
            return node

    def _parse_block(self, token):
        """%block -- a named function that child templates may override."""
        fname = '_kj_block_' + token.body.strip()
        decl = fname + '()'
        body = list(self._parse_body('end'))[:-1]
        self.functions[decl] = body
        if self._is_child:
            # In a child template, expose the parent's implementation so
            # the overriding block may call parent_block().
            parent_block = 'parent.' + fname
            body.insert(0,
                ir.PythonNode(ir.TextNode('parent_block=%s' % parent_block)))
            return None
        else:
            return ir.ExprNode(decl)
class _Token(object):
def __init__(self, filename, lineno, text):
self.filename = filename
self.lineno = lineno
self.text = text
def __repr__(self): # pragma no cover
return '<%s %r>' % (
self.__class__.__name__,
self.text)
class _Expr(_Token):
    # Token for a $name or ${...} substitution expression.
    pass
class _Text(_Token):
    # Token for literal template text between expressions and tags.
    pass
class _Tag(_Token):
    """Token for a template directive tag such as ``%for`` or ``{%end%}``.

    Stores the tag name and its body; the inherited ``text`` attribute
    is the two joined with a space (used for line counting).
    """

    def __init__(self, filename, lineno, tagname, body):
        self.tagname = tagname
        self.body = body
        super(_Tag, self).__init__(filename, lineno, '%s %s' % (tagname, body))
def _unescape_newlines(text):
i = 0
while i < len(text):
if text[i] == '\\':
if text[i + 1] != '\n':
yield text[i + 1]
i += 2
else:
yield text[i]
i += 1
Kajiki-0.7.1/kajiki/util.py 0000644 0000765 0000024 00000005752 13003776242 015602 0 ustar amol staff 0000000 0000000 # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import deque
import sys
from random import randint
from threading import local
def debug():  # pragma no cover
    """Install a post-mortem debugger as ``sys.excepthook``.

    Prefers IPython's debugger when importable, falling back to plain
    ``pdb``.  Intended for interactive development only.
    """
    def pm(etype, value, tb):
        import pdb
        import traceback
        try:
            # Try the (old-style) IPython debugger API first.
            from IPython.ipapi import make_session
            make_session()
            from IPython.Debugger import Pdb
            sys.stderr.write('Entering post-mortem IPDB shell\n')
            p = Pdb(color_scheme='Linux')
            p.reset()
            p.setup(None, tb)
            p.print_stack_trace()
            sys.stderr.write('%s: %s\n' % (etype, value))
            p.cmdloop()
            p.forget()
            # p.interaction(None, tb)
        except ImportError:
            # No IPython available: plain pdb post-mortem.
            sys.stderr.write('Entering post-mortem PDB shell\n')
            traceback.print_exception(etype, value, tb)
            pdb.post_mortem(tb)
    sys.excepthook = pm
def expose(func):
    """Decorator: mark *func* as exposed by setting its ``exposed`` flag."""
    setattr(func, 'exposed', True)
    return func
class Undefined(object):
    # Sentinel type distinguishing "no value supplied" from None.
    pass

# Shared singleton instance used as the sentinel value.
UNDEFINED = Undefined()
class flattener(object):
    """Lazily flatten nested iterators of strings, dropping ``None``.

    Template code may yield strings, ``None`` (ignored), or further
    flatteners; iterating the wrapper produces only the strings, in
    order.
    """

    def __init__(self, iterator):
        # Unwrap directly-nested flatteners so we never wrap a wrapper.
        while type(iterator) == flattener:
            iterator = iterator.iterator
        self.iterator = iterator

    @classmethod
    def decorate(cls, func):
        """Decorator: wrap *func*'s return value in a flattener."""
        def inner(*args, **kwargs):
            return cls(func(*args, **kwargs))
        return inner

    def accumulate_str(self):
        """Eagerly concatenate every string this flattener produces."""
        if type(self.iterator) == flattener:
            return self.iterator.accumulate_str()
        result = ''
        stack = [self.iterator]
        while stack:
            try:
                item = next(stack[-1])
            except StopIteration:
                stack.pop()
                continue
            if type(item) == flattener:
                # Descend into the nested flattener's iterator.
                stack.append(item.iterator)
            elif item is not None:
                result += item
        return result

    def __iter__(self):
        for item in self.iterator:
            if type(item) == flattener:
                for sub in item:
                    if sub is not None:
                        yield sub
            elif item is not None:
                yield item
def literal(text):
    """Mark *text* as safe markup by wrapping it in a flattener."""
    return flattener(iter([text]))
class NameGen(object):
    """Generate unique identifier names, one namespace per thread."""

    lcl = local()  # holds the per-thread singleton in ``lcl.inst``

    def __init__(self):
        self.names = set()  # every name handed out so far

    @classmethod
    def gen(cls, hint):
        """Return a unique name based on *hint*, using this thread's generator."""
        if not hasattr(cls.lcl, 'inst'):
            cls.lcl.inst = NameGen()
        return cls.lcl.inst._gen(hint)

    def _gen(self, hint):
        # Use *hint* directly when free; otherwise append random suffixes
        # until an unused name is found.
        candidate = hint
        while candidate in self.names:
            candidate = '%s_%d' % (hint, randint(0, len(self.names) * 10))
        self.names.add(candidate)
        return candidate
def gen_name(hint='_kj_'):
    """Shortcut for NameGen.gen: a thread-unique name based on *hint*."""
    return NameGen.gen(hint)
def window(seq, n=2):
    """Yield a sliding window of size *n* over the iterator *seq*.

    The first window is right-padded with ``None`` when *seq* yields
    fewer than *n* items.  Note that the SAME deque object is yielded
    every time and mutated in place; copy it (e.g. ``tuple(w)``) if you
    need to keep a window around.
    """
    win = deque((next(seq, None) for _ in range(n)), maxlen=n)
    yield win
    for element in seq:
        win.append(element)
        yield win
Kajiki-0.7.1/kajiki/version.py 0000644 0000765 0000024 00000000260 13155474551 016305 0 ustar amol staff 0000000 0000000 # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# NOTE(review): __version__ ('0.6') lags __release__ ('0.7.1'); the 0.6.1
# changelog entry says version.py should "actually report 0.6" -- confirm
# whether leaving __version__ behind for 0.7.x releases is intentional.
__version__ = '0.6'
__release__ = '0.7.1'
Kajiki-0.7.1/kajiki/xml_template.py 0000644 0000765 0000024 00000106076 13155474165 017330 0 ustar amol staff 0000000 0000000 # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
from codecs import open
from xml import sax
from xml.dom import minidom as dom
from xml.sax import SAXParseException
from nine import IS_PYTHON2, basestring, str, iteritems, native_str
if IS_PYTHON2:
from cStringIO import StringIO as BytesIO
else:
from io import BytesIO
from . import ir
from . import template
from .ddict import defaultdict
from .doctype import DocumentTypeDeclaration, extract_dtd
from .entities import html5, unescape
from .html_utils import (HTML_OPTIONAL_END_TAGS, HTML_REQUIRED_END_TAGS,
HTML_CDATA_TAGS)
from .markup_template import QDIRECTIVES, QDIRECTIVES_DICT
impl = dom.getDOMImplementation(' ')
def XMLTemplate(source=None, filename=None, mode=None, is_fragment=False,
                encoding='utf-8', autoblocks=None, cdata_scripts=True,
                strip_text=False, base_globals=None):
    """Given XML source code of a Kajiki Templates parses and returns a template class.

    The source code is parsed to its DOM representation by :class:`._Parser`,
    which is then expanded to separate directives from tags by :class:`._DomTransformer`
    and then compiled to the *Intermediate Representation* tree by :class:`._Compiler`.

    The *Intermediate Representation* generates the Python code
    which creates a new :class:`kajiki.template._Template` subclass through
    :meth:`kajiki.template.Template`.

    The generated code is then executed to return the newly created class.

    Calling ``.render()`` on an instance of the generate class will then render the template.
    """
    # One of *source* / *filename* is required: read the file only when
    # no source string was supplied.
    if source is None:
        with open(filename, encoding=encoding) as f:
            source = f.read()  # source is a unicode string
    if filename is None:
        # NOTE(review): this default looks truncated by archive extraction;
        # upstream Kajiki uses the placeholder '<string>' here -- confirm.
        filename = ''
    doc = _Parser(filename, source).parse()
    doc = _DomTransformer(doc, strip_text=strip_text).transform()
    ir_ = _Compiler(filename, doc, mode=mode, is_fragment=is_fragment,
                    autoblocks=autoblocks, cdata_scripts=cdata_scripts).compile()
    t = template.from_ir(ir_, base_globals=base_globals)
    return t
def annotate(gen):
    """Decorator for compiler generator methods: stamp every yielded IR
    node with the source DOM node's position via ``self._anno``."""
    def wrapper(self, node, *args, **kwargs):
        for ir_node in gen(self, node, *args, **kwargs):
            self._anno(node, ir_node)
            yield ir_node
    return wrapper
class _Compiler(object):
"""Compiles a DOM tree into Intermediate Representation :class:`kajiki.ir.TemplateNode`.
Intermediate Representation is a tree of nodes that represent
Python Code that should be generated to execute the template.
"""
def __init__(self, filename, doc, mode=None, is_fragment=False,
             autoblocks=None, cdata_scripts=True):
    self.filename = filename
    self.doc = doc                       # DOM document from _Parser/_DomTransformer
    self.is_fragment = is_fragment
    self.functions = defaultdict(list)   # function decl -> body IR nodes
    self.functions['__main__()'] = []
    self.function_lnos = {}              # function decl -> first line number
    self.mod_py = []                     # module-level python blocks
    self.autoblocks = autoblocks or []   # tag names auto-promoted to py:block
    self.cdata_scripts = cdata_scripts
    self.in_def = False
    self.is_child = False
    # The rendering mode is either specified in the *mode* argument,
    # or inferred from the DTD:
    self._dtd = DocumentTypeDeclaration.matching(self.doc._dtd)
    if mode:
        self.mode = mode
    elif self._dtd:
        self.mode = self._dtd.rendering_mode
    else:  # The template might contain an unknown DTD
        self.mode = 'xml'  # by default
# NOTE(review): this method's body appears corrupted by archive
# extraction -- a span between the docstring and the ``else:`` branch
# below (the docstring's closing quotes, the childNodes compile loop
# and the DTD ``if`` chain) has been lost. Recover it from upstream
# Kajiki before trusting or editing this code; lines kept verbatim.
def compile(self):
"""Compile the document provided by :class:`._Parser`.
Returns as :class:`kajiki.ir.TemplateNode` instance representing
the whole tree of nodes as their intermediate representation.
The returned template will include at least a ``__main__``
function which is the document itself including a DOCTYPE and
any function declared through ``py:def`` or as a ``py:block``.
The ``TemplateNode`` will also include the module level
code specified through ``'
else:
dtd = None
if dtd:
dtd = ir.TextNode(dtd.strip()+'\n')
dtd.filename = self.filename
dtd.lineno = 1
body.insert(0, dtd)
self.functions['__main__()'] = body
defs = []
for k, v in iteritems(self.functions):
node = ir.DefNode(k, *v)
node.lineno = self.function_lnos.get(k)
defs.append(node)
node = ir.TemplateNode(self.mod_py, defs)
node.filename = self.filename
node.lineno = 0
return node
def _anno(self, dom_node, ir_node):
    """Copy filename/lineno from *dom_node* onto *ir_node*, once only."""
    if ir_node.lineno:
        return  # already annotated; keep the first recorded position
    ir_node.filename = self.filename
    ir_node.lineno = dom_node.lineno
def _is_autoblock(self, node):
    """Return True if *node* should be compiled as a py:block directive.

    A tag whose name is listed in *autoblocks* is promoted, unless it
    carries a py:autoblock="false" guard attribute.  The guard is
    evaluated at compile time and accepts only the constants
    'true'/'false' (case-insensitive).
    """
    if node.tagName not in self.autoblocks:
        return False
    if node.hasAttribute('py:autoblock'):
        guard = node.getAttribute('py:autoblock').lower()
        if guard not in ('false', 'true'):
            raise ValueError('py:autoblock is evaluated at compile time '
                             'and only accepts True/False constants')
        if guard == 'false':
            # We throw away the attribute so it doesn't remain in rendered nodes.
            node.removeAttribute('py:autoblock')
            return False
    return True
def _compile_node(self, node):
    """Convert a DOM node to its intermediate representation.

    Calls specific compile functions for special nodes and any
    directive that was expanded by :meth:`._DomTransformer._expand_directives`.
    For any plain XML node forward it to :meth:`._compile_xml`.

    Automatically converts any ``autoblock`` node to a ``py:block`` directive.
    """
    if isinstance(node, dom.Comment):
        return self._compile_comment(node)
    elif isinstance(node, dom.Text):
        return self._compile_text(node)
    elif isinstance(node, dom.ProcessingInstruction):
        return self._compile_pi(node)
    elif self._is_autoblock(node):
        # Set the name of the block equal to the tag itself.
        node.setAttribute('name', node.tagName)
        return self._compile_block(node)
    elif node.tagName.startswith('py:'):
        # Handle directives; unknown py: tags fall back to plain XML
        # compilation via the getattr default.
        compiler = getattr(
            self, '_compile_%s' % node.tagName.split(':')[-1],
            self._compile_xml)
        return compiler(node)
    else:
        return self._compile_xml(node)
@annotate
def _compile_xml(self, node):
"""Compile plain XML nodes.
When compiling a node also take care of directives that
only modify the node itself (``py:strip``, ``py:attrs``
and ``py:content``) as all directives wrapping the node
and its children have already been handled by :meth:`._compile_node`.
The provided intermediate representations include
the node itself, its attributes and its content.
Attributes of the node are handled through :class:`._TextCompiler`
to ensure ${expr} expressions are handled in attributes too.
In case the node has children (and no py:content)
compile the children too.
"""
content = attrs = guard = None
if node.hasAttribute('py:strip'):
guard = node.getAttribute('py:strip')
if guard == '': # py:strip="" means yes, do strip the tag
guard = 'False'
else:
guard = 'not (%s)' % guard
node.removeAttribute('py:strip')
yield ir.TextNode('<%s' % node.tagName, guard)
for k, v in sorted(node.attributes.items()):
tc = _TextCompiler(self.filename, v, node.lineno,
ir.TextNode, in_html_attr=True)
v = list(tc)
if k == 'py:content':
content = node.getAttribute('py:content')
continue
elif k == 'py:attrs':
attrs = node.getAttribute('py:attrs')
continue
yield ir.AttrNode(k, v, guard, self.mode)
if attrs:
yield ir.AttrsNode(attrs, guard, self.mode)
if content:
yield ir.TextNode('>', guard)
yield ir.ExprNode(content)
yield ir.TextNode('%s>' % node.tagName, guard)
else:
if node.childNodes:
yield ir.TextNode('>', guard)
if self.cdata_scripts and node.tagName in HTML_CDATA_TAGS:
# Special behaviour for