Removed a bunch of unneeded things again.
@@ -123,7 +123,7 @@ class Compressor(object):
         return get_hexdigest(''.join(
             [self.content] + self.mtimes).encode(self.charset), 12)

-    def hunks(self, mode='file', forced=False):
+    def hunks(self, forced=False):
         """
         The heart of content parsing, iterates of the
         list of split contents and looks at its kind
@@ -152,29 +152,29 @@ class Compressor(object):

             if enabled:
                 value = self.filter(value, **options)
-                yield mode, smart_unicode(value, charset.lower())
+                yield smart_unicode(value, charset.lower())
             else:
                 if precompiled:
                     value = self.handle_output(kind, value, forced=True, basename=basename)
-                    yield "verbatim", smart_unicode(value, charset.lower())
+                    yield smart_unicode(value, charset.lower())
                 else:
-                    yield mode, self.parser.elem_str(elem)
+                    yield self.parser.elem_str(elem)

-    def filtered_output(self, content):
+    def filter_output(self, content):
         """
         Passes the concatenated content to the 'output' methods
         of the compressor filters.
         """
         return self.filter(content, method=METHOD_OUTPUT)

-    def filtered_input(self, mode='file', forced=False):
+    def filter_input(self, forced=False):
         """
         Passes each hunk (file or code) to the 'input' methods
         of the compressor filters.
         """
         content = []
-        for mode, hunk in self.hunks(mode, forced):
-            content.append((mode, hunk))
+        for hunk in self.hunks(forced):
+            content.append(hunk)
         return content

     def precompile(self, content, kind=None, elem=None, filename=None, **kwargs):
@@ -211,17 +211,15 @@ class Compressor(object):
         any custom modification. Calls other mode specific methods or simply
         returns the content directly.
         """
-        content = self.filtered_input(mode, forced)
+        content = self.filter_input(forced)
         if not content:
             return ''

-        charset = self.charset
-        output = '\n'.join(c.encode(charset) for (m, c) in content)
+        output = '\n'.join(c.encode(self.charset) for c in content)

         if settings.COMPRESS_ENABLED or forced:
-            filtered_content = self.filtered_output(output)
-            finished_content = self.handle_output(mode, filtered_content, forced)
-            output = finished_content
+            filtered_output = self.filter_output(output)
+            return self.handle_output(mode, filtered_output, forced)

         return output

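Taken together, the Compressor changes drop the unused mode bookkeeping: hunks() and filter_input() now deal in plain unicode strings, and the mode only matters when handle_output() renders the final tag. A minimal usage sketch, under assumptions not stated in this commit (that CssCompressor still takes the rendered tag content as its first constructor argument and that Django settings are already configured):

# --- usage sketch, not part of the diff ---
from compressor.css import CssCompressor  # assumed import path

html = '<link rel="stylesheet" href="/media/css/one.css" type="text/css" />'
node = CssCompressor(html)

# hunks() now yields plain strings, so no (mode, hunk) unpacking is needed.
for hunk in node.hunks():
    print(hunk)

# The mode ('file' or 'inline') is only passed when rendering the output.
print(node.output('file', forced=True))
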
@@ -69,6 +69,9 @@ class CompressorNode(template.Node):
             return cache_key, cache_content
         return None, None

+    def render_output(self, compressor, forced=False):
+        return compressor.output(self.mode, forced=forced)
+
     def render(self, context, forced=False):

         # Check if in debug mode
@@ -90,7 +93,7 @@ class CompressorNode(template.Node):
             return cache_content

         # call compressor output method and handle exceptions
-        rendered_output = compressor.output(self.mode, forced=forced)
+        rendered_output = self.render_output(compressor, forced)
         if cache_key:
             cache_set(cache_key, rendered_output)
         return rendered_output

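Pulling the compressor call out into render_output() gives render() a single seam to override. A hypothetical sketch of why that hook can be useful (the subclass, import path, and fallback below are assumptions for illustration, not part of the commit):

# --- hypothetical sketch, not part of the diff ---
from compressor.templatetags.compress import CompressorNode  # assumed import path

class FailsafeCompressorNode(CompressorNode):
    # Hypothetical subclass: fall back to the uncompressed tag content instead
    # of raising if compression fails, by overriding only the new hook.
    def render_output(self, compressor, forced=False):
        try:
            return super(FailsafeCompressorNode, self).render_output(compressor, forced)
        except Exception:
            return compressor.content  # the content originally rendered between the tags
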
@@ -51,12 +51,11 @@ class CompressorTestCase(TestCase):

     def test_css_hunks(self):
         out = ['body { background:#990; }', u'p { border:5px solid green;}', 'body { color:#fff; }']
-        hunks = [h for m, h in self.css_node.hunks()]
-        self.assertEqual(out, hunks)
+        self.assertEqual(out, list(self.css_node.hunks()))

     def test_css_output(self):
         out = u'body { background:#990; }\np { border:5px solid green;}\nbody { color:#fff; }'
-        hunks = '\n'.join([h for m, h in self.css_node.hunks()])
+        hunks = '\n'.join([h for h in self.css_node.hunks()])
         self.assertEqual(out, hunks)

     def test_css_mtimes(self):
@@ -89,8 +88,7 @@ class CompressorTestCase(TestCase):

     def test_js_hunks(self):
         out = ['obj = {};', u'obj.value = "value";']
-        hunks = [h for m, h in self.js_node.hunks()]
-        self.assertEqual(out, hunks)
+        self.assertEqual(out, list(self.js_node.hunks()))

     def test_js_output(self):
         out = u'<script type="text/javascript" src="/media/CACHE/js/066cd253eada.js"></script>'

@@ -145,7 +145,6 @@ class CssAbsolutizingTestCase(TestCase):
             'hash1': self.hashing_func(os.path.join(settings.COMPRESS_ROOT, 'img/python.png')),
             'hash2': self.hashing_func(os.path.join(settings.COMPRESS_ROOT, 'img/add.png')),
         }
-        hunks = [h for m, h in self.css_node.hunks()]
         self.assertEqual([u"""\
 p { background: url('/media/img/python.png?%(hash1)s'); }
 p { background: url('/media/img/python.png?%(hash1)s'); }
@@ -157,7 +156,7 @@ p { background: url('/media/img/add.png?%(hash2)s'); }
 p { background: url('/media/img/add.png?%(hash2)s'); }
 p { background: url('/media/img/add.png?%(hash2)s'); }
 p { background: url('/media/img/add.png?%(hash2)s'); }
-""" % hash_dict], hunks)
+""" % hash_dict], list(self.css_node.hunks()))

     def test_guess_filename(self):
         for base_url in ('/media/', 'http://media.example.com/'):
@@ -199,5 +198,4 @@ class CssDataUriTestCase(TestCase):
     def test_data_uris(self):
         datauri_hash = get_hashed_mtime(os.path.join(settings.COMPRESS_ROOT, 'css/datauri.css'))
         out = [u'.add { background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAAK/INwWK6QAAABl0RVh0U29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZTwAAAJvSURBVDjLpZPrS5NhGIf9W7YvBYOkhlkoqCklWChv2WyKik7blnNris72bi6dus0DLZ0TDxW1odtopDs4D8MDZuLU0kXq61CijSIIasOvv94VTUfLiB74fXngup7nvrnvJABJ/5PfLnTTdcwOj4RsdYmo5glBWP6iOtzwvIKSWstI0Wgx80SBblpKtE9KQs/We7EaWoT/8wbWP61gMmCH0lMDvokT4j25TiQU/ITFkek9Ow6+7WH2gwsmahCPdwyw75uw9HEO2gUZSkfyI9zBPCJOoJ2SMmg46N61YO/rNoa39Xi41oFuXysMfh36/Fp0b7bAfWAH6RGi0HglWNCbzYgJaFjRv6zGuy+b9It96N3SQvNKiV9HvSaDfFEIxXItnPs23BzJQd6DDEVM0OKsoVwBG/1VMzpXVWhbkUM2K4oJBDYuGmbKIJ0qxsAbHfRLzbjcnUbFBIpx/qH3vQv9b3U03IQ/HfFkERTzfFj8w8jSpR7GBE123uFEYAzaDRIqX/2JAtJbDat/COkd7CNBva2cMvq0MGxp0PRSCPF8BXjWG3FgNHc9XPT71Ojy3sMFdfJRCeKxEsVtKwFHwALZfCUk3tIfNR8XiJwc1LmL4dg141JPKtj3WUdNFJqLGFVPC4OkR4BxajTWsChY64wmCnMxsWPCHcutKBxMVp5mxA1S+aMComToaqTRUQknLTH62kHOVEE+VQnjahscNCy0cMBWsSI0TCQcZc5ALkEYckL5A5noWSBhfm2AecMAjbcRWV0pUTh0HE64TNf0mczcnnQyu/MilaFJCae1nw2fbz1DnVOxyGTlKeZft/Ff8x1BRssfACjTwQAAAABJRU5ErkJggg=="); }\n.python { background-image: url("/media/img/python.png?%s"); }\n.datauri { background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAABGdBTUEAALGPC/xhBQAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9YGARc5KB0XV+IAAAAddEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIFRoZSBHSU1Q72QlbgAAAF1JREFUGNO9zL0NglAAxPEfdLTs4BZM4DIO4C7OwQg2JoQ9LE1exdlYvBBeZ7jqch9//q1uH4TLzw4d6+ErXMMcXuHWxId3KOETnnXXV6MJpcq2MLaI97CER3N0 vr4MkhoXe0rZigAAAABJRU5ErkJggg=="); }\n' % datauri_hash]
-        hunks = [h for m, h in self.css_node.hunks()]
-        self.assertEqual(out, hunks)
+        self.assertEqual(out, list(self.css_node.hunks()))