author    | Evgeny Fadeev <evgeny.fadeev@gmail.com> | 2009-08-04 23:25:33 -0400
committer | Evgeny Fadeev <evgeny.fadeev@gmail.com> | 2009-08-04 23:25:33 -0400
commit    | 3ddaeb2d1c9a556768856d3e6b810dfd354aa01e (patch)
tree      | 1a467ee87ae834590e2d6c88fdaeaf7e391e995d /utils
parent    | a2ec9785ae0367eaf7fb1d094544bd90d0aace85 (diff)
included changes by Adolfo and Chaitanya and found temporary fix for languages
Diffstat (limited to 'utils')
-rw-r--r-- | utils/cache.py | 184
-rw-r--r-- | utils/html.py  | 102
-rw-r--r-- | utils/lists.py | 172
3 files changed, 229 insertions, 229 deletions
diff --git a/utils/cache.py b/utils/cache.py
index bc1cb1af..410c0662 100644
--- a/utils/cache.py
+++ b/utils/cache.py
@@ -1,92 +1,92 @@
-"""Utilities for working with Django Models."""
-import itertools
-
-from django.contrib.contenttypes.models import ContentType
-
-from lanai.utils.lists import flatten
-
-def fetch_model_dict(model, ids, fields=None):
-    """
-    Fetches a dict of model details for model instances with the given
-    ids, keyed by their id.
-
-    If a fields list is given, a dict of details will be retrieved for
-    each model, otherwise complete model instances will be retrieved.
-
-    Any fields list given shouldn't contain the primary key attribute for
-    the model, as this can be determined from its Options.
-    """
-    if fields is None:
-        return model._default_manager.in_bulk(ids)
-    else:
-        id_attr = model._meta.pk.attname
-        return dict((obj[id_attr], obj) for obj
-                    in model._default_manager.filter(id__in=ids).values(
-                        *itertools.chain((id_attr,), fields)))
-
-def populate_foreign_key_caches(model, objects_to_populate, fields=None):
-    """
-    Populates caches for the given related Model in instances of objects
-    which have a ForeignKey relationship to it, specified as a list of
-    (object list, related attribute name list) two-tuples.
-
-    If a list of field names is given, only the given fields will be
-    looked up and related object caches will be populated with a dict of
-    the specified fields. Otherwise, complete model instances will be
-    retrieved.
-    """
-    # Get all related object ids for the appropriate fields
-    related_object_ids = []
-    for objects, attrs in objects_to_populate:
-        related_object_ids.append(tuple(tuple(getattr(obj, '%s_id' % attr)
-                                              for attr in attrs)
-                                        for obj in objects))
-    unique_ids = tuple(set(pk for pk in flatten(related_object_ids) if pk))
-    related_objects = fetch_model_dict(model, unique_ids, fields)
-
-    # Fill related object caches
-    for (objects, attrs), related_ids in itertools.izip(objects_to_populate,
-                                                        related_object_ids):
-        for obj, related_ids_for_obj in itertools.izip(objects,
-                                                       related_ids):
-            for attr, related_object in itertools.izip(attrs, (related_objects.get(pk, None)
-                                                               for pk in related_ids_for_obj)):
-                setattr(obj, '_%s_cache' % attr, related_object)
-
-def populate_content_object_caches(generic_related_objects, model_fields=None):
-    """
-    Retrieves ``ContentType`` and content objects for the given list of
-    items which use a generic relation, grouping the retrieval of content
-    objects by model to reduce the number of queries executed.
-
-    This results in ``number_of_content_types + 1`` queries rather than
-    the ``number_of_generic_reL_objects * 2`` queries you'd get by
-    iterating over the list and accessing each item's object attribute.
-
-    If a dict mapping model classes to field names is given, only the
-    given fields will be looked up for each model specified and the
-    object cache will be populated with a dict of the specified fields.
-    Otherwise, complete model instances will be retrieved.
-    """
-    if model_fields is None:
-        model_fields = {}
-
-    # Group content object ids by their content type ids
-    ids_by_content_type = {}
-    for obj in generic_related_objects:
-        ids_by_content_type.setdefault(obj.content_type_id,
-                                       []).append(obj.object_id)
-
-    # Retrieve content types and content objects in bulk
-    content_types = ContentType.objects.in_bulk(ids_by_content_type.keys())
-    for content_type_id, ids in ids_by_content_type.iteritems():
-        model = content_types[content_type_id].model_class()
-        objects[content_type_id] = fetch_model_dict(
-            model, tuple(set(ids)), model_fields.get(model, None))
-
-    # Set content types and content objects in the appropriate cache
-    # attributes, so accessing the 'content_type' and 'object' attributes
-    # on each object won't result in further database hits.
-    for obj in generic_related_objects:
-        obj._object_cache = objects[obj.content_type_id][obj.object_id]
-        obj._content_type_cache = content_types[obj.content_type_id]
+"""Utilities for working with Django Models.""" +import itertools + +from django.contrib.contenttypes.models import ContentType + +from lanai.utils.lists import flatten + +def fetch_model_dict(model, ids, fields=None): + """ + Fetches a dict of model details for model instances with the given + ids, keyed by their id. + + If a fields list is given, a dict of details will be retrieved for + each model, otherwise complete model instances will be retrieved. + + Any fields list given shouldn't contain the primary key attribute for + the model, as this can be determined from its Options. + """ + if fields is None: + return model._default_manager.in_bulk(ids) + else: + id_attr = model._meta.pk.attname + return dict((obj[id_attr], obj) for obj + in model._default_manager.filter(id__in=ids).values( + *itertools.chain((id_attr,), fields))) + +def populate_foreign_key_caches(model, objects_to_populate, fields=None): + """ + Populates caches for the given related Model in instances of objects + which have a ForeignKey relationship to it, specified as a list of + (object list, related attribute name list) two-tuples. + + If a list of field names is given, only the given fields will be + looked up and related object caches will be populated with a dict of + the specified fields. Otherwise, complete model instances will be + retrieved. + """ + # Get all related object ids for the appropriate fields + related_object_ids = [] + for objects, attrs in objects_to_populate: + related_object_ids.append(tuple(tuple(getattr(obj, '%s_id' % attr) + for attr in attrs) + for obj in objects)) + unique_ids = tuple(set(pk for pk in flatten(related_object_ids) if pk)) + related_objects = fetch_model_dict(model, unique_ids, fields) + + # Fill related object caches + for (objects, attrs), related_ids in itertools.izip(objects_to_populate, + related_object_ids): + for obj, related_ids_for_obj in itertools.izip(objects, + related_ids): + for attr, related_object in itertools.izip(attrs, (related_objects.get(pk, None) + for pk in related_ids_for_obj)): + setattr(obj, '_%s_cache' % attr, related_object) + +def populate_content_object_caches(generic_related_objects, model_fields=None): + """ + Retrieves ``ContentType`` and content objects for the given list of + items which use a generic relation, grouping the retrieval of content + objects by model to reduce the number of queries executed. + + This results in ``number_of_content_types + 1`` queries rather than + the ``number_of_generic_reL_objects * 2`` queries you'd get by + iterating over the list and accessing each item's object attribute. + + If a dict mapping model classes to field names is given, only the + given fields will be looked up for each model specified and the + object cache will be populated with a dict of the specified fields. + Otherwise, complete model instances will be retrieved. 
+ """ + if model_fields is None: + model_fields = {} + + # Group content object ids by their content type ids + ids_by_content_type = {} + for obj in generic_related_objects: + ids_by_content_type.setdefault(obj.content_type_id, + []).append(obj.object_id) + + # Retrieve content types and content objects in bulk + content_types = ContentType.objects.in_bulk(ids_by_content_type.keys()) + for content_type_id, ids in ids_by_content_type.iteritems(): + model = content_types[content_type_id].model_class() + objects[content_type_id] = fetch_model_dict( + model, tuple(set(ids)), model_fields.get(model, None)) + + # Set content types and content objects in the appropriate cache + # attributes, so accessing the 'content_type' and 'object' attributes + # on each object won't result in further database hits. + for obj in generic_related_objects: + obj._object_cache = objects[obj.content_type_id][obj.object_id] + obj._content_type_cache = content_types[obj.content_type_id] diff --git a/utils/html.py b/utils/html.py index 602e1a76..25a74a4a 100644 --- a/utils/html.py +++ b/utils/html.py @@ -1,51 +1,51 @@ -"""Utilities for working with HTML."""
diff --git a/utils/html.py b/utils/html.py
index 602e1a76..25a74a4a 100644
--- a/utils/html.py
+++ b/utils/html.py
@@ -1,51 +1,51 @@
-"""Utilities for working with HTML."""
-import html5lib
-from html5lib import sanitizer, serializer, tokenizer, treebuilders, treewalkers
-
-class HTMLSanitizerMixin(sanitizer.HTMLSanitizerMixin):
-    acceptable_elements = ('a', 'abbr', 'acronym', 'address', 'b', 'big',
-        'blockquote', 'br', 'caption', 'center', 'cite', 'code', 'col',
-        'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'font',
-        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd',
-        'li', 'ol', 'p', 'pre', 'q', 's', 'samp', 'small', 'span', 'strike',
-        'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead',
-        'tr', 'tt', 'u', 'ul', 'var')
-
-    acceptable_attributes = ('abbr', 'align', 'alt', 'axis', 'border',
-        'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'cite',
-        'cols', 'colspan', 'datetime', 'dir', 'frame', 'headers', 'height',
-        'href', 'hreflang', 'hspace', 'lang', 'longdesc', 'name', 'nohref',
-        'noshade', 'nowrap', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope',
-        'span', 'src', 'start', 'summary', 'title', 'type', 'valign', 'vspace',
-        'width')
-
-    allowed_elements = acceptable_elements
-    allowed_attributes = acceptable_attributes
-    allowed_css_properties = ()
-    allowed_css_keywords = ()
-    allowed_svg_properties = ()
-
-class HTMLSanitizer(tokenizer.HTMLTokenizer, HTMLSanitizerMixin):
-    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
-                 lowercaseElementName=True, lowercaseAttrName=True):
-        tokenizer.HTMLTokenizer.__init__(self, stream, encoding, parseMeta,
-                                         useChardet, lowercaseElementName,
-                                         lowercaseAttrName)
-
-    def __iter__(self):
-        for token in tokenizer.HTMLTokenizer.__iter__(self):
-            token = self.sanitize_token(token)
-            if token:
-                yield token
-
-def sanitize_html(html):
-    """Sanitizes an HTML fragment."""
-    p = html5lib.HTMLParser(tokenizer=HTMLSanitizer,
-                            tree=treebuilders.getTreeBuilder("dom"))
-    dom_tree = p.parseFragment(html)
-    walker = treewalkers.getTreeWalker("dom")
-    stream = walker(dom_tree)
-    s = serializer.HTMLSerializer(omit_optional_tags=False,
-                                  quote_attr_values=True)
-    output_generator = s.serialize(stream)
-    return u''.join(output_generator)
+"""Utilities for working with HTML.""" +import html5lib +from html5lib import sanitizer, serializer, tokenizer, treebuilders, treewalkers + +class HTMLSanitizerMixin(sanitizer.HTMLSanitizerMixin): + acceptable_elements = ('a', 'abbr', 'acronym', 'address', 'b', 'big', + 'blockquote', 'br', 'caption', 'center', 'cite', 'code', 'col', + 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'font', + 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', + 'li', 'ol', 'p', 'pre', 'q', 's', 'samp', 'small', 'span', 'strike', + 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', + 'tr', 'tt', 'u', 'ul', 'var') + + acceptable_attributes = ('abbr', 'align', 'alt', 'axis', 'border', + 'cellpadding', 'cellspacing', 'char', 'charoff', 'charset', 'cite', + 'cols', 'colspan', 'datetime', 'dir', 'frame', 'headers', 'height', + 'href', 'hreflang', 'hspace', 'lang', 'longdesc', 'name', 'nohref', + 'noshade', 'nowrap', 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', + 'span', 'src', 'start', 'summary', 'title', 'type', 'valign', 'vspace', + 'width') + + allowed_elements = acceptable_elements + allowed_attributes = acceptable_attributes + allowed_css_properties = () + allowed_css_keywords = () + allowed_svg_properties = () + +class HTMLSanitizer(tokenizer.HTMLTokenizer, HTMLSanitizerMixin): + def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, + lowercaseElementName=True, lowercaseAttrName=True): + tokenizer.HTMLTokenizer.__init__(self, stream, encoding, parseMeta, + useChardet, lowercaseElementName, + lowercaseAttrName) + + def __iter__(self): + for token in tokenizer.HTMLTokenizer.__iter__(self): + token = self.sanitize_token(token) + if token: + yield token + +def sanitize_html(html): + """Sanitizes an HTML fragment.""" + p = html5lib.HTMLParser(tokenizer=HTMLSanitizer, + tree=treebuilders.getTreeBuilder("dom")) + dom_tree = p.parseFragment(html) + walker = treewalkers.getTreeWalker("dom") + stream = walker(dom_tree) + s = serializer.HTMLSerializer(omit_optional_tags=False, + quote_attr_values=True) + output_generator = s.serialize(stream) + return u''.join(output_generator) diff --git a/utils/lists.py b/utils/lists.py index 426d9cd3..bbcfae98 100644 --- a/utils/lists.py +++ b/utils/lists.py @@ -1,86 +1,86 @@ -"""Utilities for working with lists and sequences."""
diff --git a/utils/lists.py b/utils/lists.py
index 426d9cd3..bbcfae98 100644
--- a/utils/lists.py
+++ b/utils/lists.py
@@ -1,86 +1,86 @@
-"""Utilities for working with lists and sequences."""
-
-def flatten(x):
-    """
-    Returns a single, flat list which contains all elements retrieved
-    from the sequence and all recursively contained sub-sequences
-    (iterables).
-
-    Examples:
-    >>> [1, 2, [3, 4], (5, 6)]
-    [1, 2, [3, 4], (5, 6)]
-
-    From http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
-    """
-    result = []
-    for el in x:
-        if hasattr(el, '__iter__') and not isinstance(el, basestring):
-            result.extend(flatten(el))
-        else:
-            result.append(el)
-    return result
-
-def batch_size(items, size):
-    """
-    Retrieves items in batches of the given size.
-
-    >>> l = range(1, 11)
-    >>> batch_size(l, 3)
-    [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
-    >>> batch_size(l, 5)
-    [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
-    """
-    return [items[i:i+size] for i in xrange(0, len(items), size)]
-
-def batches(items, number):
-    """
-    Retrieves items in the given number of batches.
-
-    >>> l = range(1, 11)
-    >>> batches(l, 1)
-    [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
-    >>> batches(l, 2)
-    [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
-    >>> batches(l, 3)
-    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
-    >>> batches(l, 4)
-    [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
-    >>> batches(l, 5)
-    [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
-
-    Initial batches will contain as many items as possible in cases where
-    there are not enough items to be distributed evenly.
-
-    >>> batches(l, 6)
-    [[1, 2], [3, 4], [5, 6], [7, 8], [9], [10]]
-    >>> batches(l, 7)
-    [[1, 2], [3, 4], [5, 6], [7], [8], [9], [10]]
-    >>> batches(l, 8)
-    [[1, 2], [3, 4], [5], [6], [7], [8], [9], [10]]
-    >>> batches(l, 9)
-    [[1, 2], [3], [4], [5], [6], [7], [8], [9], [10]]
-    >>> batches(l, 10)
-    [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
-
-    If there are more batches than items, empty batches will be appended
-    to the batch list.
-
-    >>> batches(l, 11)
-    [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], []]
-    >>> batches(l, 12)
-    [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [], []]
-    """
-    div, mod= divmod(len(items), number)
-    if div > 1:
-        if mod:
-            div += 1
-        return batch_size(items, div)
-    else:
-        if not div:
-            return [[item] for item in items] + [[]] * (number - mod)
-        elif div == 1 and not mod:
-            return [[item] for item in items]
-        else:
-            # mod now tells you how many lists of 2 you can fit in
-            return ([items[i*2:(i*2)+2] for i in xrange(0, mod)] +
-                    [[item] for item in items[mod*2:]])
+"""Utilities for working with lists and sequences.""" + +def flatten(x): + """ + Returns a single, flat list which contains all elements retrieved + from the sequence and all recursively contained sub-sequences + (iterables). + + Examples: + >>> [1, 2, [3, 4], (5, 6)] + [1, 2, [3, 4], (5, 6)] + + From http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks + """ + result = [] + for el in x: + if hasattr(el, '__iter__') and not isinstance(el, basestring): + result.extend(flatten(el)) + else: + result.append(el) + return result + +def batch_size(items, size): + """ + Retrieves items in batches of the given size. + + >>> l = range(1, 11) + >>> batch_size(l, 3) + [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]] + >>> batch_size(l, 5) + [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]] + """ + return [items[i:i+size] for i in xrange(0, len(items), size)] + +def batches(items, number): + """ + Retrieves items in the given number of batches. + + >>> l = range(1, 11) + >>> batches(l, 1) + [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]] + >>> batches(l, 2) + [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]] + >>> batches(l, 3) + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]] + >>> batches(l, 4) + [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]] + >>> batches(l, 5) + [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + + Initial batches will contain as many items as possible in cases where + there are not enough items to be distributed evenly. + + >>> batches(l, 6) + [[1, 2], [3, 4], [5, 6], [7, 8], [9], [10]] + >>> batches(l, 7) + [[1, 2], [3, 4], [5, 6], [7], [8], [9], [10]] + >>> batches(l, 8) + [[1, 2], [3, 4], [5], [6], [7], [8], [9], [10]] + >>> batches(l, 9) + [[1, 2], [3], [4], [5], [6], [7], [8], [9], [10]] + >>> batches(l, 10) + [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]] + + If there are more batches than items, empty batches will be appended + to the batch list. + + >>> batches(l, 11) + [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], []] + >>> batches(l, 12) + [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [], []] + """ + div, mod= divmod(len(items), number) + if div > 1: + if mod: + div += 1 + return batch_size(items, div) + else: + if not div: + return [[item] for item in items] + [[]] * (number - mod) + elif div == 1 and not mod: + return [[item] for item in items] + else: + # mod now tells you how many lists of 2 you can fit in + return ([items[i*2:(i*2)+2] for i in xrange(0, mod)] + + [[item] for item in items[mod*2:]]) |