From ecccf56b732fd8b24479cdd71b1685b0ebd4e6d5 Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Sun, 5 Feb 2017 16:15:12 +0100 Subject: [PATCH 1/7] Allow usage of debugger in collection This is done by restoring "state" (_slice and _len_mode) when quitting a function. When debugging, the debugger may call __len__ to display the actual variables and then change one of these values, so the state of the current debugging is broken. Note that it's still not thread-safe but still better than before. --- limpyd/collection.py | 93 ++++++++++++++++------------- limpyd/contrib/collection.py | 111 +++++++++++++++++++---------------- 2 files changed, 111 insertions(+), 93 deletions(-) diff --git a/limpyd/collection.py b/limpyd/collection.py index 8f27b5e..5c572c2 100644 --- a/limpyd/collection.py +++ b/limpyd/collection.py @@ -45,44 +45,52 @@ def __init__(self, cls): # case, specifically set to False in other cases def __iter__(self): - self._len_mode = False - return self._collection.__iter__() + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode + try: + self._len_mode = False + return self._collection.__iter__() + finally: + self._slice, self._len_mode = old_slice_and_len_mode def __getitem__(self, arg): - self._len_mode = False - self._slice = {} - if isinstance(arg, slice): - # A slice has been requested - # so add it to the sort parameters (via slice) - # and return the collection (a scliced collection is no more - # chainable, so we do not return `self`) - start = arg.start or 0 - if start < 0: - # in case of a negative start, we can't use redis sort so - # we fetch all the collection before returning the wanted slice - return self._collection[arg] - self._slice['start'] = start - stop = arg.stop - # Redis expects a number of elements - # not a python style stop value - if stop is None: - # negative value for the count return all - self._slice['num'] = -1 - else: - self._slice['num'] = stop - start - return 
self._collection - else: - # A single item has been requested - # Nevertheless, use the redis pagination, to minimize - # data transfert and use the fast redis offset system - start = arg - if start >= 0: + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode + try: + self._len_mode = False + self._slice = {} + if isinstance(arg, slice): + # A slice has been requested + # so add it to the sort parameters (via slice) + # and return the collection (a scliced collection is no more + # chainable, so we do not return `self`) + start = arg.start or 0 + if start < 0: + # in case of a negative start, we can't use redis sort so + # we fetch all the collection before returning the wanted slice + return self._collection[arg] self._slice['start'] = start - self._slice['num'] = 1 # one element - return self._collection[0] + stop = arg.stop + # Redis expects a number of elements + # not a python style stop value + if stop is None: + # negative value for the count return all + self._slice['num'] = -1 + else: + self._slice['num'] = stop - start + return self._collection else: - # negative index, we have to fetch the whole collection first - return self._collection[start] + # A single item has been requested + # Nevertheless, use the redis pagination, to minimize + # data transfert and use the fast redis offset system + start = arg + if start >= 0: + self._slice['start'] = start + self._slice['num'] = 1 # one element + return self._collection[0] + else: + # negative index, we have to fetch the whole collection first + return self._collection[start] + finally: + self._slice, self._len_mode = old_slice_and_len_mode def _get_pk(self): """ @@ -116,6 +124,7 @@ def _collection(self): """ Effectively retrieve data according to lazy_collection. 
""" + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode try: # try block to always reset the _slice in the "finally" part conn = self.cls.get_connection() @@ -165,12 +174,8 @@ def _collection(self): # Format return values if needed return self._prepare_results(collection) - - except: # raise original exception - raise - finally: # always reset the slice, having an exception or not - self._slice = {} - self._len_mode = True + finally: + self._slice, self._len_mode = old_slice_and_len_mode def _final_redis_call(self, final_set, sort_options): """ @@ -315,8 +320,12 @@ def __len__(self): return self._len def __repr__(self): - self._len_mode = False - return self._collection.__repr__() + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode + try: + self._len_mode = False + return self._collection.__repr__() + finally: + self._slice, self._len_mode = old_slice_and_len_mode def instances(self, skip_exist_test=False): """ diff --git a/limpyd/contrib/collection.py b/limpyd/contrib/collection.py index 7ffa797..2b1e6d3 100644 --- a/limpyd/contrib/collection.py +++ b/limpyd/contrib/collection.py @@ -102,13 +102,17 @@ def _collection(self): Effectively retrieve data according to lazy_collection. 
If we have a stored collection, without any result, return an empty list """ - if self.stored_key and not self._stored_len: - if self._len_mode: - self._len = 0 - self._len_mode = False - self._slice = {} - return [] - return super(ExtendedCollectionManager, self)._collection + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode + try: + if self.stored_key and not self._stored_len: + if self._len_mode: + self._len = 0 + self._len_mode = False + self._slice = {} + return [] + return super(ExtendedCollectionManager, self)._collection + finally: + self._slice, self._len_mode = old_slice_and_len_mode def _prepare_sets(self, sets): """ @@ -648,51 +652,56 @@ def store(self, key=None, ttl=DEFAULT_STORE_TTL): DEFAULT_STORE_TTL, which is 60 secondes. You can pass None if you don't want expiration. """ - self._store = True - - # save sort and values options - sort_options = None - if self._sort is not None: - sort_options = self._sort.copy() - values = None - if self._values is not None: - values = self._values - self._values = None - - # create a key for storage - store_key = key or self._unique_key() - if self._sort is None: - self._sort = {} - self._sort['store'] = store_key - - # if filter by pk, but without need to get "values", no redis call is done - # so force values to get a call to sort (to store result) - if self._lazy_collection['pks'] and not self._values: - self.values('pk') - - # call the collection - self._len_mode = False - self._collection - - # restore sort and values options - self._store = False - self._sort = sort_options - self._values = values - - # create the new collection - stored_collection = self.__class__(self.cls) - stored_collection.from_stored(store_key) - - # apply ttl if needed - if ttl is not None: - self.cls.get_connection().expire(store_key, ttl) - - # set choices about instances/values from the current to the new collection - for attr in ('_instances', '_instances_skip_exist_test', '_values'): - 
setattr(stored_collection, attr, deepcopy(getattr(self, attr))) + old_slice_and_len_mode = None if self._slice is None else self._slice.copy(), self._len_mode + try: + self._store = True - # finally return the new collection - return stored_collection + # save sort and values options + sort_options = None + if self._sort is not None: + sort_options = self._sort.copy() + values = None + if self._values is not None: + values = self._values + self._values = None + + # create a key for storage + store_key = key or self._unique_key() + if self._sort is None: + self._sort = {} + self._sort['store'] = store_key + + # if filter by pk, but without need to get "values", no redis call is done + # so force values to get a call to sort (to store result) + if self._lazy_collection['pks'] and not self._values: + self.values('pk') + + # call the collection + self._len_mode = False + self._collection + + # restore sort and values options + self._store = False + self._sort = sort_options + self._values = values + + # create the new collection + stored_collection = self.__class__(self.cls) + stored_collection.from_stored(store_key) + + # apply ttl if needed + if ttl is not None: + self.cls.get_connection().expire(store_key, ttl) + + # set choices about instances/values from the current to the new collection + for attr in ('_instances', '_instances_skip_exist_test', '_values'): + setattr(stored_collection, attr, deepcopy(getattr(self, attr))) + + # finally return the new collection + return stored_collection + + finally: + self._slice, self._len_mode = old_slice_and_len_mode def from_stored(self, key): """ From f1f71ac7fa1464f58b0edbafe19f0c5954bdb90f Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Sun, 5 Feb 2017 17:41:25 +0100 Subject: [PATCH 2/7] HashField cannot be marked as unique (this doesn't make any sense) --- limpyd/fields.py | 4 ++++ tests/fields/hash.py | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/limpyd/fields.py b/limpyd/fields.py index 
ec57bfe..647759a 100644 --- a/limpyd/fields.py +++ b/limpyd/fields.py @@ -175,6 +175,7 @@ class RedisField(RedisProxyCommand): 'kwargs': ['lockable', 'default'], 'attrs': ['name', '_instance', '_model', 'indexable', 'unique'] } + _unique_supported = True def __init__(self, *args, **kwargs): """ @@ -187,6 +188,8 @@ def __init__(self, *args, **kwargs): self.indexable = kwargs.get("indexable", False) self.unique = kwargs.get("unique", False) if self.unique: + if not self._unique_supported: + raise ImplementationError('%s field cannot be unique' % self.__class__.__name__) if hasattr(self, "default"): raise ImplementationError('Cannot set "default" and "unique" together!') self.indexable = True @@ -819,6 +822,7 @@ def _call_linsert(self, command, where, refvalue, value): class HashField(MultiValuesField): + _unique_supported = False proxy_getter = "hgetall" proxy_setter = "hmset" diff --git a/tests/fields/hash.py b/tests/fields/hash.py index 8f29ea7..42777cf 100644 --- a/tests/fields/hash.py +++ b/tests/fields/hash.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals from limpyd import fields +from limpyd.exceptions import ImplementationError from ..model import TestRedisModel, BaseModelTest @@ -182,3 +183,8 @@ def test_hlen_should_return_number_of_keys(self): self.assertEqual(obj.headers.hlen(), 0) obj.headers.hmset(**headers) self.assertEqual(obj.headers.hlen(), 2) + + def test_hashfields_cannot_be_unique(self): + with self.assertRaises(ImplementationError): + class TestUniquenessHashField(TestRedisModel): + data = fields.HashField(indexable=True, unique=True) From c06a2bac2167f710f54e27ddcbdbe34dad52b2e4 Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Sun, 5 Feb 2017 18:42:43 +0100 Subject: [PATCH 3/7] Ensure number of parts in filter is correct --- limpyd/collection.py | 6 ++++++ limpyd/fields.py | 4 ++++ tests/collection.py | 15 +++++++++++++++ 3 files changed, 25 insertions(+) diff --git a/limpyd/collection.py b/limpyd/collection.py index 
5c572c2..47c4017 100644 --- a/limpyd/collection.py +++ b/limpyd/collection.py @@ -308,6 +308,12 @@ def _add_filters(self, **filters): key_path = key.split('__') field_name = key_path.pop(0) field = self.cls.get_field(field_name) + if len(key_path) != field._field_parts - 1: + raise ImplementationError( + 'Unexpected number of parts in filter %s for field %s.%s' % ( + key, field._model._name, field.name + ) + ) self._lazy_collection['sets'].add(field.index_key(value, *key_path)) return self diff --git a/limpyd/fields.py b/limpyd/fields.py index 647759a..528f765 100644 --- a/limpyd/fields.py +++ b/limpyd/fields.py @@ -176,6 +176,8 @@ class RedisField(RedisProxyCommand): 'attrs': ['name', '_instance', '_model', 'indexable', 'unique'] } _unique_supported = True + _field_parts = 1 + def __init__(self, *args, **kwargs): """ @@ -823,6 +825,8 @@ def _call_linsert(self, command, where, refvalue, value): class HashField(MultiValuesField): _unique_supported = False + _field_parts = 2 + proxy_getter = "hgetall" proxy_setter = "hmset" diff --git a/tests/collection.py b/tests/collection.py index e239952..ae0a05a 100644 --- a/tests/collection.py +++ b/tests/collection.py @@ -138,6 +138,21 @@ class Group(TestRedisModel): # all groups by using the default manager self.assertEqual(len(list(Group.collection(manager=CollectionManager))), 2) + def test_number_of_parts_in_filter_key(self): + class MyEmail(TestRedisModel): + subject = fields.StringField(indexable=True) + headers = fields.HashField(indexable=True) + + MyEmail.collection(subject='hello') + with self.assertRaises(ImplementationError): + MyEmail.collection(subject__building='hello') + + MyEmail.collection(headers__from='you@moon.io') + with self.assertRaises(ImplementationError): + MyEmail.collection(headers='you@moon.io') + with self.assertRaises(ImplementationError): + MyEmail.collection(headers__from__age='you@moon.io') + class SliceTest(CollectionBaseTest): """ From 9b059b4c1cc5e2553cc834a99244ad05cabddfd8 Mon Sep 17 
00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Mon, 6 Feb 2017 03:44:58 +0100 Subject: [PATCH 4/7] Make EmailTestModel globally available in tests --- tests/fields/hash.py | 10 ++-------- tests/model.py | 6 +++++- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/tests/fields/hash.py b/tests/fields/hash.py index 42777cf..a6e88cd 100644 --- a/tests/fields/hash.py +++ b/tests/fields/hash.py @@ -3,18 +3,12 @@ from limpyd import fields from limpyd.exceptions import ImplementationError - -from ..model import TestRedisModel, BaseModelTest - - -class EmailTestModel(TestRedisModel): - headers = fields.HashField(indexable=True) - raw_headers = fields.HashField() +from ..model import TestRedisModel, BaseModelTest, Email class HashFieldTest(BaseModelTest): - model = EmailTestModel + model = Email def test_hashfield_can_be_set_at_init(self): headers = { diff --git a/tests/model.py b/tests/model.py index 48b0ecc..d2b5291 100644 --- a/tests/model.py +++ b/tests/model.py @@ -9,7 +9,7 @@ import time import unittest -from limpyd import model +from limpyd import model, fields from limpyd import fields from limpyd.exceptions import * @@ -46,6 +46,10 @@ class Boat(TestRedisModel): length = fields.StringField() +class Email(TestRedisModel): + headers = fields.HashField(indexable=True) + + class BaseModelTest(LimpydBaseTest): model = None From 2187f84e667e84e5c924f4651ca9e1e29d44967b Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Mon, 6 Feb 2017 03:48:25 +0100 Subject: [PATCH 5/7] Check if a field is indexable in collection --- limpyd/collection.py | 8 ++++++++ tests/collection.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/limpyd/collection.py b/limpyd/collection.py index 47c4017..d1f7f98 100644 --- a/limpyd/collection.py +++ b/limpyd/collection.py @@ -308,6 +308,14 @@ def _add_filters(self, **filters): key_path = key.split('__') field_name = key_path.pop(0) field = self.cls.get_field(field_name) + + if not field.indexable: + raise 
ImplementationError( + 'Field %s.%s is not indexable' % ( + field._model._name, field.name + ) + ) + if len(key_path) != field._field_parts - 1: raise ImplementationError( 'Unexpected number of parts in filter %s for field %s.%s' % ( diff --git a/tests/collection.py b/tests/collection.py index ae0a05a..015cb2e 100644 --- a/tests/collection.py +++ b/tests/collection.py @@ -45,7 +45,7 @@ def test_filter_from_kwargs(self): self.assertEqual(len(list(Boat.collection(power="sail", launched=1966))), 1) def test_should_raise_if_filter_is_not_indexable_field(self): - with self.assertRaises(ValueError): + with self.assertRaises(ImplementationError): Boat.collection(length=15.1) def test_collection_should_be_lazy(self): From 7a4312ed9d0af708ede54189dc138040056a42d0 Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Mon, 6 Feb 2017 04:50:21 +0100 Subject: [PATCH 6/7] Remove unused zset-to-set method and lua-script --- limpyd/contrib/collection.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/limpyd/contrib/collection.py b/limpyd/contrib/collection.py index 2b1e6d3..a0a9b5c 100644 --- a/limpyd/contrib/collection.py +++ b/limpyd/contrib/collection.py @@ -35,16 +35,6 @@ class ExtendedCollectionManager(CollectionManager): return 1 """, }, - 'zset_to_set': { - # add all members of the zset in a new set - 'lua': """ - redis.call('del', KEYS[2]) - for i, member in ipairs(redis.call('zrange', KEYS[1], 0, -1)) do - redis.call('sadd', KEYS[2], member) - end - return 1 - """, - }, } def __init__(self, cls): @@ -85,16 +75,6 @@ def _list_to_set(self, list_field, set_key): else: self.cls.get_connection().sadd(set_key, *list_field.lmembers()) - def _sortedset_to_set(self, sortedset_field, set_key): - """ - Store all content of the given SortedSetField in a redis set. 
- Use scripting if available to avoid retrieving all values locally from - the sorted set before sending them back to the set - """ - if self.cls.database.has_scripting(): - self._call_script('zset_to_set', keys=[sortedset_field.key, set_key]) - else: - self.cls.get_connection().sadd(set_key, *sortedset_field.zmembers()) @property def _collection(self): From da4f2d053588e64d32330b74895d0dde8c7dae82 Mon Sep 17 00:00:00 2001 From: "Stephane Angel (Twidi)" Date: Mon, 6 Feb 2017 15:04:56 +0100 Subject: [PATCH 7/7] ExtendedCollection.intersect can accept list and zset keys Before, only sets were allowed (without check, btw) --- doc/contrib.rst | 2 +- limpyd/contrib/collection.py | 69 +++++++++++++++++++++++------------- tests/contrib/collection.py | 44 ++++++++++++++++++++++- 3 files changed, 89 insertions(+), 26 deletions(-) diff --git a/doc/contrib.rst b/doc/contrib.rst index 816400b..29db2ae 100644 --- a/doc/contrib.rst +++ b/doc/contrib.rst @@ -643,7 +643,7 @@ Here is an example: - a python list - a python set - a python tuple -- a string, which must be the key of a Redis_ set (cannot be a list of sorted set for now) +- a string, which must be the key of a Redis_ set, sorted_set or list (long operation if a list) - a `limpyd` :ref:`SetField`, attached to a model - a `limpyd` :ref:`ListField`, attached to a model - a `limpyd` :ref:`SortedSetField`, attached to a model diff --git a/limpyd/contrib/collection.py b/limpyd/contrib/collection.py index a0a9b5c..1eb2f49 100644 --- a/limpyd/contrib/collection.py +++ b/limpyd/contrib/collection.py @@ -64,17 +64,17 @@ def _call_script(self, script_name, keys=[], args=[]): script['script_object'] = conn.register_script(script['lua']) return script['script_object'](keys=keys, args=args, client=conn) - def _list_to_set(self, list_field, set_key): + def _list_to_set(self, list_key, set_key): """ Store all content of the given ListField in a redis set. 
Use scripting if available to avoid retrieving all values locally from the list before sending them back to the set """ if self.cls.database.has_scripting(): - self._call_script('list_to_set', keys=[list_field.key, set_key]) + self._call_script('list_to_set', keys=[list_key, set_key]) else: - self.cls.get_connection().sadd(set_key, *list_field.lmembers()) - + conn = self.cls.get_connection() + conn.sadd(set_key, *conn.lrange(list_key, 0, -1)) @property def _collection(self): @@ -101,19 +101,47 @@ def _prepare_sets(self, sets): As the new "intersect" method can accept different types of "set", we have to handle them because we must return only keys of redis sets. """ + + if self.stored_key and not self.stored_key_exists(): + raise DoesNotExist('This collection is based on a previous one, ' + 'stored at a key that does not exist anymore.') + conn = self.cls.get_connection() all_sets = set() tmp_keys = set() only_one_set = len(sets) == 1 - if self.stored_key and not self.stored_key_exists(): - raise DoesNotExist('This collection is based on a previous one, ' - 'stored at a key that does not exist anymore.') + def add_key(key, key_type=None, is_tmp=False): + if not key_type: + key_type = conn.type(key) + if key_type == 'set': + all_sets.add(key) + elif key_type == 'zset': + all_sets.add(key) + self._has_sortedsets = True + elif key_type == 'list': + if only_one_set: + # we only have this list, use it directly + all_sets.add(key) + else: + # many sets, convert the list to a simple redis set + tmp_key = self._unique_key() + self._list_to_set(key, tmp_key) + add_key(tmp_key, 'set', True) + elif key_type == 'none': + # considered as an empty set + all_sets.add(key) + else: + raise ValueError('Cannot use redis key %s of type %s for filtering' % ( + key, key_type + )) + if is_tmp: + tmp_keys.add(key) for set_ in sets: if isinstance(set_, str): - all_sets.add(set_) + add_key(set_) elif isinstance(set_, ExtendedFilter): # We have a RedisModel and we'll use its pk, or a 
RedisField # (single value) and we'll use its value @@ -126,31 +154,22 @@ def _prepare_sets(self, sets): else: raise ValueError(u'Invalide filter value for %s: %s' % (field_name, value)) key = field.index_key(val) - all_sets.add(key) + add_key(key) elif isinstance(set_, SetField): # Use the set key. If we need to intersect, we'll use # sunionstore, and if not, store accepts set - all_sets.add(set_.key) + add_key(set_.key, 'set') elif isinstance(set_, SortedSetField): # Use the sorted set key. If we need to intersect, we'll use # zinterstore, and if not, store accepts zset - all_sets.add(set_.key) + add_key(set_.key, 'zset') elif isinstance(set_, (ListField, _StoredCollection)): - if only_one_set: - # we only have this list, use it directly - all_sets.add(set_.key) - else: - # many sets, convert the list to a simple redis set - tmp_key = self._unique_key() - self._list_to_set(set_, tmp_key) - tmp_keys.add(tmp_key) - all_sets.add(tmp_key) + add_key(set_.key, 'list') elif isinstance(set_, tuple) and len(set_): # if we got a list or set, create a redis set to hold its values tmp_key = self._unique_key() conn.sadd(tmp_key, *set_) - tmp_keys.add(tmp_key) - all_sets.add(tmp_key) + add_key(tmp_key, 'set', True) return all_sets, tmp_keys @@ -167,7 +186,8 @@ def intersect(self, *sets): Each "set" represent a list of pk, the final goal is to return only pks matching the intersection of all sets. A "set" can be: - - a string: considered as a redis set's name + - a string: considered as the name of a redis set, sorted set or list + (if a list, values will be stored in a temporary set) - a list, set or tuple: values will be stored in a temporary set - a SetField: we will directly use it's content on redis - a ListField or SortedSetField: values will be stored in a temporary @@ -184,7 +204,8 @@ def intersect(self, *sets): elif not isinstance(set_, (tuple, str, MultiValuesField, _StoredCollection)): raise ValueError('%s is not a valid type of argument that can ' 'be used as a set. 
Allowed are: string (key ' - 'of a redis set), limpyd multi-values field (' + 'of a redis set, sorted set or list), ' + 'limpyd multi-values field (' 'SetField, ListField or SortedSetField), or ' 'real python set, list or tuple' % set_) if isinstance(set_, SortedSetField): diff --git a/tests/contrib/collection.py b/tests/contrib/collection.py index 0aa7f90..e462f87 100644 --- a/tests/contrib/collection.py +++ b/tests/contrib/collection.py @@ -248,7 +248,7 @@ def tearDown(self): self.connection.sinterstore = IntersectTest.redis_sinterstore super(IntersectTest, self).tearDown() - def test_intersect_should_accept_string(self): + def test_intersect_should_accept_set_key_as_string(self): set_key = unique_key(self.connection) self.connection.sadd(set_key, 1, 2) collection = set(Group.collection().intersect(set_key)) @@ -260,6 +260,48 @@ def test_intersect_should_accept_string(self): collection = set(Group.collection().intersect(set_key)) self.assertEqual(collection, set(['1', '2'])) + def test_intersect_should_accept_sortedset_key_as_string(self): + zset_key = unique_key(self.connection) + self.connection.zadd(zset_key, 1.0, 1, 2.0, 2) + collection = set(Group.collection().intersect(zset_key)) + self.assertEqual(self.last_interstore_call['command'], 'zinterstore') + self.assertEqual(collection, set(['1', '2'])) + + zset_key = unique_key(self.connection) + self.connection.zadd(zset_key, 1.0, 1, 2.0, 2, 10.0, 10, 50.0, 50) + collection = set(Group.collection().intersect(zset_key)) + self.assertEqual(collection, set(['1', '2'])) + + def test_intersect_should_accept_list_key_as_string(self): + list_key = unique_key(self.connection) + self.connection.lpush(list_key, 1, 2) + collection = set(Group.collection().intersect(list_key)) + self.assertEqual(self.last_interstore_call['command'], 'sinterstore') + self.assertEqual(collection, set(['1', '2'])) + + list_key = unique_key(self.connection) + self.connection.lpush(list_key, 1, 2, 10, 50) + collection = 
set(Group.collection().intersect(list_key)) + self.assertEqual(collection, set(['1', '2'])) + + def test_intersect_should_not_accept_string_key_as_string(self): + str_key = unique_key(self.connection) + self.connection.set(str_key, 'foo') + with self.assertRaises(ValueError): + set(Group.collection().intersect(str_key)) + + def test_intersect_should_not_accept_hkey_key_as_string(self): + hash_key = unique_key(self.connection) + self.connection.hset(hash_key, 'foo', 'bar') + with self.assertRaises(ValueError): + set(Group.collection().intersect(hash_key)) + + def test_intersect_should_consider_non_existent_key_as_set(self): + no_key = unique_key(self.connection) + collection = set(Group.collection().intersect(no_key)) + self.assertEqual(self.last_interstore_call['command'], 'sinterstore') + self.assertEqual(collection, set()) + def test_intersect_should_accept_set(self): collection = set(Group.collection().intersect(set([1, 2]))) self.assertEqual(self.last_interstore_call['command'], 'sinterstore')