Prepare v0.2.0
diff --git a/.coveragerc b/.coveragerc
deleted file mode 100644
index e77617c..0000000
--- a/.coveragerc
+++ /dev/null
@@ -1,5 +0,0 @@
-[report]
-omit =
- */pyshared/*
- */python?.?/*
- */site-packages/nose/*
diff --git a/Changes b/Changes
index 67af7e8..310f0e1 100644
--- a/Changes
+++ b/Changes
@@ -1,3 +1,8 @@
+0.2.0 2014-04-02
+
+* Add @cache decorator.
+* Update documentation.
+
0.1.0 2014-03-27
* Initial release.
diff --git a/MIT-LICENSE b/LICENSE
similarity index 100%
rename from MIT-LICENSE
rename to LICENSE
diff --git a/MANIFEST.in b/MANIFEST.in
index 5217935..c9346d9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,6 @@
include Changes
+include LICENSE
include MANIFEST.in
-include MIT-LICENSE
include README.rst
recursive-include tests *.py
diff --git a/README.rst b/README.rst
index bb1067b..cd5ea58 100644
--- a/README.rst
+++ b/README.rst
@@ -31,7 +31,7 @@
This module provides various cache implementations based on different
cache algorithms, as well as decorators for easily memoizing function
-calls.
+calls, and utilities for creating custom cache implementations.
Installation
@@ -75,4 +75,4 @@
.. _Source Code: https://github.com/tkem/cachetools/
.. _Issue Tracker: https://github.com/tkem/cachetools/issues/
.. _Change Log: http://raw.github.com/tkem/cachetools/master/Changes
-.. _MIT License: http://raw.github.com/tkem/cachetools/master/MIT-LICENSE
+.. _MIT License: http://raw.github.com/tkem/cachetools/master/LICENSE
diff --git a/cachetools.py b/cachetools.py
index c0dc19b..33b080f 100644
--- a/cachetools.py
+++ b/cachetools.py
@@ -8,53 +8,63 @@
except ImportError:
from dummy_threading import RLock
-__version__ = '0.1.0'
+__version__ = '0.2.0'
-class Cache(collections.MutableMapping):
+def cache(cls):
+ """Class decorator that wraps any mutable mapping to work as a
+ cache."""
- def __init__(self, maxsize, wrapped=None):
- self.__wrapped__ = {} if wrapped is None else wrapped
- self.maxsize = maxsize
+ class Cache(collections.MutableMapping):
- def __getitem__(self, key):
- return self.__wrapped__[key]
+ __wrapped__ = cls
- def __setitem__(self, key, value):
- while len(self) >= self.maxsize:
- self.popitem()
- self.__wrapped__[key] = value
+ def __init__(self, maxsize, *args, **kwargs):
+ self.__wrapped__ = cls(*args, **kwargs)
+ self.maxsize = maxsize
- def __delitem__(self, key):
- del self.__wrapped__[key]
+ def __getitem__(self, key):
+ return self.__wrapped__[key]
- def __iter__(self):
- return iter(self.__wrapped__)
+ def __setitem__(self, key, value):
+ while len(self) >= self.maxsize:
+ self.popitem()
+ self.__wrapped__[key] = value
- def __len__(self):
- return len(self.__wrapped__)
+ def __delitem__(self, key):
+ del self.__wrapped__[key]
- def __repr__(self):
- return '%s(%r, maxsize=%d)' % (
- self.__class__.__name__,
- self.__wrapped__,
- self.__maxsize,
- )
+ def __iter__(self):
+ return iter(self.__wrapped__)
- @property
- def maxsize(self):
- return self.__maxsize
+ def __len__(self):
+ return len(self.__wrapped__)
- @maxsize.setter
- def maxsize(self, value):
- if value < 1:
- raise ValueError('maxsize must be >= 1')
- while (len(self) > value):
- self.popitem()
- self.__maxsize = value
+ def __repr__(self):
+ return '%s(%r, maxsize=%d)' % (
+ self.__class__.__name__,
+ self.__wrapped__,
+ self.__maxsize,
+ )
+
+ @property
+ def maxsize(self):
+ return self.__maxsize
+
+ @maxsize.setter
+ def maxsize(self, value):
+ if not value > 0:
+ raise ValueError('maxsize must be > 0')
+ while (len(self) > value):
+ self.popitem()
+ self.__maxsize = value
+
+ # TODO: functools.update_wrapper() for class decorators?
+
+ return Cache
-class LRUCache(Cache):
+class LRUCache(cache(collections.OrderedDict)):
"""Least Recently Used (LRU) cache implementation.
Discards the least recently used items first to make space when
@@ -66,25 +76,21 @@
# OrderedDict.move_to_end is only available in Python 3
if hasattr(collections.OrderedDict, 'move_to_end'):
- def __update(self, key):
+ def __getitem__(self, key):
+ value = self.__wrapped__[key]
self.__wrapped__.move_to_end(key)
+ return value
else:
- def __update(self, key):
- self.__wrapped__[key] = self.__wrapped__.pop(key)
-
- def __init__(self, maxsize):
- Cache.__init__(self, maxsize, collections.OrderedDict())
-
- def __getitem__(self, key):
- value = Cache.__getitem__(self, key)
- self.__update(key)
- return value
+ def __getitem__(self, key):
+ value = self.__wrapped__.pop(key)
+ self.__wrapped__[key] = value
+ return value
def popitem(self):
return self.__wrapped__.popitem(False)
-class LFUCache(Cache):
+class LFUCache(cache(dict)):
"""Least Frequently Used (LFU) cache implementation.
Counts how often an item is needed, and discards the items used
@@ -94,30 +100,33 @@
track of usage counts.
"""
- def __init__(self, maxsize):
- Cache.__init__(self, maxsize)
+ def __init__(self, maxsize, *args, **kwargs):
+ super(LFUCache, self).__init__(maxsize, *args, **kwargs)
self.__counter = collections.Counter()
def __getitem__(self, key):
- value = Cache.__getitem__(self, key)
+ value = super(LFUCache, self).__getitem__(key)
self.__counter[key] += 1
return value
def __setitem__(self, key, value):
- Cache.__setitem__(self, key, value)
+ super(LFUCache, self).__setitem__(key, value)
self.__counter[key] += 0
def __delitem__(self, key):
- Cache.__delitem__(self, key)
+ super(LFUCache, self).__delitem__(key)
del self.__counter[key]
def popitem(self):
- item = self.__counter.most_common()[-1]
- self.pop(item[0])
+ try:
+ item = self.__counter.most_common()[-1]
+ except IndexError:
+ raise KeyError
+ super(LFUCache, self).pop(item[0])
return item
-class RRCache(Cache):
+class RRCache(cache(dict)):
"""Random Replacement (RR) cache implementation.
Randomly selects a candidate item and discards it to make space
@@ -127,11 +136,11 @@
to be discarded.
"""
- def __init__(self, maxsize):
- Cache.__init__(self, maxsize)
-
def popitem(self):
- item = random.choice(list(self.items()))
+ try:
+ item = random.choice(list(self.items()))
+ except IndexError:
+ raise KeyError
self.pop(item[0])
return item
@@ -154,7 +163,6 @@
def decorator(func):
count = [0, 0]
- @functools.wraps(func)
def wrapper(*args, **kwargs):
key = makekey(args, kwargs)
with lock:
@@ -177,7 +185,7 @@
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
- return wrapper
+ return functools.update_wrapper(wrapper, func)
return decorator
diff --git a/docs/index.rst b/docs/index.rst
index 8b1866d..47440b6 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -3,9 +3,9 @@
.. module:: cachetools
-This module provides various memoizing collections and function
-decorators, including a variant of the Python 3 Standard Library
-:func:`functools.lru_cache` decorator.
+This module provides various memoizing collections and decorators,
+including a variant of the Python 3 Standard Library
+:func:`functools.lru_cache` function decorator.
.. code-block:: pycon
@@ -33,10 +33,10 @@
This module provides various cache implementations based on different
cache algorithms, as well as decorators for easily memoizing function
-calls.
+calls, and utilities for creating custom cache implementations.
-Cache Classes
+Cache Implementations
------------------------------------------------------------------------
.. autoclass:: LRUCache
@@ -56,23 +56,83 @@
with --- though not necessarily as efficient as --- the Python 3
Standard Library :func:`functools.lru_cache` decorator.
-All decorators feature two optional arguments, which should be
-specified as keyword arguments for compatibility with future
-extensions:
+In addition to a `maxsize` parameter, all decorators feature two
+optional arguments, which should be specified as keyword arguments for
+compatibility with future extensions:
If `typed` is set to :const:`True`, function arguments of different
types will be cached separately.
`lock` specifies a function of zero arguments that returns a `context
-manager`_ to lock the cache when necessary. If not specified, a
+manager`_ to lock the cache when necessary. If not specified,
:class:`threading.RLock` will be used for synchronizing access from
multiple threads.
-.. autofunction:: lru_cache
+The wrapped function is instrumented with :func:`cache_info` and
+:func:`cache_clear` functions to provide information about cache
+performance and clear the cache. See the :func:`functools.lru_cache`
+documentation for details.
-.. autofunction:: lfu_cache
+Unlike :func:`functools.lru_cache`, setting `maxsize` to zero or
+:const:`None` is not supported.
-.. autofunction:: rr_cache
+.. decorator:: lru_cache(maxsize=128, typed=False, lock=threading.RLock)
+
+ Decorator to wrap a function with a memoizing callable that saves
+ up to the `maxsize` most recent calls based on a Least Recently
+ Used (LRU) algorithm.
+
+.. decorator:: lfu_cache(maxsize=128, typed=False, lock=threading.RLock)
+
+ Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results, discarding the least frequently used
+ entries first, based on a Least Frequently Used (LFU) algorithm.
+
+.. decorator:: rr_cache(maxsize=128, typed=False, lock=threading.RLock)
+
+ Decorator to wrap a function with a memoizing callable that saves
+ up to `maxsize` results, discarding randomly selected entries to
+ make room, based on a Random Replacement (RR) algorithm.
+
+
+Class Decorators
+------------------------------------------------------------------------
+
+.. decorator:: cache
+
+ Class decorator that wraps any mutable mapping to work as a cache.
+
+ This class decorator may be useful when implementing new cache
+ classes. It converts any mutable mapping into a cache-like class
+ with a :attr:`maxsize` attribute. If :func:`__setitem__` is called
+ when the cache is full, i.e. ``len(self) == self.maxsize``,
+ :func:`popitem` is invoked to make room for new items::
+
+ @cache
+ class DictCache(dict):
+ pass
+
+ c = DictCache(maxsize=2)
+ c['x'] = 1
+ c['y'] = 2
+ c['z'] = 3 # calls dict.popitem(c)
+
+ The original underlying class or object is accessible through the
+ :attr:`__wrapped__` attribute. This is useful for subclasses that
+ need to access the original mapping object directly, e.g. to
+ implement their own version of :func:`popitem`.
+
+ It is also possible, and arguably more comprehensible, to use the
+ wrapper class as a base class::
+
+ class OrderedDictCache(cache(collections.OrderedDict)):
+ def popitem(self):
+ return self.__wrapped__.popitem(last=False) # pop first item
+
+ c = OrderedDictCache(maxsize=2)
+ c['x'] = 1
+ c['y'] = 2
+ c['z'] = 3 # removes 'x'
.. _mapping: http://docs.python.org/dev/glossary.html#term-mapping
diff --git a/tests/test_cache.py b/tests/test_cache.py
new file mode 100644
index 0000000..5c32f2b
--- /dev/null
+++ b/tests/test_cache.py
@@ -0,0 +1,41 @@
+import unittest
+
+import cachetools
+import collections
+
+
[email protected]
+class DictCache(dict):
+ pass
+
+
[email protected]
+class OrderedDictCache(collections.OrderedDict):
+ pass
+
+
+class CacheTest(unittest.TestCase):
+
+ def test_dict_cache(self):
+ cache = DictCache(maxsize=2)
+
+ cache['a'] = 1
+ cache['b'] = 2
+ cache['c'] = 3
+
+ self.assertEqual(len(cache), 2)
+ self.assertTrue('a' in cache or ('b' in cache and 'c' in cache))
+ self.assertTrue('b' in cache or ('a' in cache and 'c' in cache))
+ self.assertTrue('c' in cache or ('a' in cache and 'b' in cache))
+
+ def test_ordered_dict_cache(self):
+ cache = OrderedDictCache(maxsize=2)
+
+ cache['a'] = 1
+ cache['b'] = 2
+ cache['c'] = 3
+
+ self.assertEqual(len(cache), 2)
+ self.assertNotIn('a', cache)
+ self.assertEqual(cache['b'], 2)
+ self.assertEqual(cache['c'], 3)
diff --git a/tests/test_lfucache.py b/tests/test_lfucache.py
index 9f0de48..d414e20 100644
--- a/tests/test_lfucache.py
+++ b/tests/test_lfucache.py
@@ -8,6 +8,11 @@
return n
+@lfu_cache(maxsize=2, typed=True)
+def cached_typed(n):
+ return n
+
+
class LFUCacheTest(unittest.TestCase):
def test_insert(self):
@@ -33,3 +38,19 @@
self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
self.assertEqual(cached(1), 1)
self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached(1.0), 1.0)
+ self.assertEqual(cached.cache_info(), (2, 1, 2, 1))
+
+ cached.cache_clear()
+ self.assertEqual(cached(1), 1)
+ self.assertEqual(cached.cache_info(), (2, 2, 2, 1))
+
+ def test_typed_decorator(self):
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1))
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2))
diff --git a/tests/test_lrucache.py b/tests/test_lrucache.py
index da1104d..a4ceb6e 100644
--- a/tests/test_lrucache.py
+++ b/tests/test_lrucache.py
@@ -8,6 +8,11 @@
return n
+@lru_cache(maxsize=2, typed=True)
+def cached_typed(n):
+ return n
+
+
class LRUCacheTest(unittest.TestCase):
def test_insert(self):
@@ -36,7 +41,24 @@
self.assertNotIn('b', cache)
def test_decorator(self):
+ self.assertEqual(cached.cache_info(), (0, 0, 2, 0))
self.assertEqual(cached(1), 1)
self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
self.assertEqual(cached(1), 1)
self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached(1.0), 1.0)
+ self.assertEqual(cached.cache_info(), (2, 1, 2, 1))
+
+ cached.cache_clear()
+ self.assertEqual(cached(1), 1)
+ self.assertEqual(cached.cache_info(), (2, 2, 2, 1))
+
+ def test_typed_decorator(self):
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1))
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2))
diff --git a/tests/test_rrcache.py b/tests/test_rrcache.py
index c4bb7d2..f8eb534 100644
--- a/tests/test_rrcache.py
+++ b/tests/test_rrcache.py
@@ -8,6 +8,11 @@
return n
+@rr_cache(maxsize=2, typed=True)
+def cached_typed(n):
+ return n
+
+
class RRCacheTest(unittest.TestCase):
def test_insert(self):
@@ -27,3 +32,19 @@
self.assertEqual(cached.cache_info(), (0, 1, 2, 1))
self.assertEqual(cached(1), 1)
self.assertEqual(cached.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached(1.0), 1.0)
+ self.assertEqual(cached.cache_info(), (2, 1, 2, 1))
+
+ cached.cache_clear()
+ self.assertEqual(cached(1), 1)
+ self.assertEqual(cached.cache_info(), (2, 2, 2, 1))
+
+ def test_typed_decorator(self):
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (0, 1, 2, 1))
+ self.assertEqual(cached_typed(1), 1)
+ self.assertEqual(cached_typed.cache_info(), (1, 1, 2, 1))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (1, 2, 2, 2))
+ self.assertEqual(cached_typed(1.0), 1.0)
+ self.assertEqual(cached_typed.cache_info(), (2, 2, 2, 2))