pax_global_header00006660000000000000000000000064142613534100014511gustar00rootroot0000000000000052 comment=63cfbf2c530e9d95a95febce2f467dc878631046 django-cache-machine-1.2.0/000077500000000000000000000000001426135341000154165ustar00rootroot00000000000000django-cache-machine-1.2.0/.coveragerc000066400000000000000000000000271426135341000175360ustar00rootroot00000000000000[run] source = caching django-cache-machine-1.2.0/.github/000077500000000000000000000000001426135341000167565ustar00rootroot00000000000000django-cache-machine-1.2.0/.github/workflows/000077500000000000000000000000001426135341000210135ustar00rootroot00000000000000django-cache-machine-1.2.0/.github/workflows/ci.yaml000066400000000000000000000044671426135341000223050ustar00rootroot00000000000000name: lint-test on: pull_request: branches: - main push: branches: - main schedule: # run once a week on early monday mornings - cron: "22 2 * * 1" jobs: pre-commit: runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - uses: pre-commit/action@v2.0.0 test-job: runs-on: ubuntu-20.04 strategy: matrix: # tox-gh-actions will only run the tox environments which match the currently # running python-version. See [gh-actions] in tox.ini for the mapping python-version: [3.6, 3.7, 3.8, 3.9, "3.10"] # Service containers to run with `test-job` services: memcached: image: memcached ports: - 11211:11211 redis: image: redis options: >- --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 ports: - 6379:6379 postgres-default: image: postgres env: POSTGRES_USER: default POSTGRES_PASSWORD: postgres options: >- --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 ports: # use port 5432 for default DB - 5432:5432 postgres-primary2: image: postgres env: POSTGRES_USER: primary2 POSTGRES_PASSWORD: postgres options: >- --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 ports: # use port 5433 for primary2 DB - 5433:5432 steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} cache: "pip" cache-dependency-path: "**/dev-requirements.txt" - name: Install dependencies run: | python -m pip install --upgrade pip pip install -r dev-requirements.txt - name: Test with tox env: DATABASE_URL: postgres://default:postgres@localhost:5432/default DATABASE_URL_2: postgres://primary2:postgres@localhost:5433/primary2 run: tox django-cache-machine-1.2.0/.gitignore000066400000000000000000000001071426135341000174040ustar00rootroot00000000000000.coverage .tox docs/_build *.py[co] *.egg-info *~ .idea .direnv .envrc django-cache-machine-1.2.0/.pre-commit-config.yaml000066400000000000000000000015361426135341000217040ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v2.4.0 hooks: - id: check-added-large-files - id: check-merge-conflict - id: check-yaml - id: debug-statements - id: detect-private-key - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/PyCQA/flake8 rev: 4.0.1 hooks: - id: flake8 - repo: https://github.com/pycqa/isort rev: 5.6.4 hooks: - id: isort args: ["--profile", "black", "--filter-files"] - repo: https://github.com/psf/black rev: 22.6.0 hooks: - id: black language_version: python3 - repo: https://github.com/pre-commit/mirrors-prettier rev: "v2.6.0" 
hooks: - id: prettier django-cache-machine-1.2.0/LICENSE000066400000000000000000000027621426135341000164320ustar00rootroot00000000000000Copyright (c) 2010, Jeff Balogh. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Cache Machine nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. django-cache-machine-1.2.0/MANIFEST.in000066400000000000000000000000621426135341000171520ustar00rootroot00000000000000include LICENSE include README.rst prune examples django-cache-machine-1.2.0/README.rst000066400000000000000000000020601426135341000171030ustar00rootroot00000000000000============= Cache Machine ============= Cache Machine provides automatic caching and invalidation for Django models through the ORM. For full docs, see https://cache-machine.readthedocs.org/en/latest/. .. image:: https://github.com/django-cache-machine/django-cache-machine/actions/workflows/ci.yaml/badge.svg :target: https://github.com/django-cache-machine/django-cache-machine/actions/workflows/ci.yaml Requirements ------------ Cache Machine currently works with: * Django 2.2, 3.0, 3.1, 3.2, and 4.0 * Python 3.6, 3.7, 3.8, 3.9, and 3.10 The last version to support Python 2.7 and Django 1.11 is ``django-cache-machine==1.1.0``. 
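Quick example
-------------

A model opts into caching by mixing in ``CachingMixin`` and making ``CachingManager`` its default manager. A minimal sketch, taken from the documentation::

    from django.db import models
    from caching.base import CachingManager, CachingMixin

    class Zomg(CachingMixin, models.Model):
        val = models.IntegerField()

        objects = CachingManager()

        # On Django 2.0 or later the caching manager must also be the base manager.
        class Meta:
            base_manager_name = 'objects'

Queries made through ``Zomg.objects`` are then cached automatically and invalidated when instances are saved or deleted.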
Installation ------------ Get it from `pypi `_:: pip install django-cache-machine Running Tests ------------- Get it from `github `_:: git clone git://github.com/django-cache-machine/django-cache-machine.git cd django-cache-machine pip install -r dev-requirements.txt python run_tests.py django-cache-machine-1.2.0/caching/000077500000000000000000000000001426135341000170125ustar00rootroot00000000000000django-cache-machine-1.2.0/caching/__init__.py000066400000000000000000000001431426135341000211210ustar00rootroot00000000000000from __future__ import unicode_literals VERSION = ("1", "2", "0") __version__ = ".".join(VERSION) django-cache-machine-1.2.0/caching/base.py000066400000000000000000000342371426135341000203070ustar00rootroot00000000000000import functools import logging import django from django.core.cache.backends.base import DEFAULT_TIMEOUT from django.core.exceptions import EmptyResultSet from django.db import models from django.db.models import signals from django.db.models.query import ModelIterable from django.utils import encoding from caching import config from caching.invalidation import byid, cache, flush_key, invalidator, make_key log = logging.getLogger("caching") class CachingManager(models.Manager): # This option removed in Django 2.0 # Tell Django to use this manager when resolving foreign keys. (Django < 2.0) use_for_related_fields = True def get_queryset(self): return CachingQuerySet(self.model, using=self._db) def contribute_to_class(self, cls, name): signals.post_save.connect(self.post_save, sender=cls) signals.post_delete.connect(self.post_delete, sender=cls) return super(CachingManager, self).contribute_to_class(cls, name) def post_save(self, instance, **kwargs): self.invalidate( instance, is_new_instance=kwargs["created"], model_cls=kwargs["sender"] ) def post_delete(self, instance, **kwargs): self.invalidate(instance) def invalidate(self, *objects, **kwargs): """Invalidate all the flush lists associated with ``objects``.""" invalidator.invalidate_objects(objects, **kwargs) def raw(self, raw_query, params=None, *args, **kwargs): return CachingRawQuerySet( raw_query, self.model, params=params, using=self._db, *args, **kwargs ) def cache(self, timeout=DEFAULT_TIMEOUT): return self.get_queryset().cache(timeout) def no_cache(self): return self.cache(config.NO_CACHE) class CachingModelIterable(ModelIterable): """ Handles all the cache management for a QuerySet. Takes a queryset, and optionally takes a function that can be called to get an iterator over some database results. The function is only needed for RawQuerySets currently. """ def __init__(self, queryset, *args, **kwargs): self.iter_function = kwargs.pop("iter_function", None) self.timeout = kwargs.pop("timeout", queryset.timeout) self.db = kwargs.pop("db", queryset.db) super(CachingModelIterable, self).__init__(queryset, *args, **kwargs) def query_key(self): """ Generate the cache key for this query. Database router info is included to avoid the scenario where related cached objects from one DB (e.g. replica) are saved in another DB (e.g. primary), throwing a Django ValueError in the process. Django prevents cross DB model saving among related objects. 
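For example, a queryset evaluated against the 'default' database is keyed on a string of the form 'qs:<raw SQL>::db:default', which make_key() below reduces to a hash.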
""" query_db_string = "qs:%s::db:%s" % (self.queryset.query_key(), self.db) return make_key(query_db_string, with_locale=False) def cache_objects(self, objects, query_key): """Cache query_key => objects, then update the flush lists.""" log.debug("query_key: %s" % query_key) query_flush = flush_key(self.queryset.query_key()) log.debug("query_flush: %s" % query_flush) cache.add(query_key, objects, timeout=self.timeout) invalidator.cache_objects(self.queryset.model, objects, query_key, query_flush) def __iter__(self): if self.iter_function is not None: # This a RawQuerySet. Use the function passed into # the class constructor. iterator = self.iter_function else: # Otherwise, use super().__iter__. iterator = super(CachingModelIterable, self).__iter__ if self.timeout == config.NO_CACHE: # no cache, just iterate and return the results for obj in iterator(): yield obj return # Try to fetch from the cache. try: query_key = self.query_key() except EmptyResultSet: return cached = cache.get(query_key) if cached is not None: log.debug("cache hit: %s" % query_key) for obj in cached: obj.from_cache = True yield obj return # Use the special FETCH_BY_ID iterator if configured. if config.FETCH_BY_ID and hasattr(self.queryset, "fetch_by_id"): iterator = self.queryset.fetch_by_id # No cached results. Do the database query, and cache it once we have # all the objects. to_cache = [] for obj in iterator(): obj.from_cache = False to_cache.append(obj) yield obj if to_cache or config.CACHE_EMPTY_QUERYSETS: self.cache_objects(to_cache, query_key) class CachingQuerySet(models.query.QuerySet): _default_timeout_pickle_key = "__DEFAULT_TIMEOUT__" def __init__(self, *args, **kw): super(CachingQuerySet, self).__init__(*args, **kw) self.timeout = DEFAULT_TIMEOUT self._iterable_class = CachingModelIterable def __getstate__(self): """ Safely pickle our timeout if it's a DEFAULT_TIMEOUT. This is not needed by cache-machine itself, but by application code that may re-cache objects retrieved using cache-machine. """ state = dict() state.update(self.__dict__) if self.timeout == DEFAULT_TIMEOUT: state["timeout"] = self._default_timeout_pickle_key return state def __setstate__(self, state): """Safely unpickle our timeout if it's a DEFAULT_TIMEOUT.""" self.__dict__.update(state) if self.timeout == self._default_timeout_pickle_key: self.timeout = DEFAULT_TIMEOUT def flush_key(self): return flush_key(self.query_key()) def query_key(self): clone = self.query.clone() sql, params = clone.get_compiler(using=self.db).as_sql() return sql % params def iterator(self): return self._iterable_class(self) def fetch_by_id(self): """ Run two queries to get objects: one for the ids, one for id__in=ids. After getting ids from the first query we can try cache.get_many to reuse objects we've already seen. Then we fetch the remaining items from the db, and put those in the cache. This prevents cache duplication. """ # Include columns from extra since they could be used in the query's # order_by. vals = self.values_list("pk", *list(self.query.extra.keys())) pks = [val[0] for val in vals] keys = dict((byid(self.model._cache_key(pk, self.db)), pk) for pk in pks) cached = dict( (k, v) for k, v in list(cache.get_many(keys).items()) if v is not None ) # Pick up the objects we missed. missed = [pk for key, pk in list(keys.items()) if key not in cached] if missed: others = self.fetch_missed(missed) # Put the fetched objects back in cache. 
new = dict((byid(o), o) for o in others) cache.set_many(new) else: new = {} # Use pks to return the objects in the correct order. objects = dict((o.pk, o) for o in list(cached.values()) + list(new.values())) for pk in pks: yield objects[pk] def fetch_missed(self, pks): # Reuse the queryset but get a clean query. others = self.all() others.query.clear_limits() # Clear out the default ordering since we order based on the query. others = others.order_by().filter(pk__in=pks) if hasattr(others, "no_cache"): others = others.no_cache() if self.query.select_related: others.query.select_related = self.query.select_related return others def count(self): super_count = super(CachingQuerySet, self).count try: query_string = "count:%s" % self.query_key() except EmptyResultSet: return 0 if self.timeout == config.NO_CACHE or config.TIMEOUT == config.NO_CACHE: return super_count() else: return cached_with(self, super_count, query_string, config.TIMEOUT) def cache(self, timeout=DEFAULT_TIMEOUT): qs = self._clone() qs.timeout = timeout return qs def no_cache(self): return self.cache(config.NO_CACHE) def _clone(self, *args, **kw): qs = super(CachingQuerySet, self)._clone(*args, **kw) qs.timeout = self.timeout return qs class CachingMixin(object): """Inherit from this class to get caching and invalidation helpers.""" def flush_key(self): return flush_key(self) def get_cache_key(self, incl_db=True): """Return a cache key based on the object's primary key.""" # incl_db will be False if this key is intended for use in a flush key. # This ensures all cached copies of an object will be invalidated # regardless of the DB on which they're modified/deleted. return self._cache_key(self.pk, incl_db and self._state.db or None) cache_key = property(get_cache_key) @classmethod def model_flush_key(cls): """ Return a cache key for the entire model (used by invalidation). """ # use dummy PK and DB reference that will never resolve to an actual # cache key for an object return flush_key(cls._cache_key("all-pks", "all-dbs")) @classmethod def _cache_key(cls, pk, db=None): """ Return a string that uniquely identifies the object. For the Addon class, with a pk of 2, we get "o:addons.addon:2". 
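When a db alias is passed it is appended as well, e.g. "o:addons.addon:2:default".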
""" if db: key_parts = ("o", cls._meta, pk, db) else: key_parts = ("o", cls._meta, pk) return ":".join(map(encoding.smart_str, key_parts)) def _cache_keys(self, incl_db=True): """Return the cache key for self plus all related foreign keys.""" fks = dict( (f, getattr(self, f.attname)) for f in self._meta.fields if isinstance(f, models.ForeignKey) ) keys = [] for fk, val in list(fks.items()): related_model = self._get_fk_related_model(fk) if val is not None and hasattr(related_model, "_cache_key"): keys.append( related_model._cache_key(val, incl_db and self._state.db or None) ) return (self.get_cache_key(incl_db=incl_db),) + tuple(keys) def _flush_keys(self): """Return the flush key for self plus all related foreign keys.""" return map(flush_key, self._cache_keys(incl_db=False)) def _get_fk_related_model(self, fk): if django.VERSION[0] >= 2: return fk.remote_field.model else: return fk.rel.to class CachingRawQuerySet(models.query.RawQuerySet): def __init__(self, *args, **kw): timeout = kw.pop("timeout", DEFAULT_TIMEOUT) super(CachingRawQuerySet, self).__init__(*args, **kw) self.timeout = timeout def __iter__(self): iterator = super(CachingRawQuerySet, self).__iter__ if self.timeout == config.NO_CACHE: iterator = iterator() while True: try: yield next(iterator) except StopIteration: return else: for obj in CachingModelIterable( self, iter_function=iterator, timeout=self.timeout ): yield obj def query_key(self): return self.raw_query % tuple(self.params or []) def _function_cache_key(key): return make_key("f:%s" % key, with_locale=True) def cached(function, key_, duration=DEFAULT_TIMEOUT): """Only calls the function if ``key`` is not already in the cache.""" key = _function_cache_key(key_) val = cache.get(key) if val is None: log.debug("cache miss for %s" % key) val = function() cache.set(key, val, duration) else: log.debug("cache hit for %s" % key) return val def cached_with(obj, f, f_key, timeout=DEFAULT_TIMEOUT): """Helper for caching a function call within an object's flush list.""" try: obj_key = obj.query_key() if hasattr(obj, "query_key") else obj.cache_key except (AttributeError, EmptyResultSet): log.warning("%r cannot be cached." % encoding.smart_str(obj)) return f() key = "%s:%s" % tuple(map(encoding.smart_str, (f_key, obj_key))) # Put the key generated in cached() into this object's flush list. invalidator.add_to_flush_list({obj.flush_key(): [_function_cache_key(key)]}) return cached(f, key, timeout) class cached_method(object): """ Decorator to cache a method call in this object's flush list. The external cache will only be used once per (instance, args). After that a local cache on the object will be used. Lifted from werkzeug. """ def __init__(self, func): self.func = func functools.update_wrapper(self, func) def __get__(self, obj, type=None): if obj is None: return self _missing = object() value = obj.__dict__.get(self.__name__, _missing) if value is _missing: w = MethodWrapper(obj, self.func) obj.__dict__[self.__name__] = w return w return value class MethodWrapper(object): """ Wraps around an object's method for two-level caching. The first call for a set of (args, kwargs) will use an external cache. After that, an object-local dict cache will be used. 
""" def __init__(self, obj, func): self.obj = obj self.func = func functools.update_wrapper(self, func) self.cache = {} def __call__(self, *args, **kwargs): def k(o): return o.cache_key if hasattr(o, "cache_key") else o arg_keys = list(map(k, args)) kwarg_keys = [(key, k(val)) for key, val in list(kwargs.items())] key_parts = ("m", self.obj.cache_key, self.func.__name__, arg_keys, kwarg_keys) key = ":".join(map(encoding.smart_str, key_parts)) if key not in self.cache: f = functools.partial(self.func, self.obj, *args, **kwargs) self.cache[key] = cached_with(self.obj, f, key) return self.cache[key] django-cache-machine-1.2.0/caching/config.py000066400000000000000000000015261426135341000206350ustar00rootroot00000000000000from django.conf import settings NO_CACHE = -1 WHOLE_MODEL = "whole-model" CACHE_PREFIX = getattr(settings, "CACHE_PREFIX", "") FETCH_BY_ID = getattr(settings, "FETCH_BY_ID", False) FLUSH = CACHE_PREFIX + ":flush:" CACHE_EMPTY_QUERYSETS = getattr(settings, "CACHE_EMPTY_QUERYSETS", False) TIMEOUT = getattr(settings, "CACHE_COUNT_TIMEOUT", NO_CACHE) CACHE_INVALIDATE_ON_CREATE = getattr(settings, "CACHE_INVALIDATE_ON_CREATE", None) CACHE_MACHINE_NO_INVALIDATION = getattr( settings, "CACHE_MACHINE_NO_INVALIDATION", False ) CACHE_MACHINE_USE_REDIS = getattr(settings, "CACHE_MACHINE_USE_REDIS", False) _invalidate_on_create_values = (None, WHOLE_MODEL) if CACHE_INVALIDATE_ON_CREATE not in _invalidate_on_create_values: raise ValueError( "CACHE_INVALIDATE_ON_CREATE must be one of: " "%s" % _invalidate_on_create_values ) django-cache-machine-1.2.0/caching/ext.py000066400000000000000000000052501426135341000201660ustar00rootroot00000000000000from django.conf import settings from django.utils import encoding from jinja2 import nodes from jinja2.ext import Extension import caching.base class FragmentCacheExtension(Extension): """ Cache a chunk of template code based on a queryset. Since looping over querysets is the slowest thing we do, you should wrap you for loop with the cache tag. Uses the default timeout unless you pass a second argument. {% cache queryset[, timeout] %} ...template code... {% endcache %} Derived from the jinja2 documentation example. """ tags = set(["cache"]) def __init__(self, environment): super(FragmentCacheExtension, self).__init__(environment) def preprocess(self, source, name, filename=None): self.name = filename or name return source def parse(self, parser): # the first token is the token that started the tag. In our case # we only listen to ``'cache'`` so this will be a name token with # `cache` as value. We get the line number so that we can give # that line number to the nodes we create by hand. lineno = next(parser.stream).lineno # Use the filename + line number and first object for the cache key. name = "%s+%s" % (self.name, lineno) args = [nodes.Const(name), parser.parse_expression()] # If there is a comma, the user provided a timeout. If not, use # None as second parameter. timeout = nodes.Const(None) extra = nodes.Const([]) while parser.stream.skip_if("comma"): x = parser.parse_expression() if parser.stream.current.type == "assign": next(parser.stream) extra = parser.parse_expression() else: timeout = x args.extend([timeout, extra]) body = parser.parse_statements(["name:endcache"], drop_needle=True) self.process_cache_arguments(args) # now return a `CallBlock` node that calls our _cache_support # helper method on this extension. 
return nodes.CallBlock( self.call_method("_cache_support", args), [], [], body ).set_lineno(lineno) def process_cache_arguments(self, args): """Extension point for adding anything extra to the cache_support.""" pass def _cache_support(self, name, obj, timeout, extra, caller): """Cache helper callback.""" if settings.DEBUG: return caller() extra = ":".join(map(encoding.smart_str, extra)) key = "fragment:%s:%s" % (name, extra) return caching.base.cached_with(obj, caller, key, timeout) # Nice import name. cache = FragmentCacheExtension django-cache-machine-1.2.0/caching/invalidation.py000066400000000000000000000223221426135341000220460ustar00rootroot00000000000000import collections import functools import hashlib import logging import socket from urllib.parse import parse_qsl from django.conf import settings from django.core.cache import cache as default_cache from django.core.cache import caches from django.core.cache.backends.base import InvalidCacheBackendError from django.utils import encoding, translation from caching import config try: import redis as redislib except ImportError: redislib = None # Look for an own cache first before falling back to the default cache try: cache = caches["cache_machine"] except (InvalidCacheBackendError, ValueError): cache = default_cache log = logging.getLogger("caching.invalidation") def make_key(k, with_locale=True): """Generate the full key for ``k``, with a prefix.""" key = encoding.smart_bytes("%s:%s" % (config.CACHE_PREFIX, k)) if with_locale: key += encoding.smart_bytes(translation.get_language()) # memcached keys must be < 250 bytes and w/o whitespace, but it's nice # to see the keys when using locmem. return hashlib.md5(key).hexdigest() def flush_key(obj): """We put flush lists in the flush: namespace.""" key = obj if isinstance(obj, str) else obj.get_cache_key(incl_db=False) return config.FLUSH + make_key(key, with_locale=False) def byid(obj): key = obj if isinstance(obj, str) else obj.cache_key return make_key("byid:" + key) def safe_redis(return_type): """ Decorator to catch and log any redis errors. return_type (optionally a callable) will be returned if there is an error. """ def decorator(f): @functools.wraps(f) def wrapper(*args, **kw): try: return f(*args, **kw) except (socket.error, redislib.RedisError) as e: log.error("redis error: %s" % e) # log.error('%r\n%r : %r' % (f.__name__, args[1:], kw)) if hasattr(return_type, "__call__"): return return_type() else: return return_type return wrapper return decorator class Invalidator(object): def invalidate_objects(self, objects, is_new_instance=False, model_cls=None): """Invalidate all the flush lists for the given ``objects``.""" obj_keys = [k for o in objects for k in o._cache_keys()] flush_keys = [k for o in objects for k in o._flush_keys()] # If whole-model invalidation on create is enabled, include this model's # key in the list to be invalidated. Note that the key itself won't # contain anything in the cache, but its corresponding flush key will. 
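# In effect, creating a new object also drops every query cached against the model as a whole, so previously cached querysets pick up the new row.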
if ( config.CACHE_INVALIDATE_ON_CREATE == config.WHOLE_MODEL and is_new_instance and model_cls and hasattr(model_cls, "model_flush_key") ): flush_keys.append(model_cls.model_flush_key()) if not obj_keys or not flush_keys: return obj_keys, flush_keys = self.expand_flush_lists(obj_keys, flush_keys) if obj_keys: log.debug("deleting object keys: %s" % obj_keys) cache.delete_many(obj_keys) if flush_keys: log.debug("clearing flush lists: %s" % flush_keys) self.clear_flush_lists(flush_keys) def cache_objects(self, model, objects, query_key, query_flush): # Add this query to the flush list of each object. We include # query_flush so that other things can be cached against the queryset # and still participate in invalidation. flush_keys = [o.flush_key() for o in objects] flush_lists = collections.defaultdict(set) for key in flush_keys: log.debug("adding %s to %s" % (query_flush, key)) flush_lists[key].add(query_flush) flush_lists[query_flush].add(query_key) # Add this query to the flush key for the entire model, if enabled model_flush = model.model_flush_key() if config.CACHE_INVALIDATE_ON_CREATE == config.WHOLE_MODEL: flush_lists[model_flush].add(query_key) # Add each object to the flush lists of its foreign keys. for obj in objects: obj_flush = obj.flush_key() for key in obj._flush_keys(): if key not in (obj_flush, model_flush): log.debug("related: adding %s to %s" % (obj_flush, key)) flush_lists[key].add(obj_flush) if config.FETCH_BY_ID: flush_lists[key].add(byid(obj)) self.add_to_flush_list(flush_lists) def expand_flush_lists(self, obj_keys, flush_keys): """ Recursively search for flush lists and objects to invalidate. The search starts with the lists in `keys` and expands to any flush lists found therein. Returns ({objects to flush}, {flush keys found}). """ log.debug("in expand_flush_lists") obj_keys = set(obj_keys) search_keys = flush_keys = set(flush_keys) # Add other flush keys from the lists, which happens when a parent # object includes a foreign key. 
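# Keep expanding until a pass finds no new flush keys.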
while 1: new_keys = set() for key in self.get_flush_lists(search_keys): if key.startswith(config.FLUSH): new_keys.add(key) else: obj_keys.add(key) if new_keys: log.debug("search for %s found keys %s" % (search_keys, new_keys)) flush_keys.update(new_keys) search_keys = new_keys else: return obj_keys, flush_keys def add_to_flush_list(self, mapping): """Update flush lists with the {flush_key: [query_key,...]} map.""" flush_lists = collections.defaultdict(set) flush_lists.update(cache.get_many(list(mapping.keys()))) for key, list_ in list(mapping.items()): if flush_lists[key] is None: flush_lists[key] = set(list_) else: flush_lists[key].update(list_) cache.set_many(flush_lists) def get_flush_lists(self, keys): """Return a set of object keys from the lists in `keys`.""" return set( e for flush_list in [_f for _f in list(cache.get_many(keys).values()) if _f] for e in flush_list ) def clear_flush_lists(self, keys): """Remove the given keys from the database.""" cache.delete_many(keys) class RedisInvalidator(Invalidator): def safe_key(self, key): if " " in key or "\n" in key: log.warning('BAD KEY: "%s"' % key) return "" return key @safe_redis(None) def add_to_flush_list(self, mapping): """Update flush lists with the {flush_key: [query_key,...]} map.""" pipe = redis.pipeline(transaction=False) for key, list_ in list(mapping.items()): for query_key in list_: # Redis happily accepts unicode, but returns byte strings, # so manually encode and decode the keys on the flush list here pipe.sadd(self.safe_key(key), query_key.encode("utf-8")) pipe.execute() @safe_redis(set) def get_flush_lists(self, keys): flush_list = redis.sunion(list(map(self.safe_key, keys))) return [k.decode("utf-8") for k in flush_list] @safe_redis(None) def clear_flush_lists(self, keys): redis.delete(*list(map(self.safe_key, keys))) class NullInvalidator(Invalidator): def add_to_flush_list(self, mapping): return def parse_backend_uri(backend_uri): """ Converts the "backend_uri" into a host and any extra params that are required for the backend. Returns a (host, params) tuple. """ backend_uri_sliced = backend_uri.split("://") if len(backend_uri_sliced) > 2: raise InvalidCacheBackendError("Backend URI can't have more than one scheme://") elif len(backend_uri_sliced) == 2: rest = backend_uri_sliced[1] else: rest = backend_uri_sliced[0] host = rest qpos = rest.find("?") if qpos != -1: params = dict(parse_qsl(rest[qpos + 1 :])) host = rest[:qpos] else: params = {} if host.endswith("/"): host = host[:-1] return host, params def get_redis_backend(): """Connect to redis from a string like CACHE_BACKEND.""" # From django-redis-cache. 
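# REDIS_BACKEND is a URI such as 'redis://localhost:6379'; optional query params (db, password, socket_timeout) are split out below, e.g. 'redis://localhost:6379?db=1&socket_timeout=0.5'.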
server, params = parse_backend_uri(settings.REDIS_BACKEND) db = params.pop("db", 0) try: db = int(db) except (ValueError, TypeError): db = 0 try: socket_timeout = float(params.pop("socket_timeout")) except (KeyError, ValueError): socket_timeout = None password = params.pop("password", None) if ":" in server: host, port = server.split(":") try: port = int(port) except (ValueError, TypeError): port = 6379 else: host = "localhost" port = 6379 return redislib.Redis( host=host, port=port, db=db, password=password, socket_timeout=socket_timeout ) if config.CACHE_MACHINE_NO_INVALIDATION: invalidator = NullInvalidator() elif config.CACHE_MACHINE_USE_REDIS: redis = get_redis_backend() invalidator = RedisInvalidator() else: invalidator = Invalidator() django-cache-machine-1.2.0/dev-requirements.txt000066400000000000000000000002551426135341000214600ustar00rootroot00000000000000# These are the reqs to build docs and run tests. sphinx django-redis jinja2 redis flake8 coverage psycopg2-binary dj-database-url python-memcached>=1.58 tox tox-gh-actions django-cache-machine-1.2.0/docs/000077500000000000000000000000001426135341000163465ustar00rootroot00000000000000django-cache-machine-1.2.0/docs/Makefile000066400000000000000000000060701426135341000200110ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." 
qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/zamboni.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/zamboni.qhc" latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ "run these through (pdf)latex." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." django-cache-machine-1.2.0/docs/conf.py000066400000000000000000000014071426135341000176470ustar00rootroot00000000000000import os import sys import caching sys.path.append(os.path.abspath("..")) # The suffix of source filenames. source_suffix = ".rst" # The master toctree document. master_doc = "index" extensions = ["sphinx.ext.autodoc"] # General information about the project. project = "Cache Machine" copyright = "2010, The Zamboni Collective" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # version: The short X.Y version. # release: The full version, including alpha/beta/rc tags. version = release = caching.__version__ # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ["_build"] django-cache-machine-1.2.0/docs/index.rst000066400000000000000000000173121426135341000202130ustar00rootroot00000000000000.. _caching: ============= Cache Machine ============= Cache Machine provides automatic caching and invalidation for Django models through the ORM. The code is hosted on `github `_. For an overview of new features and backwards-incompatible changes which may affect you, please see the :ref:`release-notes`. Settings -------- Older versions of Cache Machine required you to use customized cache backends. These are no longer needed and they have been removed from Cache Machine. Use the standard Django cache backends. COUNT queries ^^^^^^^^^^^^^ Calls to ``QuerySet.count()`` can be cached, but they cannot be reliably invalidated. Cache Machine would have to do a full select to figure out the object keys, which is probably much more data than you want to pull. I recommend a short cache timeout; long enough to avoid repetitive queries, but short enough that stale counts won't be a big deal. :: CACHE_COUNT_TIMEOUT = 60 # seconds, not too long. By default, calls to ``QuerySet.count()`` are not cached. They are only cached if ``CACHE_COUNT_TIMEOUT`` is set to a value other than ``caching.base.NO_CACHE``. Empty querysets ^^^^^^^^^^^^^^^ By default cache machine will not cache empty querysets. To cache them:: CACHE_EMPTY_QUERYSETS = True .. 
_object-creation: Object creation ^^^^^^^^^^^^^^^ By default Cache Machine does not invalidate queries when a new object is created, because it can be expensive to maintain a flush list of all the queries associated with a given table and cause significant disruption on high-volume sites when *all* the queries for a particular model are invalidated at once. If these are not issues for your site and immediate inclusion of created objects in previously cached queries is desired, you can enable this feature as follows:: CACHE_INVALIDATE_ON_CREATE = 'whole-model' Cache Manager ------------- To enable caching for a model, add the :class:`~caching.base.CachingManager` to that class and inherit from the :class:`~caching.base.CachingMixin`. If you want related lookups (foreign keys) to hit the cache, ``CachingManager`` must be the default manager. If you have multiple managers that should be cached, return a :class:`~caching.base.CachingQuerySet` from the other manager's ``get_queryset`` method instead of subclassing ``CachingManager``, since that would hook up the post_save and post_delete signals multiple times. Here's what a minimal cached model looks like:: from django.db import models from caching.base import CachingManager, CachingMixin class Zomg(CachingMixin, models.Model): val = models.IntegerField() objects = CachingManager() # if you use Django 2.0 or later, you must set base_manager_name class Meta: base_manager_name = 'objects' # Attribute name of CachingManager(), above Whenever you run a query, ``CachingQuerySet`` will try to find that query in the cache. Queries are keyed by ``{prefix}:{sql}``. If it's there, we return the cached result set and everyone is happy. If the query isn't in the cache, the normal codepath to run a database query is executed. As the objects in the result set are iterated over, they are added to a list that will get cached once iteration is done. .. note:: Nothing will be cached if the QuerySet is not iterated through completely. Caching is supported for normal :class:`QuerySets ` and for :meth:`django.db.models.Manager.raw`. At this time, caching has not been implemented for ``QuerySet.values`` or ``QuerySet.values_list``. To support easy cache invalidation, we use "flush lists" to mark the cached queries an object belongs to. That way, all queries where an object was found will be invalidated when that object changes. Flush lists map an object key to a list of query keys. When an object is saved or deleted, all query keys in its flush list will be deleted. In addition, the flush lists of its foreign key relations will be cleared. To avoid stale foreign key relations, any cached objects will be flushed when the object their foreign key points to is invalidated. During cache invalidation, we explicitly set a None value instead of just deleting so we don't have any race conditions where: * Thread 1 -> Cache miss, get object from DB * Thread 2 -> Object saved, deleted from cache * Thread 1 -> Store (stale) object fetched from DB in cache The foundations of this module were derived from `Mike Malone's`_ `django-caching`_. .. _`Mike Malone's`: http://immike.net/ .. _django-caching: http://github.com/mmalone/django-caching/ Changing the timeout of a CachingQuerySet instance ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ By default, the timeout for a ``CachingQuerySet`` instance will be the timeout of the underlying cache being used by Cache Machine. 
To change the timeout of a ``CachingQuerySet`` instance, you can assign a different value to the ``timeout`` attribute which represents the number of seconds to cache for For example:: def get_objects(name): qs = CachedClass.objects.filter(name=name) qs.timeout = 5 # seconds return qs To disable caching for a particular ``CachingQuerySet`` instance, set the ``timeout`` attribute to ``caching.base.NO_CACHE``. Manual Caching -------------- Some things can be cached better outside of the ORM, so Cache Machine provides the function :func:`caching.base.cached` for caching arbitrary objects. Using this function gives you more control over what gets cached, and for how long, while abstracting a few repetitive elements. .. autofunction:: caching.base.cached Template Caching ---------------- Cache Machine includes a Jinja2 extension to cache template fragments based on a queryset or cache-aware object. These fragments will get invalidated on using the same rules as ``CachingQuerySets``. First, add it to your template environment:: env = jinja2.Environment(extensions=['caching.ext.cache']) .. highlight:: jinja Now wrap all your queryset looping with the ``cache`` tag. :: {% cache objects %} {# objects is a CachingQuerySet #} {% for obj in objects %} ... {% endfor %} {% endcache %} ...and for caching by single objects:: {% cache object %} ...expensive processing... {% endcache %} The tag can take an optional timeout. :: {% cache objects, 500 %} .. highlight:: python If someone wants to write a template tag for Django templates, I'd love to add it. Redis Support ------------- Cache Machine support storing flush lists in Redis rather than memcached, which is more efficient because Redis can manipulate the lists on the server side rather than having to transfer the entire list back and forth for each modification. To enable Redis support for Cache Machine, add the following to your settings file, replacing ``localhost`` with the hostname of your Redis server:: CACHE_MACHINE_USE_REDIS = True REDIS_BACKEND = 'redis://localhost:6379' .. note:: When using Redis, memcached is still used for caching model objects, i.e., only the flush lists are stored in Redis. You still need to configure ``CACHES`` the way you would normally for Cache Machine. Classes That May Interest You ----------------------------- .. autoclass:: caching.base.CachingModelIterable .. autoclass:: caching.base.CachingManager :members: This :class:`manager ` always returns a :class:`~caching.CachingQuerySet`, and hooks up ``post_save`` and ``post_delete`` signals to invalidate caches. .. autoclass:: caching.base.CachingMixin :members: .. class:: caching.base.CachingQuerySet Overrides the default :class:`~django.db.models.QuerySet` to fetch objects from cache before hitting the database. django-cache-machine-1.2.0/docs/releases.rst000066400000000000000000000054161426135341000207110ustar00rootroot00000000000000.. _release-notes: Release Notes ================== v1.2.0 (2022-07-06) ------------------- - Drop official support for unsupported Django versions (1.11, 2.0, 2.1) - Add support for Django 3.0, 3.1, 3.2, and 4.0 (thanks, @johanneswilm and @Afani97!) - Add support for Python 3.8, 3.9, and 3.10 - Switch to GitHub Actions v1.1.0 (2019-02-17) ------------------- - Drop official support for unsupported Django versions (1.8, 1.9, and 1.10) - Add support for Django 2.0, 2.1, and 2.2 (thanks, @JungleKim and @wetneb!) 
- Add support for Python 3.7 - Fix Travis v1.0.0 (2017-10-13) ------------------- - Update Travis and Tox configurations - Drop support for Python < 2.7 - Add support for Python 3.5 and 3.6 - Drop support for Django < 1.8 - Add support for Django 1.9, 1.10, and 1.11 - Removed all custom cache backends. - Flake8 fixes Backwards Incompatible Changes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - Cache Machine previously included custom backends for LocMem, Memcached and PyLibMemcached. These were necessary because the core backends in old versions of Django did not support infinte timeouts. They now do, so Cache Machine's custom backends are no longer necessary. They have been removed, so you should revert to using the core Django backends. v0.9.1 (2015-10-22) ------------------- - Fix bug that prevented objects retrieved via cache machine from being re-cached by application code (see PR #103) - Fix bug that prevented caching objects forever when using Django <= 1.5 (see PR #104) - Fix regression (introduced in 0.8) that broke invalidation when an object was cached via a replica database and later modified or deleted via the primary database, when using primary/replica replication (see PR #105). Note this change may cause unexpected invalidation when sharding across DBs that share both a schema and primary key values or other attributes. v0.9 (2015-07-29) ----------------- - Support for Python 3 - A new setting, ``CACHE_INVALIDATE_ON_CREATE``, which facilitates invalidation when a new model object is created. For more information, see :ref:`object-creation`. v0.8.1 (2015-07-03) ----------------------- This release is primarily aimed at adding support for more recent versions of Django and catching up on recent contributions. - Support for Django 1.7 and Django 1.8 - Fix bug in parsing of ``REDIS_BACKEND`` URI - Miscellaneous bug fixes and documentation corrections Backwards Incompatible Changes ________________________________ - Dropped support for the old style ``caching.backends.memcached.CacheClass`` and ``caching.backends.locmem.CacheClass`` classes. Support for this naming has been deprecated since Django 1.3. You will need to switch your project to use ``MemcachedCache``, ``PyLibMCCache``, or ``LocMemCache`` in place of ``CacheClass``. 
django-cache-machine-1.2.0/examples/000077500000000000000000000000001426135341000172345ustar00rootroot00000000000000django-cache-machine-1.2.0/examples/cache_machine/000077500000000000000000000000001426135341000217635ustar00rootroot00000000000000django-cache-machine-1.2.0/examples/cache_machine/__init__.py000066400000000000000000000000001426135341000240620ustar00rootroot00000000000000django-cache-machine-1.2.0/examples/cache_machine/custom_backend.py000066400000000000000000000004401426135341000253140ustar00rootroot00000000000000# flake8: noqa from .settings import * CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, "cache_machine": { "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", "LOCATION": "localhost:11211", }, } django-cache-machine-1.2.0/examples/cache_machine/django_redis_settings.py000066400000000000000000000002671426135341000267120ustar00rootroot00000000000000# flake8: noqa from .redis_settings import * CACHES = { "default": { "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://127.0.0.1:6379/0", }, } django-cache-machine-1.2.0/examples/cache_machine/locmem_settings.py000066400000000000000000000002211426135341000255240ustar00rootroot00000000000000# flake8: noqa from .settings import * CACHES = { "default": { "BACKEND": "django.core.cache.backends.locmem.LocMemCache", }, } django-cache-machine-1.2.0/examples/cache_machine/memcache_byid.py000066400000000000000000000000731426135341000251060ustar00rootroot00000000000000# flake8: noqa from .settings import * FETCH_BY_ID = True django-cache-machine-1.2.0/examples/cache_machine/redis_byid.py000066400000000000000000000001011426135341000244420ustar00rootroot00000000000000# flake8: noqa from .redis_settings import * FETCH_BY_ID = True django-cache-machine-1.2.0/examples/cache_machine/redis_settings.py000066400000000000000000000001421426135341000253600ustar00rootroot00000000000000# flake8: noqa from .settings import * CACHE_MACHINE_USE_REDIS = True REDIS_BACKEND = "redis://" django-cache-machine-1.2.0/examples/cache_machine/settings.py000066400000000000000000000022661426135341000242030ustar00rootroot00000000000000import os import dj_database_url import django CACHES = { "default": { "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", "LOCATION": "localhost:11211", }, } DATABASES = { "default": dj_database_url.config(default="postgres:///cache_machine_devel"), "primary2": dj_database_url.parse( os.getenv("DATABASE_URL_2", "postgres:///cache_machine_devel2") ), } for primary, replica in (("default", "replica"), ("primary2", "replica2")): DATABASES[replica] = DATABASES[primary].copy() DATABASES[replica]["TEST"] = {"MIRROR": primary} DEFAULT_AUTO_FIELD = "django.db.models.AutoField" INSTALLED_APPS = ("tests.testapp",) SECRET_KEY = "ok" MIDDLEWARE_CLASSES = ( "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.auth.middleware.SessionAuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ) if django.VERSION[0] >= 2: MIDDLEWARE = MIDDLEWARE_CLASSES django-cache-machine-1.2.0/run_tests.py000066400000000000000000000041471426135341000200240ustar00rootroot00000000000000""" Creating standalone Django apps is a PITA because you're not in a project, so you don't have a settings.py file. 
I can never remember to define DJANGO_SETTINGS_MODULE, so I run these commands which get the right env automatically. """ import argparse import os import sys from subprocess import call, check_output NAME = os.path.basename(os.path.dirname(__file__)) ROOT = os.path.abspath(os.path.dirname(__file__)) os.environ["PYTHONPATH"] = os.pathsep.join([ROOT, os.path.join(ROOT, "examples")]) SETTINGS = ( "locmem_settings", "settings", "memcache_byid", "custom_backend", "redis_settings", "redis_byid", "django_redis_settings", ) def main(): parser = argparse.ArgumentParser( description="Run the tests for django-cache-machine. " "If no options are specified, tests will be run with " "all settings files and without coverage.py." ) parser.add_argument( "--with-coverage", action="store_true", help="Run tests with coverage.py and display coverage report", ) parser.add_argument( "--settings", choices=SETTINGS, help="Run tests only for the specified settings file", ) args = parser.parse_args() settings = args.settings and [args.settings] or SETTINGS results = [] django_admin = check_output(["which", "django-admin"]).strip() for i, settings_module in enumerate(settings): print("Running tests for: %s" % settings_module) os.environ["DJANGO_SETTINGS_MODULE"] = "cache_machine.%s" % settings_module # append to the existing coverage data for all but the first run if args.with_coverage and i > 0: test_cmd = ["coverage", "run", "--append"] elif args.with_coverage: test_cmd = ["coverage", "run"] else: test_cmd = [] test_cmd += [django_admin, "test", "--keepdb"] results.append(call(test_cmd)) if args.with_coverage: results.append(call(["coverage", "report", "-m", "--fail-under", "70"])) sys.exit(any(results) and 1 or 0) if __name__ == "__main__": main() django-cache-machine-1.2.0/setup.cfg000066400000000000000000000001631426135341000172370ustar00rootroot00000000000000[flake8] max-line-length = 88 extend-ignore = E203 exclude= .tox .git .direnv [isort] profile = black django-cache-machine-1.2.0/setup.py000066400000000000000000000024261426135341000171340ustar00rootroot00000000000000from setuptools import setup import caching setup( name="django-cache-machine", version=caching.__version__, description="Automatic caching and invalidation for Django models " "through the ORM.", long_description=open("README.rst").read(), author="Jeff Balogh", author_email="jbalogh@mozilla.com", url="http://github.com/django-cache-machine/django-cache-machine", license="BSD", packages=["caching"], include_package_data=True, zip_safe=False, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Web Environment", # I don't know what exactly this means, but why not? 
"Environment :: Web Environment :: Mozilla", "Framework :: Django", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Topic :: Software Development :: Libraries :: Python Modules", ], ) django-cache-machine-1.2.0/tests/000077500000000000000000000000001426135341000165605ustar00rootroot00000000000000django-cache-machine-1.2.0/tests/__init__.py000066400000000000000000000000001426135341000206570ustar00rootroot00000000000000django-cache-machine-1.2.0/tests/test_cache.py000066400000000000000000000563441426135341000212500ustar00rootroot00000000000000from __future__ import unicode_literals import logging import pickle import sys import unittest import jinja2 from django.conf import settings from django.core.cache.backends.base import DEFAULT_TIMEOUT from django.test import TestCase, TransactionTestCase from django.utils import encoding, translation from caching import base, config, invalidation from .testapp.models import Addon, User if sys.version_info >= (3,): from unittest import mock else: import mock cache = invalidation.cache log = logging.getLogger(__name__) class CachingTestCase(TestCase): fixtures = ["tests/testapp/fixtures/testapp/test_cache.json"] extra_apps = ["tests.testapp"] def setUp(self): cache.clear() self.old_timeout = config.TIMEOUT if getattr(settings, "CACHE_MACHINE_USE_REDIS", False): invalidation.redis.flushall() def tearDown(self): config.TIMEOUT = self.old_timeout def test_flush_key(self): """flush_key should work for objects or strings.""" a = Addon.objects.get(id=1) self.assertEqual( base.flush_key(a.get_cache_key(incl_db=False)), base.flush_key(a) ) def test_cache_key(self): a = Addon.objects.get(id=1) self.assertEqual(a.cache_key, "o:testapp.addon:1:default") keys = set((a.cache_key, a.author1.cache_key, a.author2.cache_key)) self.assertEqual(set(a._cache_keys()), keys) def test_cache(self): """Basic cache test: second get comes from cache.""" self.assertIs(Addon.objects.get(id=1).from_cache, False) self.assertIs(Addon.objects.get(id=1).from_cache, True) def test_filter_cache(self): self.assertIs(Addon.objects.filter(id=1)[0].from_cache, False) self.assertIs(Addon.objects.filter(id=1)[0].from_cache, True) def test_slice_cache(self): self.assertIs(Addon.objects.filter(id=1)[:1][0].from_cache, False) self.assertIs(Addon.objects.filter(id=1)[:1][0].from_cache, True) def test_should_not_cache_values(self): with self.assertNumQueries(2): Addon.objects.values("id")[0] Addon.objects.values("id")[0] def test_should_not_cache_values_list(self): with self.assertNumQueries(2): Addon.objects.values_list("id")[0] Addon.objects.values_list("id")[0] def test_invalidation(self): self.assertIs(Addon.objects.get(id=1).from_cache, False) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, False) self.assertIs(Addon.objects.get(id=1).from_cache, True) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, True) a.save() self.assertIs(Addon.objects.get(id=1).from_cache, False) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, False) self.assertIs(Addon.objects.get(id=1).from_cache, True) a = [x for x in Addon.objects.all() if x.id == 1][0] 
self.assertIs(a.from_cache, True) def test_invalidation_cross_locale(self): self.assertIs(Addon.objects.get(id=1).from_cache, False) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, False) self.assertIs(Addon.objects.get(id=1).from_cache, True) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, True) # Do query & invalidation in a different locale. old_locale = translation.get_language() translation.activate("fr") self.assertIs(Addon.objects.get(id=1).from_cache, True) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, True) a.save() translation.activate(old_locale) self.assertIs(Addon.objects.get(id=1).from_cache, False) a = [x for x in Addon.objects.all() if x.id == 1][0] self.assertIs(a.from_cache, False) def test_fk_invalidation(self): """When an object is invalidated, its foreign keys get invalidated.""" a = Addon.objects.get(id=1) self.assertIs(User.objects.get(name="clouseroo").from_cache, False) a.save() self.assertIs(User.objects.get(name="clouseroo").from_cache, False) def test_fk_parent_invalidation(self): """When a foreign key changes, any parent objects get invalidated.""" self.assertIs(Addon.objects.get(id=1).from_cache, False) a = Addon.objects.get(id=1) self.assertIs(a.from_cache, True) u = User.objects.get(id=a.author1.id) self.assertIs(u.from_cache, True) u.name = "fffuuu" u.save() self.assertIs(User.objects.get(id=a.author1.id).from_cache, False) a = Addon.objects.get(id=1) self.assertIs(a.from_cache, False) self.assertEqual(a.author1.name, "fffuuu") def test_raw_cache(self): sql = "SELECT * FROM %s WHERE id = 1" % Addon._meta.db_table raw = list(Addon.objects.raw(sql)) self.assertEqual(len(raw), 1) raw_addon = raw[0] a = Addon.objects.get(id=1) for field in Addon._meta.fields: self.assertEqual(getattr(a, field.name), getattr(raw_addon, field.name)) self.assertIs(raw_addon.from_cache, False) cached = list(Addon.objects.raw(sql)) self.assertEqual(len(cached), 1) cached_addon = cached[0] a = Addon.objects.get(id=1) for field in Addon._meta.fields: self.assertEqual(getattr(a, field.name), getattr(cached_addon, field.name)) self.assertIs(cached_addon.from_cache, True) def test_raw_cache_params(self): """Make sure the query params are included in the cache key.""" sql = "SELECT * from %s WHERE id = %%s" % Addon._meta.db_table raw = list(Addon.objects.raw(sql, [1]))[0] self.assertEqual(raw.id, 1) raw2 = list(Addon.objects.raw(sql, [2]))[0] self.assertEqual(raw2.id, 2) @mock.patch("caching.base.CachingModelIterable") def test_raw_nocache(self, CachingModelIterable): base.TIMEOUT = 60 sql = "SELECT * FROM %s WHERE id = 1" % Addon._meta.db_table raw = list(Addon.objects.raw(sql, timeout=config.NO_CACHE)) self.assertEqual(len(raw), 1) raw_addon = raw[0] self.assertFalse(hasattr(raw_addon, "from_cache")) self.assertFalse(CachingModelIterable.called) @mock.patch("caching.base.cache") def test_count_cache(self, cache_mock): config.TIMEOUT = 60 cache_mock.scheme = "memcached" cache_mock.get.return_value = None q = Addon.objects.all() q.count() self.assertTrue(cache_mock.set.call_args, "set not called") args, kwargs = cache_mock.set.call_args key, value, timeout = args self.assertEqual(value, 2) self.assertEqual(timeout, 60) @mock.patch("caching.base.cached") def test_count_none_timeout(self, cached_mock): config.TIMEOUT = config.NO_CACHE Addon.objects.count() self.assertEqual(cached_mock.call_count, 0) @mock.patch("caching.base.cached") def test_count_nocache(self, cached_mock): base.TIMEOUT = 
        Addon.objects.no_cache().count()
        self.assertEqual(cached_mock.call_count, 0)

    def test_queryset_flush_list(self):
        """Check that we're making a flush list for the queryset."""
        q = Addon.objects.all()
        objects = list(q)  # Evaluate the queryset so it gets cached.
        base.invalidator.add_to_flush_list({q.flush_key(): ["remove-me"]})
        cache.set("remove-me", 15)

        Addon.objects.invalidate(objects[0])
        self.assertIs(cache.get(q.flush_key()), None)
        self.assertIs(cache.get("remove-me"), None)

    def test_jinja_cache_tag_queryset(self):
        env = jinja2.Environment(extensions=["caching.ext.cache"])

        def check(q, expected):
            t = env.from_string(
                "{% cache q %}{% for x in q %}{{ x.id }}:{{ x.val }};"
                "{% endfor %}{% endcache %}"
            )
            self.assertEqual(t.render(q=q), expected)

        # Get the template in cache, then hijack iterator to make sure we're
        # hitting the cached fragment.
        check(Addon.objects.all(), "1:42;2:42;")
        qs = Addon.objects.all()
        qs.iterator = mock.Mock()
        check(qs, "1:42;2:42;")
        self.assertFalse(qs.iterator.called)

        # Make changes, make sure we dropped the cached fragment.
        a = Addon.objects.get(id=1)
        a.val = 17
        a.save()

        q = Addon.objects.all()
        cache.get(q.flush_key())
        self.assertIs(cache.get(q.flush_key()), None)

        check(Addon.objects.all(), "1:17;2:42;")
        qs = Addon.objects.all()
        qs.iterator = mock.Mock()
        check(qs, "1:17;2:42;")

    def test_jinja_cache_tag_object(self):
        env = jinja2.Environment(extensions=["caching.ext.cache"])
        addon = Addon.objects.get(id=1)

        def check(obj, expected):
            t = env.from_string(
                "{% cache obj, 30 %}{{ obj.id }}:{{ obj.val }}{% endcache %}"
            )
            self.assertEqual(t.render(obj=obj), expected)

        check(addon, "1:42")
        addon.val = 17
        addon.save()
        check(addon, "1:17")

    def test_jinja_multiple_tags(self):
        env = jinja2.Environment(extensions=["caching.ext.cache"])
        addon = Addon.objects.get(id=1)
        template = (
            "{% cache obj %}{{ obj.id }}{% endcache %}\n"
            "{% cache obj %}{{ obj.val }}{% endcache %}"
        )

        def check(obj, expected):
            t = env.from_string(template)
            self.assertEqual(t.render(obj=obj), expected)

        check(addon, "1\n42")
        addon.val = 17
        addon.save()
        check(addon, "1\n17")

    def test_jinja_cache_tag_extra(self):
        env = jinja2.Environment(extensions=["caching.ext.cache"])
        addon = Addon.objects.get(id=1)
        template = (
            "{% cache obj, extra=[obj.key] %}{{ obj.id }}:"
            "{{ obj.key }}{% endcache %}"
        )

        def check(obj, expected):
            t = env.from_string(template)
            self.assertEqual(t.render(obj=obj), expected)

        addon.key = 1
        check(addon, "1:1")
        addon.key = 2
        check(addon, "1:2")

        template = (
            "{% cache obj, 10, extra=[obj.key] %}{{ obj.id }}:"
            "{{ obj.key }}{% endcache %}"
        )
        addon.key = 1
        check(addon, "1:1")
        addon.key = 2
        check(addon, "1:2")

    def test_cached_with(self):
        counter = mock.Mock()

        def expensive():
            counter()
            return counter.call_count

        a = Addon.objects.get(id=1)

        def f():
            return base.cached_with(a, expensive, "key")

        # Only gets called once.
        self.assertEqual(f(), 1)
        self.assertEqual(f(), 1)

        # Switching locales does not reuse the cache.
        old_locale = translation.get_language()
        translation.activate("fr")
        self.assertEqual(f(), 2)

        # Called again after flush.
        a.save()
        self.assertEqual(f(), 3)

        translation.activate(old_locale)
        self.assertEqual(f(), 4)

        counter.reset_mock()
        q = Addon.objects.filter(id=1)

        def f():
            return base.cached_with(q, expensive, "key")

        # Only gets called once.
        self.assertEqual(f(), 1)
        self.assertEqual(f(), 1)

        # Called again after flush.
        list(q)[0].save()
        self.assertEqual(f(), 2)
        self.assertEqual(f(), 2)

    def test_cached_with_bad_object(self):
        """cached_with shouldn't fail if the object is missing a cache key."""
        counter = mock.Mock()

        def f():
            counter()
            return counter.call_count

        self.assertEqual(base.cached_with([], f, "key"), 1)

    def test_cached_with_unicode(self):
        u = encoding.smart_bytes(
            "\\u05ea\\u05d9\\u05d0\\u05d5\\u05e8 " "\\u05d0\\u05d5\\u05e1\\u05e3"
        )
        obj = mock.Mock()
        obj.query_key.return_value = "xxx"
        obj.flush_key.return_value = "key"

        def f():
            return 1

        self.assertEqual(base.cached_with(obj, f, "adf:%s" % u), 1)

    def test_cached_method(self):
        a = Addon.objects.get(id=1)
        self.assertEqual(a.calls(), (1, 1))
        self.assertEqual(a.calls(), (1, 1))

        a.save()
        # Still returns 1 since the object has its own local cache.
        self.assertEqual(a.calls(), (1, 1))
        self.assertEqual(a.calls(3), (3, 2))

        a = Addon.objects.get(id=1)
        self.assertEqual(a.calls(), (1, 3))
        self.assertEqual(a.calls(4), (4, 4))
        self.assertEqual(a.calls(3), (3, 2))

        b = Addon.objects.create(id=5, val=32, author1_id=1, author2_id=2)
        self.assertEqual(b.calls(), (1, 5))

        # Make sure we're updating the wrapper's docstring.
        self.assertEqual(b.calls.__doc__, Addon.calls.__doc__)

    @mock.patch("caching.base.cache.get")
    def test_no_cache_from_manager(self, mock_cache):
        a = Addon.objects.no_cache().get(id=1)
        self.assertEqual(a.id, 1)
        self.assertFalse(hasattr(a, "from_cache"))
        self.assertFalse(mock_cache.called)

    @mock.patch("caching.base.cache.get")
    def test_no_cache_from_queryset(self, mock_cache):
        a = Addon.objects.all().no_cache().get(id=1)
        self.assertEqual(a.id, 1)
        self.assertFalse(hasattr(a, "from_cache"))
        self.assertFalse(mock_cache.called)

    def test_timeout_from_manager(self):
        q = Addon.objects.cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)
        a = q.get()
        self.assertTrue(hasattr(a, "from_cache"))
        self.assertEqual(a.id, 1)

    def test_timeout_from_queryset(self):
        q = Addon.objects.all().cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)
        a = q.get()
        self.assertTrue(hasattr(a, "from_cache"))
        self.assertEqual(a.id, 1)

    @unittest.skipUnless(
        any(["memcache" in c["BACKEND"] for c in settings.CACHES.values()]),
        "This test requires that Django use memcache",
    )
    @mock.patch("memcache.Client.set")
    def test_infinite_timeout(self, mock_set):
        """
        Test that memcached infinite timeouts work with all Django versions.
        """
        cache.set("foo", "bar", timeout=None)
        # for memcached, 0 timeout means store forever
        mock_set.assert_called_with(":1:foo", "bar", 0)

    def test_cache_and_no_cache(self):
        """Whatever happens last sticks."""
        q = Addon.objects.no_cache().cache(12).filter(id=1)
        self.assertEqual(q.timeout, 12)

        no_cache = q.no_cache()

        # The querysets don't share anything.
        self.assertEqual(q.timeout, 12)
        self.assertNotEqual(no_cache.timeout, 12)

        self.assertFalse(hasattr(no_cache.get(), "from_cache"))

        self.assertEqual(q.get().id, 1)
        self.assertTrue(hasattr(q.get(), "from_cache"))

    @mock.patch("caching.base.cache")
    def test_cache_machine_timeout(self, cache):
        cache.scheme = "memcached"
        cache.get.return_value = None
        cache.get_many.return_value = {}

        a = Addon.objects.cache(12).get(id=1)
        self.assertEqual(a.id, 1)

        self.assertTrue(cache.add.called)
        args, kwargs = cache.add.call_args
        self.assertEqual(kwargs, {"timeout": 12})

    def test_unicode_key(self):
        list(User.objects.filter(name="\\xfcmla\\xfct"))

    def test_empty_in(self):
        # Raised an exception before fixing #2.
        self.assertEqual([], list(User.objects.filter(pk__in=[])))

    def test_empty_in_count(self):
        # Regression test for #14.
        self.assertEqual(0, User.objects.filter(pk__in=[]).count())

    def test_empty_queryset(self):
        for k in (1, 1):
            with self.assertNumQueries(k):
                self.assertEqual(len(Addon.objects.filter(pk=42)), 0)

    @mock.patch("caching.config.CACHE_EMPTY_QUERYSETS", True)
    def test_cache_empty_queryset(self):
        for k in (1, 0):
            with self.assertNumQueries(k):
                self.assertEqual(len(Addon.objects.filter(pk=42)), 0)

    def test_invalidate_empty_queryset(self):
        u = User.objects.create()
        self.assertEqual(list(u.addon_set.all()), [])
        Addon.objects.create(val=42, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42])

    def test_invalidate_new_related_object(self):
        u = User.objects.create()
        Addon.objects.create(val=42, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42])
        Addon.objects.create(val=17, author1=u, author2=u)
        self.assertEqual([a.val for a in u.addon_set.all()], [42, 17])

    def test_make_key_unicode(self):
        translation.activate("en-US")
        f = "fragment\xe9\x9b\xbb\xe8\x85\xa6\xe7\x8e"
        # This would crash with a unicode error.
        base.make_key(f, with_locale=True)
        translation.deactivate()

    @mock.patch("caching.invalidation.cache.get_many")
    def test_get_flush_lists_none(self, cache_mock):
        if not getattr(settings, "CACHE_MACHINE_USE_REDIS", False):
            cache_mock.return_value.values.return_value = [None, [1]]
            self.assertEqual(base.invalidator.get_flush_lists(None), set([1]))

    def test_parse_backend_uri(self):
        """Test that parse_backend_uri works as intended. Regression for #92."""
        from caching.invalidation import parse_backend_uri

        uri = "redis://127.0.0.1:6379?socket_timeout=5"
        host, params = parse_backend_uri(uri)
        self.assertEqual(host, "127.0.0.1:6379")
        self.assertEqual(params, {"socket_timeout": "5"})

    @mock.patch("caching.config.CACHE_INVALIDATE_ON_CREATE", "whole-model")
    def test_invalidate_on_create_enabled(self):
        """Test that creating new objects invalidates cached queries for that model."""
        self.assertEqual([a.name for a in User.objects.all()], ["fliggy", "clouseroo"])
        User.objects.create(name="spam")
        users = User.objects.all()

        # our new user should show up and the query should not have come from the cache
        self.assertEqual([a.name for a in users], ["fliggy", "clouseroo", "spam"])
        self.assertFalse(any([u.from_cache for u in users]))

        # if we run it again, it should be cached this time
        users = User.objects.all()
        self.assertEqual([a.name for a in users], ["fliggy", "clouseroo", "spam"])
        self.assertTrue(all([u.from_cache for u in User.objects.all()]))

    @mock.patch("caching.config.CACHE_INVALIDATE_ON_CREATE", None)
    def test_invalidate_on_create_disabled(self):
        """
        Test that creating new objects does NOT invalidate cached queries when
        whole-model invalidation on create is disabled.
        """
        users = User.objects.all()
        self.assertTrue(users, "Can't run this test without some users")
        self.assertFalse(any([u.from_cache for u in users]))
        User.objects.create(name="spam")
        self.assertTrue(all([u.from_cache for u in User.objects.all()]))

    def test_pickle_queryset(self):
        """
        Test for CachingQuerySet.__getstate__ and CachingQuerySet.__setstate__.
        """
        # Make sure CachingQuerySet.timeout, when set to DEFAULT_TIMEOUT, can be safely
        # pickled/unpickled on/from different Python processes which may have different
        # underlying values for DEFAULT_TIMEOUT:
        q1 = Addon.objects.all()
        self.assertEqual(q1.timeout, DEFAULT_TIMEOUT)
        pickled = pickle.dumps(q1)
        new_timeout = object()
        with mock.patch("caching.base.DEFAULT_TIMEOUT", new_timeout):
            q2 = pickle.loads(pickled)
            self.assertEqual(q2.timeout, new_timeout)

        # Make sure values other than DEFAULT_TIMEOUT remain unaffected:
        q1 = Addon.objects.cache(10).all()
        self.assertEqual(q1.timeout, 10)
        pickled = pickle.dumps(q1)
        with mock.patch("caching.base.DEFAULT_TIMEOUT", new_timeout):
            q2 = pickle.loads(pickled)
            self.assertEqual(q2.timeout, 10)


# use TransactionTestCase so that ['TEST']['MIRROR'] setting works
# see https://code.djangoproject.com/ticket/23718
class MultiDbTestCase(TransactionTestCase):
    databases = {"default", "primary2", "replica", "replica2"}
    fixtures = ["tests/testapp/fixtures/testapp/test_cache.json"]
    extra_apps = ["tests.testapp"]

    def test_multidb_cache(self):
        """Test where primary and replica DB result in two different cache keys"""
        self.assertIs(Addon.objects.get(id=1).from_cache, False)
        self.assertIs(Addon.objects.get(id=1).from_cache, True)

        from_replica = Addon.objects.using("replica").get(id=1)
        self.assertIs(from_replica.from_cache, False)
        self.assertEqual(from_replica._state.db, "replica")

    def test_multidb_fetch_by_id(self):
        """
        Test where primary and replica DB result in two different cache keys
        with FETCH_BY_ID
        """
        with self.settings(FETCH_BY_ID=True):
            self.assertIs(Addon.objects.get(id=1).from_cache, False)
            self.assertIs(Addon.objects.get(id=1).from_cache, True)

            from_replica = Addon.objects.using("replica").get(id=1)
            self.assertIs(from_replica.from_cache, False)
            self.assertEqual(from_replica._state.db, "replica")

    def test_multidb_primary_replica_invalidation(self):
        """Test saving an object on one DB invalidates it for all DBs"""
        log.debug("priming the DB & cache")
        primary_obj = User.objects.using("default").create(name="new-test-user")
        replica_obj = User.objects.using("replica").get(name="new-test-user")
        self.assertIs(replica_obj.from_cache, False)

        log.debug("deleting the original object")
        User.objects.using("default").filter(pk=replica_obj.pk).delete()

        log.debug("re-creating record with a new primary key")
        primary_obj = User.objects.using("default").create(name="new-test-user")

        log.debug("attempting to force re-fetch from DB (should not use cache)")
        replica_obj = User.objects.using("replica").get(name="new-test-user")
        self.assertIs(replica_obj.from_cache, False)
        self.assertEqual(replica_obj.pk, primary_obj.pk)

    def test_multidb_no_db_crossover(self):
        """Test no crossover of objects with identical PKs"""
        primary_obj = User.objects.using("default").create(name="new-test-user")
        primary_obj2 = User.objects.using("primary2").create(
            pk=primary_obj.pk,
            name="other-test-user",
        )

        # prime the cache for the default DB
        primary_obj = User.objects.using("default").get(name="new-test-user")
        self.assertIs(primary_obj.from_cache, False)
        primary_obj = User.objects.using("default").get(name="new-test-user")
        self.assertIs(primary_obj.from_cache, True)

        # prime the cache for the 2nd primary DB
        primary_obj2 = User.objects.using("primary2").get(name="other-test-user")
        self.assertIs(primary_obj2.from_cache, False)
        primary_obj2 = User.objects.using("primary2").get(name="other-test-user")
        self.assertIs(primary_obj2.from_cache, True)

        # ensure no crossover between databases
        self.assertNotEqual(primary_obj.name, primary_obj2.name)
django-cache-machine-1.2.0/tests/testapp/000077500000000000000000000000001426135341000202405ustar00rootroot00000000000000
django-cache-machine-1.2.0/tests/testapp/__init__.py000066400000000000000000000000001426135341000223370ustar00rootroot00000000000000
django-cache-machine-1.2.0/tests/testapp/fixtures/000077500000000000000000000000001426135341000221115ustar00rootroot00000000000000
django-cache-machine-1.2.0/tests/testapp/fixtures/testapp/000077500000000000000000000000001426135341000235715ustar00rootroot00000000000000
django-cache-machine-1.2.0/tests/testapp/fixtures/testapp/test_cache.json000066400000000000000000000007121426135341000265660ustar00rootroot00000000000000
[
  {
    "pk": 1,
    "model": "testapp.user",
    "fields": {
      "name": "fliggy"
    }
  },
  {
    "pk": 2,
    "model": "testapp.user",
    "fields": {
      "name": "clouseroo"
    }
  },
  {
    "pk": 1,
    "model": "testapp.addon",
    "fields": {
      "author2": 1,
      "author1": 2,
      "val": 42
    }
  },
  {
    "pk": 2,
    "model": "testapp.addon",
    "fields": {
      "author2": 1,
      "author1": 2,
      "val": 42
    }
  }
]
django-cache-machine-1.2.0/tests/testapp/models.py000066400000000000000000000021401426135341000220720ustar00rootroot00000000000000
from unittest import mock

import django
from django.db import models

from caching.base import CachingManager, CachingMixin, cached_method

# This global call counter will be shared among all instances of an Addon.
call_counter = mock.Mock()


class User(CachingMixin, models.Model):
    name = models.CharField(max_length=30)

    objects = CachingManager()

    if django.VERSION[0] >= 2:

        class Meta:
            # Tell Django to use this manager when resolving foreign keys.
            # (Django >= 2.0)
            base_manager_name = "objects"


class Addon(CachingMixin, models.Model):
    val = models.IntegerField()
    author1 = models.ForeignKey(User, on_delete=models.CASCADE)
    author2 = models.ForeignKey(
        User, related_name="author2_set", on_delete=models.CASCADE
    )

    objects = CachingManager()

    class Meta:
        # without this, Postgres & SQLite return objects in different orders:
        ordering = ("pk",)

    @cached_method
    def calls(self, arg=1):
        """This is a docstring for calls()"""
        call_counter()
        return arg, call_counter.call_count
django-cache-machine-1.2.0/tox.ini000066400000000000000000000016701426135341000167350ustar00rootroot00000000000000
# Tox (http://tox.testrun.org/) is a tool for running tests
# in multiple virtualenvs. This configuration file will run the
# test suite on all supported python versions. To use it, "pip install tox"
# and then run "tox" from this directory.

[tox]
envlist = py3{6,7,8,9}-{2.2,3.0,3.1,3.2},py310-3.2,py3{8,9,10}-{4.0}

[gh-actions]
python =
    3.6: py36
    3.7: py37
    3.8: py38
    3.9: py39
    3.10: py310

[testenv]
commands = {envpython} run_tests.py --with-coverage
passenv = DATABASE_URL DATABASE_URL_2
deps =
    -rdev-requirements.txt
    2.2: Django>=2.2,<3.0
    3.0: Django>=3.0,<3.1
    3.1: Django>=3.1,<3.2
    3.2: Django>=3.2,<4.0
    4.0: Django>=4.0,<4.1

[testenv:docs]
basepython = python3.7
deps =
    Sphinx
    Django
setenv =
    PYTHONPATH = {toxinidir}/examples/
    DJANGO_SETTINGS_MODULE = cache_machine.settings
changedir = docs
commands = /usr/bin/make html

[testenv:py37-flake8]
deps = flake8
commands = flake8