diff --git a/TODOLIST.txt b/TODOLIST.txt new file mode 100644 index 0000000..30be318 --- /dev/null +++ b/TODOLIST.txt @@ -0,0 +1,42 @@ +TODOLIST +-------- + +If you want a really fast Drupal 8: + + * Drupal\Core\KeyValueStore\DatabaseStorage + Drupal\Core\KeyValueStore\DatabaseStorageExpirable + Notes: + - Both are easy to implement. + - Must be able to separate it from the sharded pool since it needs to + be reliable and consistent over time. The client/server pool + implementation from 7.x-3.x must be ported too. + - The first brings the complexity of the data migration. + + * Drupal\Core\Routing + Notes: + - Quite easy one too + - I'm not sure whether other components use it or not, in which + case it is no longer certain that this is easy. + + * Drupal\Core\Config\DatabaseStorage + Note: + - Easy one. + + * Drupal\Core\Path\AliasStorage + Note: + - Already done in the 7.x-2.x version, and if the schema didn't change much + this is a rather easy one too. + - If the same schema is used as in the 7.x version, then there is no use + in sharding it, and it should be stored along the router table replacement. + + * Drupal\Core\Session\SessionHandler + Note: + - Easy one. + +The first two will get rid of almost 30 out of the 50 remaining SQL queries +on a simple homepage with no content displayed. The third one will get rid of +5 or so remaining. + +If all of those are taken care of, fewer than 10 SQL queries will remain on +a standard profile home page. After that, real profiling needs to be done over +a site with contents, blocks and views all around the place, on various pages. 
diff --git a/redis.services.yml b/redis.services.yml index bc193dc..4818f5b 100644 --- a/redis.services.yml +++ b/redis.services.yml @@ -3,4 +3,4 @@ services: class: Drupal\redis\Cache\CacheBackendFactory arguments: ['@redis.factory', '@cache_tags.invalidator.checksum'] redis.factory: - class: Drupal\redis\ClientFactory + class: Drupal\redis\ClientFactory \ No newline at end of file diff --git a/src/Cache/CacheBase.php b/src/Cache/CacheBase.php index a449ff8..ddf5bc8 100644 --- a/src/Cache/CacheBase.php +++ b/src/Cache/CacheBase.php @@ -21,7 +21,9 @@ */ abstract class CacheBase implements CacheBackendInterface { - use RedisPrefixTrait; + use RedisPrefixTrait { + getKey as getParentKey; + } /** * Temporary cache items lifetime is infinite. @@ -34,17 +36,6 @@ abstract class CacheBase implements CacheBackendInterface { */ const LIFETIME_PERM_DEFAULT = 31536000; - /** - * Computed keys are let's say arround 60 characters length due to - * key prefixing, which makes 1,000 keys DEL command to be something - * arround 50,000 bytes length: this is huge and may not pass into - * Redis, let's split this off. - * Some recommend to never get higher than 1,500 bytes within the same - * command which makes us forced to split this at a very low threshold: - * 20 seems a safe value here (1,280 average length). - */ - const KEY_THRESHOLD = 20; - /** * Latest delete all flush KEY name. */ @@ -132,18 +123,6 @@ public function invalidate($cid) { $this->invalidateMultiple([$cid]); } - /** - * Return the key for the given cache key. - */ - public function getKey($cid = NULL) { - if (NULL === $cid) { - return $this->getPrefix() . ':' . $this->bin; - } - else { - return $this->getPrefix() . ':' . $this->bin . ':' . $cid; - } - } - /** * Calculate the correct expiration time. 
* @@ -158,7 +137,7 @@ protected function getExpiration($expire) { if ($expire == Cache::PERMANENT || $expire > $this->permTtl) { return $this->permTtl; } - return $expire - REQUEST_TIME; + return $expire - time(); } /** @@ -205,4 +184,15 @@ public function setPermTtl($ttl = NULL) { } } + /** + * {@inheritdoc} + */ + public function getKey($parts) { + if (is_string($parts)) { + $parts = [$parts]; + } + array_unshift($parts, $this->bin); + return $this->getParentKey($parts); + } + } diff --git a/src/Cache/PhpRedis.php b/src/Cache/PhpRedis.php index 4166d6a..b5a3de7 100644 --- a/src/Cache/PhpRedis.php +++ b/src/Cache/PhpRedis.php @@ -15,6 +15,11 @@ */ class PhpRedis extends CacheBase { + /** + * A bit more than 10 minutes. + */ + const INVALID_TTL = 666; + /** * @var \Redis */ @@ -27,13 +32,6 @@ class PhpRedis extends CacheBase { */ protected $checksumProvider; - /** - * The last delete timestamp. - * - * @var float - */ - protected $lastDeleteAll = NULL; - /** * Creates a PHpRedis cache backend. */ @@ -44,48 +42,129 @@ function __construct($bin, \Redis $client, CacheTagsChecksumInterface $checksum_ } /** - * {@inheritdoc} + * Set the last flush timestamp + * + * @param boolean $overwrite + * If set the method won't try to load the existing value before + * + * @return string */ - public function getMultiple(&$cids, $allow_invalid = FALSE) { - // Avoid an error when there are no cache ids. - if (empty($cids)) { - return []; + protected function setLastFlushTime($overwrite = false) { + + $key = $this->getKey('_flush'); + $time = REQUEST_TIME; + + $flushTime = $this->client->get($key); + + if ($flushTime && $time === (int)$flushTime) { + $flushTime = $this->getNextIncrement($flushTime); + } else { + $flushTime = $this->getNextIncrement($time); } - $return = array(); + $this->client->set($key, $flushTime); - // Build the list of keys to fetch. 
- $keys = array_map(array($this, 'getKey'), $cids); + return $flushTime; + } - // Optimize for the common case when only a single cache entry needs to - // be fetched, no pipeline is needed then. - if (count($keys) > 1) { - $pipe = $this->client->multi(\Redis::PIPELINE); - foreach ($keys as $key) { - $pipe->hgetall($key); - } - $result = $pipe->exec(); + /** + * Get the last flush timestamp + * + * @return string + */ + protected function getLastFlushTime() { + + $flushTime = $this->client->get($this->getKey('_flush')); + + if (!$flushTime) { + // In case there is no last flush data consider that the cache backend + // is actually pending an inconsistent state, the 'flush' key might + // disappear anytime a server is replaced or manually flushed. Please + // note that the initial flush timestamp is set when an entry is set + // too. + $flushTime = $this->setLastFlushTime(); } - else { - $result = [$this->client->hGetAll(reset($keys))]; + + return $flushTime; + } + + /** + * {@inheritdoc} + */ + public function get($cid, $allow_invalid = FALSE) { + + $entryKey = $this->getKey($cid); + $item = $this->client->hGetAll($entryKey); + $time = REQUEST_TIME; + + if (!$item) { + return FALSE; } - // Loop over the cid values to ensure numeric indexes. - foreach (array_values($cids) as $index => $key) { - // Check if a valid result was returned from Redis. - if (isset($result[$index]) && is_array($result[$index])) { - // Check expiration and invalidation and convert into an object. - $item = $this->expandEntry($result[$index], $allow_invalid); - if ($item) { - $return[$item->cid] = $item; + $item = (object)$item; + // @todo Sometimes tags are inserted as an " " string case in which we end + // up with explode'ing it and get as a result [""] which breaks items + // validity at tags check. Explore this and find why. 
+ $item->tags = array_filter(explode(',', $item->tags)); + $item->valid = (bool)$item->valid; + $item->expire = (int)$item->expire; + $item->ttl = (int)$item->ttl; + + if (!$item->valid && $item->ttl === self::INVALID_TTL ) { + // @todo This is ugly but we are int the case where an already expired + // entry was set previously, this means that we are probably in the unit + // tests and we should not delete this entry to make core tests happy. + if (!$allow_invalid) { + if ($item->created < $time - $item->ttl) { + // Force delete 10 mintes after the invalidation to keep some + // cleanup level for this ugly hack. + $this->client->del($entryKey); } + return FALSE; + } + } else if ($item->valid && !$allow_invalid) { + + if (Cache::PERMANENT !== $item->expire && $item->expire < $time) { + $this->client->del($entryKey); + return FALSE; + } + + $lastFlush = $this->getLastFlushTime(); + if ($item->created < $lastFlush) { + $this->client->del($entryKey); + return FALSE; + } + + if (!$this->checksumProvider->isValid($item->checksum, $item->tags)) { + $this->client->del($entryKey); + return FALSE; } } - // Remove fetched cids from the list. 
- $cids = array_diff($cids, array_keys($return)); + $item->data = unserialize($item->data); + $item->created = (int)$item->created; - return $return; + return $item; + } + + /** + * {@inheritdoc} + */ + public function getMultiple(&$cids, $allow_invalid = FALSE) { + $ret = []; + + // @todo Unperformant, but in a sharded environement we + // cannot proceed another way, still there are some paths + // to explore + foreach ($cids as $index => $cid) { + $item = $this->get($cid, $allow_invalid); + if ($item) { + $ret[$cid] = $item; + unset($cids[$index]); + } + } + + return $ret; } /** @@ -93,176 +172,154 @@ public function getMultiple(&$cids, $allow_invalid = FALSE) { */ public function set($cid, $data, $expire = Cache::PERMANENT, array $tags = array()) { - $ttl = $this->getExpiration($expire); + assert('\Drupal\Component\Assertion\Inspector::assertAllStrings($tags)', 'Cache Tags must be strings.'); - $key = $this->getKey($cid); + $time = REQUEST_TIME; + $created = null; + $entryKey = $this->getKey($cid); + $lastFlush = $this->getLastFlushTime(); - // If the item is already expired, delete it. - if ($ttl <= 0) { - $this->delete($key); + if ($time === (int)$lastFlush) { + // Latest flush happened the exact same second. + $created = $lastFlush; + } else { + $created = $this->getNextIncrement($time); } - // Build the cache item and save it as a hash array. - $entry = $this->createEntryHash($cid, $data, $expire, $tags); - $pipe = $this->client->multi(\REdis::PIPELINE); - $pipe->hMset($key, $entry); - $pipe->expire($key, $ttl); - $pipe->exec(); - } + $valid = true; + $maxTtl = $this->getPermTtl(); + + if (Cache::PERMANENT !== $expire) { + + if ($expire <= $time) { + // And existing entry if any is stalled + // $this->client->del($entryKey); + // return; + // @todo This might happen during tests to check that invalid entries + // can be fetched, I do not like this. This invalid features mostly + // serves some edge caching cases, let's set a very small cache life + // time. 
10 minutes is enough. See ::invalidate() method comment. + $valid = false; + $ttl = self::INVALID_TTL; + } else { + $ttl = $expire - $time; + } + + if ($maxTtl < $ttl) { + $ttl = $maxTtl; + } + // This feature might be deactivated by the site admin. + } else if ($maxTtl !== self::LIFETIME_INFINITE) { + $ttl = $maxTtl; + } else { + $ttl = $expire; + } + //getExpiration + // 0 for tag means it never has been deleted + $checksum = $this->checksumProvider->getCurrentChecksum($tags); + + $this->client->hMset($entryKey, [ + 'cid' => $cid, + 'created' => $created, + 'checksum' => $checksum, + 'expire' => $expire, + 'ttl' => $ttl, + 'data' => serialize($data), + 'tags' => implode(',', $tags), + 'valid' => (int)$valid, + ]); + + if ($expire !== Cache::PERMANENT) { + $this->client->expire($entryKey, $ttl); + } + } /** * {@inheritdoc} */ - public function deleteMultiple(array $cids) { - $keys = array_map(array($this, 'getKey'), $cids); - $this->client->del($keys); + public function setMultiple(array $items) { + foreach ($items as $cid => $item) { + $item += [ + 'data' => null, + 'expire' => Cache::PERMANENT, + 'tags' => [], + ]; + $this->set($cid, $item['data'], $item['expire'], $item['tags']); + } } /** * {@inheritdoc} */ - public function deleteAll() { - // The last delete timestamp is in milliseconds, ensure that no cache - // was written in the same millisecond. - // @todo This is needed to make the tests pass, is this safe enough for real - // usage? - usleep(1000); - $this->lastDeleteAll = round(microtime(TRUE), 3); - $this->client->set($this->getKey(static::LAST_DELETE_ALL_KEY), $this->lastDeleteAll); + public function delete($cid) { + $this->client->del($this->getKey($cid)); } /** * {@inheritdoc} */ - public function invalidateMultiple(array $cids) { - // Loop over all cache items, they are stored as a hash, so we can access - // the valid flag directly, only write if it exists and is not 0. 
+ public function deleteMultiple(array $cids) { foreach ($cids as $cid) { - $key = $this->getKey($cid); - if ($this->client->hGet($key, 'valid')) { - $this->client->hSet($key, 'valid', 0); - } + $this->client->del($this->getKey($cid)); } } /** * {@inheritdoc} */ - public function invalidateAll() { - // To invalidate the whole bin, we invalidate a special tag for this bin. - $this->checksumProvider->invalidateTags([$this->getTagForBin()]); + public function deleteAll() { + $this->setLastFlushTime(); } /** * {@inheritdoc} */ - public function garbageCollection() { - // @todo Do we need to do anything here? + public function invalidate($cid) { + $entryKey = $this->getKey($cid); + if ($this->client->hGet($entryKey, 'valid')) { + // @todo Note that the original algorithm was to delete the entry at + // this point instead of just invalidate it, but the bigger core unit + // test method actually goes down that path, so as a temporary solution + // we are just invalidating it this way. + $this->client->hMset($entryKey, [ + 'valid' => 0, + 'ttl' => self::INVALID_TTL, + ]); + } } /** - * Returns the last delete all timestamp. - * - * @return float - * The last delete timestamp as a timestamp with a millisecond precision. + * {@inheritdoc} */ - protected function getLastDeleteAll() { - // Cache the last delete all timestamp. - if ($this->lastDeleteAll === NULL) { - $this->lastDeleteAll = (float) $this->client->get($this->getKey(static::LAST_DELETE_ALL_KEY)); + public function invalidateMultiple(array $cids) { + foreach ($cids as $cid) { + $this->invalidate($cid); } - return $this->lastDeleteAll; } /** - * Create cache entry. - * - * @param string $cid - * @param mixed $data - * @param int $expire - * @param string[] $tags - * - * @return array + * {@inheritdoc} */ - protected function createEntryHash($cid, $data, $expire = Cache::PERMANENT, array $tags) { - // Always add a cache tag for the current bin, so that we can use that for - // invalidateAll(). 
- $tags[] = $this->getTagForBin(); - assert('\Drupal\Component\Assertion\Inspector::assertAllStrings($tags)', 'Cache Tags must be strings.'); - $hash = array( - 'cid' => $cid, - 'created' => round(microtime(TRUE), 3), - 'expire' => $expire, - 'tags' => implode(' ', $tags), - 'valid' => 1, - 'checksum' => $this->checksumProvider->getCurrentChecksum($tags), - ); - - // Let Redis handle the data types itself. - if (!is_string($data)) { - $hash['data'] = serialize($data); - $hash['serialized'] = 1; - } - else { - $hash['data'] = $data; - $hash['serialized'] = 0; - } - - return $hash; + public function invalidateAll() { + $this->setLastFlushTime(); } /** - * Prepares a cached item. - * - * Checks that items are either permanent or did not expire, and unserializes - * data as appropriate. - * - * @param array $values - * The hash returned from redis or false. - * @param bool $allow_invalid - * If FALSE, the method returns FALSE if the cache item is not valid. - * - * @return mixed|false - * The item with data unserialized as appropriate and a property indicating - * whether the item is valid, or FALSE if there is no valid item to load. + * {@inheritdoc} */ - protected function expandEntry(array $values, $allow_invalid) { - // Check for entry being valid. - if (empty($values['cid'])) { - return FALSE; - } - - $cache = (object) $values; - - $cache->tags = explode(' ', $cache->tags); - - // Check expire time, allow to have a cache invalidated explicitly, don't - // check if already invalid. - if ($cache->valid) { - $cache->valid = $cache->expire == Cache::PERMANENT || $cache->expire >= REQUEST_TIME; - - // Check if invalidateTags() has been called with any of the items's tags. - if ($cache->valid && !$this->checksumProvider->isValid($cache->checksum, $cache->tags)) { - $cache->valid = FALSE; - } - } - - // Ensure the entry does not predate the last delete all time. 
- $last_delete_timestamp = $this->getLastDeleteAll(); - if ($last_delete_timestamp && ((float)$values['created']) < $last_delete_timestamp) { - return FALSE; - } - - if (!$allow_invalid && !$cache->valid) { - return FALSE; - } - - if ($cache->serialized) { - $cache->data = unserialize($cache->data); - } + public function garbageCollection() { + // No need for garbage collection, Redis will do it for us based upon + // the entries TTL. Also, knowing that in a sharded environment we cannot + // predict where entries are going to be stored, especially when doing + // proxy assisted sharding, we can't really do anything in here. + } - return $cache; + /** + * {@inheritdoc} + */ + public function removeBin() { + $this->deleteAll(); } } diff --git a/src/Cache/RedisCacheTagsChecksum.php b/src/Cache/RedisCacheTagsChecksum.php index db1d224..585e478 100644 --- a/src/Cache/RedisCacheTagsChecksum.php +++ b/src/Cache/RedisCacheTagsChecksum.php @@ -24,7 +24,7 @@ class RedisCacheTagsChecksum implements CacheTagsChecksumInterface, CacheTagsInv * * @var array */ - protected $tagCache = array(); + protected $tagCache = []; /** * A list of tags that have already been invalidated in this request. @@ -33,7 +33,7 @@ class RedisCacheTagsChecksum implements CacheTagsChecksumInterface, CacheTagsInv * * @var array */ - protected $invalidatedTags = array(); + protected $invalidatedTags = []; /** * @var \Redis @@ -51,22 +51,21 @@ function __construct(ClientFactory $factory) { * {@inheritdoc} */ public function invalidateTags(array $tags) { - $keys_to_increment = []; foreach ($tags as $tag) { - // Only invalidate tags once per request unless they are written again. if (isset($this->invalidatedTags[$tag])) { + // Only invalidate tags once per request unless they are written again. 
continue; } + + $tagKey = $this->getKey(['tag', $tag]); + $current = $this->client->get($tagKey); + + $current = $this->getNextIncrement($current); + $this->client->set($tagKey, $current); + + // Rightly populate the tag cache with the new values. $this->invalidatedTags[$tag] = TRUE; - unset($this->tagCache[$tag]); - $keys_to_increment[] = $this->getTagKey($tag); - } - if ($keys_to_increment) { - $multi = $this->client->multi(\Redis::PIPELINE); - foreach ($keys_to_increment as $key) { - $multi->incr($key); - } - $multi->exec(); + $this->tagCache[$tag] = $current; } } @@ -88,7 +87,7 @@ public function getCurrentChecksum(array $tags) { * {@inheritdoc} */ public function isValid($checksum, array $tags) { - return $checksum == $this->calculateChecksum($tags); + return $this->calculateChecksum($tags) <= $checksum; } /** @@ -97,16 +96,35 @@ public function isValid($checksum, array $tags) { public function calculateChecksum(array $tags) { $checksum = 0; - $fetch = array_values(array_diff($tags, array_keys($this->tagCache))); - if ($fetch) { - $keys = array_map(array($this, 'getTagKey'), $fetch); - foreach ($this->client->mget($keys) as $index => $invalidations) { - $this->tagCache[$fetch[$index]] = $invalidations ?: 0; + foreach ($tags as $tag) { + + if (isset($this->tagCache[$tag])) { + $current = $this->tagCache[$tag]; + } + else { + $tagKey = $this->getKey(['tag', $tag]); + $current = $this->client->get($tagKey); + + if (!$current) { + // Tag has never been created yet, so ensure it has an entry in Redis + // database. When dealing in a sharded environment, the tag checksum + // itself might have been dropped silently, case in which giving back + // a 0 value can cause invalided cache entries to be considered as + // valid back. + // Note that doing that, in case a tag key was dropped by the holding + // Redis server, all items based upon the droppped tag will then become + // invalid, but that's the definitive price of trying to being + // consistent in all cases. 
+ $current = $this->getNextIncrement(); + $this->client->set($tagKey, $current); + } + + $this->tagCache[$tag] = $current; } - } - foreach ($tags as $tag) { - $checksum += $this->tagCache[$tag]; + if ($checksum < $current) { + $checksum = $current; + } } return $checksum; @@ -116,21 +134,8 @@ public function calculateChecksum(array $tags) { * {@inheritdoc} */ public function reset() { - $this->tagCache = array(); - $this->invalidatedTags = array(); - } - - /** - * Return the key for the given cache tag. - * - * @param string $tag - * The cache tag. - * - * @return string - * The prefixed cache tag. - */ - protected function getTagKey($tag) { - return $this->getPrefix() . ':cachetags:' . $tag; + $this->tagCache = []; + $this->invalidatedTags = []; } } diff --git a/src/RedisPrefixTrait.php b/src/RedisPrefixTrait.php index b08c97b..1a435ea 100644 --- a/src/RedisPrefixTrait.php +++ b/src/RedisPrefixTrait.php @@ -94,4 +94,106 @@ protected function getPrefix() { return $this->prefix; } + /** + * From the given timestamp build an incremental safe time-based identifier. + * + * Due to potential accidental cache wipes, when a server goes down in the + * cluster or when a server triggers its LRU algorithm wipe-out, keys that + * matches flush or tags checksum might be dropped. + * + * Per default, each new inserted tag will trigger a checksum computation to + * be stored in the Redis server as a timestamp. In order to ensure a checksum + * validity a simple comparison between the tag checksum and the cache entry + * checksum will tell us if the entry pre-dates the current checksum or not, + * thus telling us its state. The main problem we experience is that Redis + * is being so fast it is able to create and drop entries at same second, + * sometime even the same micro second. The only safe way to avoid conflicts + * is to checksum using an arbitrary computed number (a sequence). 
+ * + * Drupal core does exactly this thus tags checksums are additions of each tag + * individual checksum; each tag checksum is a independent arbitrary serial + * that gets incremented starting with 0 (no invalidation done yet) to n (n + * invalidations) which grows over time. This way the checksum computation + * always rises and we have a sensible default that works in all cases. + * + * This model works as long as you can ensure consistency for the serial + * storage over time. Nevertheless, as explained upper, in our case this + * serial might be dropped at some point for various valid technical reasons: + * if we start over to 0, we may accidentally compute a checksum which already + * existed in the past and make invalid entries turn back to valid again. + * + * In order to prevent this behavior, using a timestamp as part of the serial + * ensures that we won't experience this problem in a time range wider than a + * single second, which is safe enough for us. But using timestamp creates a + * new problem: Redis is so fast that we can set or delete hundreds of entries + * easily during the same second: an entry created then invalidated the same + * second will create false positives (entry is being considered as valid) - + * note that depending on the check algorithm, false negative may also happen + * the same way. Therefore we need to have an abitrary serial value to be + * incremented in order to enforce our checks to be more strict. + * + * The solution to both the first (the need for a time based checksum in case + * of checksum data being dropped) and the second (the need to have an + * arbitrary predictible serial value to avoid false positives or negatives) + * we are combining the two: every checksum will be built this way: + * + * UNIXTIMESTAMP.SERIAL + * + * For example: + * + * 1429789217.017 + * + * will reprensent the 17th invalidation of the 1429789217 exact second which + * happened while writing this documentation. 
The next tag being invalidated + * the same second will then have this checksum: + * + * 1429789217.018 + * + * And so on... + * + * In order to make it consitent with PHP string and float comparison we need + * to set fixed precision over the decimal, and store as a string to avoid + * possible float precision problems when comparing. + * + * This algorithm is not fully failsafe, but allows us to proceed to 1000 + * operations on the same checksum during the same second, which is a + * sufficiently great value to reduce the conflict probability to almost + * zero for most uses cases. + * + * @param int|string $timestamp + * "TIMESTAMP[.INCREMENT]" string + * + * @return string + * The next "TIMESTAMP.INCREMENT" string. + */ + public function getNextIncrement($timestamp = null) { + + if (!$timestamp) { + return time() . '.000'; + } + + if (FALSE !== ($pos = strpos($timestamp, '.'))) { + $inc = substr($timestamp, $pos + 1, 3); + + return ((int)$timestamp) . '.' . str_pad($inc + 1, 3, '0', STR_PAD_LEFT); + } + + return $timestamp . '.000'; + } + + /** + * Get prefixed key + * + * @param string[] $parts + * Arbitrary number of strings to compose the key + * + * @return string + */ + public function getKey($parts = []) { + if (!is_array($parts)) { + $parts = [$parts]; + } + array_unshift($parts, $this->getPrefix()); + return implode(':', $parts); + } } diff --git a/src/Tests/AbstractRedisCacheFixesUnitTestCase.php b/src/Tests/AbstractRedisCacheFixesUnitTestCase.php index 630d229..91d2074 100644 --- a/src/Tests/AbstractRedisCacheFixesUnitTestCase.php +++ b/src/Tests/AbstractRedisCacheFixesUnitTestCase.php @@ -35,7 +35,7 @@ public function testTemporaryCacheExpire() { $this->assertIdentical('bar', $data->data); // Expiring entry with negative lifetime. 
- $backend->set('test3', 'baz', REQUEST_TIME - 100); + $backend->set('test3', 'baz', time() - 100); $data = $backend->get('test3'); $this->assertEqual(false, $data); } diff --git a/src/Tests/Cache/PhpRedisUnitTest.php b/src/Tests/Cache/PhpRedisUnitTest.php index 85ded8a..69ad089 100644 --- a/src/Tests/Cache/PhpRedisUnitTest.php +++ b/src/Tests/Cache/PhpRedisUnitTest.php @@ -7,6 +7,7 @@ namespace Drupal\redis\Tests\Cache; +use Drupal\Core\Cache\Cache; use Drupal\Core\DependencyInjection\ContainerBuilder; use Drupal\Core\Site\Settings; use Drupal\redis\Cache\PhpRedis; @@ -37,7 +38,6 @@ public function containerBuild(ContainerBuilder $container) { } } - /** * Creates a new instance of PhpRedis cache backend. * @@ -45,9 +45,106 @@ public function containerBuild(ContainerBuilder $container) { * A new PhpRedis cache backend. */ protected function createCacheBackend($bin) { - $cache = \Drupal::service('cache.backend.redis')->get($bin); + $cache = new PhpRedis( + $bin, + \Drupal::service('redis.factory')->getClient(), + \Drupal::service('cache_tags.invalidator.checksum') + ); $cache->setMinTtl(10); return $cache; } + /** + * Tests Drupal\Core\Cache\CacheBackendInterface::invalidateTags(). + */ + function testInvalidateTags() { + $backend = $this->getCacheBackend(); + + // Create two cache entries with the same tag and tag value. + $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:2')); + $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:2')); + $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Two cache items were created.'); + + // Invalidate test_tag of value 1. This should invalidate both entries. 
+ Cache::invalidateTags(array('test_tag:2')); + $this->assertFalse($backend->get('test_cid_invalidate1') || $backend->get('test_cid_invalidate2'), 'Two cache items invalidated after invalidating a cache tag.'); + + // Create two cache entries with the same tag and an array tag value. + $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:1')); + $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:1')); + $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Two cache items were created.'); + + // Invalidate test_tag of value 1. This should invalidate both entries. + Cache::invalidateTags(array('test_tag:1')); + $this->assertFalse($backend->get('test_cid_invalidate1') || $backend->get('test_cid_invalidate2'), 'Two caches removed after invalidating a cache tag.'); + + // Create three cache entries with a mix of tags and tag values. + $backend->set('test_cid_invalidate1', $this->defaultValue, Cache::PERMANENT, array('test_tag:1')); + $backend->set('test_cid_invalidate2', $this->defaultValue, Cache::PERMANENT, array('test_tag:2')); + $backend->set('test_cid_invalidate3', $this->defaultValue, Cache::PERMANENT, array('test_tag_foo:3')); + $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2') && $backend->get('test_cid_invalidate3'), 'Three cached items were created.'); + Cache::invalidateTags(array('test_tag_foo:3')); + $this->assertTrue($backend->get('test_cid_invalidate1') && $backend->get('test_cid_invalidate2'), 'Cache items not matching the tag were not invalidated.'); + $this->assertFalse($backend->get('test_cid_invalidated3'), 'Cached item matching the tag was removed.'); + + // Create cache entry in multiple bins. Two cache entries + // (test_cid_invalidate1 and test_cid_invalidate2) still exist from previous + // tests. 
+ $tags = array('test_tag:1', 'test_tag:2', 'test_tag:3'); + $bins = array('path', 'bootstrap', 'page'); + foreach ($bins as $bin) { + $this->getCacheBackend($bin)->set('test', $this->defaultValue, Cache::PERMANENT, $tags); + $this->assertTrue($this->getCacheBackend($bin)->get('test'), 'Cache item was set in bin.'); + } + + Cache::invalidateTags(array('test_tag:2')); + + // Test that the cache entry has been invalidated in multiple bins. + foreach ($bins as $bin) { + $this->assertFalse($this->getCacheBackend($bin)->get('test'), 'Tag invalidation affected item in bin.'); + } + // Test that the cache entry with a matching tag has been invalidated. + $this->assertFalse($this->getCacheBackend($bin)->get('test_cid_invalidate2'), 'Cache items matching tag were invalidated.'); + // Test that the cache entry with without a matching tag still exists. + $this->assertTrue($this->getCacheBackend($bin)->get('test_cid_invalidate1'), 'Cache items not matching tag were not invalidated.'); + } + + /** + * Test Drupal\Core\Cache\CacheBackendInterface::invalidateAll(). + */ + public function testInvalidateAll() { + $backend_a = $this->getCacheBackend(); + $backend_b = $this->getCacheBackend('bootstrap'); + + // Set both expiring and permanent keys. + $backend_a->set('test1', 1, Cache::PERMANENT); + $backend_a->set('test2', 3, time() + 1000); + $backend_b->set('test3', 4, Cache::PERMANENT); + + $backend_a->invalidateAll(); + + $this->assertFalse($backend_a->get('test1'), 'First key has been invalidated.'); + $this->assertFalse($backend_a->get('test2'), 'Second key has been invalidated.'); + $this->assertTrue($backend_b->get('test3'), 'Item in other bin is preserved.'); + } + + /** + * Tests Drupal\Core\Cache\CacheBackendInterface::removeBin(). + */ + public function testRemoveBin() { + $backend_a = $this->getCacheBackend(); + $backend_b = $this->getCacheBackend('bootstrap'); + + // Set both expiring and permanent keys. 
+ $backend_a->set('test1', 1, Cache::PERMANENT); + $backend_a->set('test2', 3, time() + 1000); + $backend_b->set('test3', 4, Cache::PERMANENT); + + $backend_a->removeBin(); + + $this->assertFalse($backend_a->get('test1'), 'First key has been deleted.'); + $this->assertFalse($backend_a->get('test2'), 'Second key has been deleted.'); + $this->assertTrue($backend_b->get('test3'), 'Item in other bin is preserved.'); + } + }