diff --git a/activesupport/lib/active_support/cache.rb b/activesupport/lib/active_support/cache.rb
index a21c3b1c098ba..40a4b646a5e93 100644
--- a/activesupport/lib/active_support/cache.rb
+++ b/activesupport/lib/active_support/cache.rb
@@ -160,8 +160,8 @@ def retrieve_store_class(store)
# Some implementations may not support all methods beyond the basic cache
# methods of #fetch, #write, #read, #exist?, and #delete.
#
- # ActiveSupport::Cache::Store can store any Ruby object that is supported by
- # its +coder+'s +dump+ and +load+ methods.
+ # +ActiveSupport::Cache::Store+ can store any Ruby object that is supported
+ # by its +coder+'s +dump+ and +load+ methods.
#
# cache = ActiveSupport::Cache::MemoryStore.new
#
@@ -370,8 +370,8 @@ def mute
#
# ==== Options
#
- # Internally, +fetch+ calls #read_entry, and calls #write_entry on a cache
- # miss. Thus, +fetch+ supports the same options as #read and #write.
+ # Internally, +fetch+ calls +read_entry+, and calls +write_entry+ on a
+ # cache miss. Thus, +fetch+ supports the same options as #read and #write.
# Additionally, +fetch+ supports the following options:
#
# * force: true - Forces a cache "miss," meaning we treat the
diff --git a/activesupport/lib/active_support/cache/mem_cache_store.rb b/activesupport/lib/active_support/cache/mem_cache_store.rb
index 30f9a4e484793..9833bd6faac86 100644
--- a/activesupport/lib/active_support/cache/mem_cache_store.rb
+++ b/activesupport/lib/active_support/cache/mem_cache_store.rb
@@ -24,11 +24,11 @@ module Cache
#
# Special features:
# - Clustering and load balancing. One can specify multiple memcached servers,
- # and MemCacheStore will load balance between all available servers. If a
- # server goes down, then MemCacheStore will ignore it until it comes back up.
+ # and +MemCacheStore+ will load balance between all available servers. If a
+ # server goes down, then +MemCacheStore+ will ignore it until it comes back up.
#
- # MemCacheStore implements the Strategy::LocalCache strategy which implements
- # an in-memory cache inside of a block.
+ # +MemCacheStore+ implements the +Strategy::LocalCache+ strategy which
+ # implements an in-memory cache inside of a block.
class MemCacheStore < Store
# These options represent behavior overridden by this implementation and should
# not be allowed to get down to the Dalli client
@@ -106,14 +106,14 @@ def self.build_mem_cache(*addresses) # :nodoc:
end
end
- # Creates a new MemCacheStore object, with the given memcached server
+ # Creates a new +MemCacheStore+ object, with the given memcached server
# addresses. Each address is either a host name, or a host-with-port string
# in the form of "host_name:port". For example:
#
# ActiveSupport::Cache::MemCacheStore.new("localhost", "server-downstairs.localnetwork:8229")
#
# If no addresses are provided, but ENV['MEMCACHE_SERVERS'] is defined, it will be used instead. Otherwise,
- # MemCacheStore will connect to localhost:11211 (the default memcached port).
+ # +MemCacheStore+ will connect to localhost:11211 (the default memcached port).
# Passing a +Dalli::Client+ instance is deprecated and will be removed. Please pass an address instead.
def initialize(*addresses)
addresses = addresses.flatten
diff --git a/activesupport/lib/active_support/cache/memory_store.rb b/activesupport/lib/active_support/cache/memory_store.rb
index eb7626075cbba..2fe57190d8e7b 100644
--- a/activesupport/lib/active_support/cache/memory_store.rb
+++ b/activesupport/lib/active_support/cache/memory_store.rb
@@ -18,13 +18,13 @@ module Cache
# a cleanup will occur which tries to prune the cache down to three quarters
# of the maximum size by removing the least recently used entries.
#
- # Unlike other Cache store implementations, MemoryStore does not compress
- # values by default. MemoryStore does not benefit from compression as much
+ # Unlike other Cache store implementations, +MemoryStore+ does not compress
+ # values by default. +MemoryStore+ does not benefit from compression as much
# as other Store implementations, as it does not send data over a network.
# However, when compression is enabled, it still pays the full cost of
# compression in terms of cpu use.
#
- # MemoryStore is thread-safe.
+ # +MemoryStore+ is thread-safe.
class MemoryStore < Store
module DupCoder # :nodoc:
extend self
diff --git a/activesupport/lib/active_support/cache/redis_cache_store.rb b/activesupport/lib/active_support/cache/redis_cache_store.rb
index 873c5f04294dc..4335f5c9a5de1 100644
--- a/activesupport/lib/active_support/cache/redis_cache_store.rb
+++ b/activesupport/lib/active_support/cache/redis_cache_store.rb
@@ -19,22 +19,23 @@ module ActiveSupport
module Cache
# = Redis \Cache \Store
#
- # Deployment note: Take care to use a *dedicated Redis cache* rather
- # than pointing this at your existing Redis server. It won't cope well
- # with mixed usage patterns and it won't expire cache entries by default.
+ # Deployment note: Take care to use a dedicated Redis cache rather
+ # than pointing this at a persistent Redis server (for example, one used as
+ # an Active Job queue). Redis won't cope well with mixed usage patterns and it
+ # won't expire cache entries by default.
#
# Redis cache server setup guide: https://redis.io/topics/lru-cache
#
- # * Supports vanilla Redis, hiredis, and Redis::Distributed.
- # * Supports Memcached-like sharding across Redises with Redis::Distributed.
+ # * Supports vanilla Redis, hiredis, and +Redis::Distributed+.
+ # * Supports Memcached-like sharding across Redises with +Redis::Distributed+.
# * Fault tolerant. If the Redis server is unavailable, no exceptions are
# raised. Cache fetches are all misses and writes are dropped.
# * Local cache. Hot in-memory primary cache within block/middleware scope.
- # * +read_multi+ and +write_multi+ support for Redis mget/mset. Use Redis::Distributed
- # 4.0.1+ for distributed mget support.
+ # * +read_multi+ and +write_multi+ support for Redis mget/mset. Use
+ # +Redis::Distributed+ 4.0.1+ for distributed mget support.
# * +delete_matched+ support for Redis KEYS globs.
class RedisCacheStore < Store
- # Keys are truncated with the ActiveSupport digest if they exceed 1kB
+ # Keys are truncated with the Active Support digest if they exceed 1kB
MAX_KEY_BYTESIZE = 1024
DEFAULT_REDIS_OPTIONS = {
@@ -110,8 +111,11 @@ def build_redis_client(**redis_options)
# Creates a new Redis cache store.
#
- # Handles four options: :redis block, :redis instance, single :url
- # string, and multiple :url strings.
+ # There are four ways to provide the Redis client used by the cache: the
+ # +:redis+ param can be a Redis instance or a block that returns a Redis
+ # instance, or the +:url+ param can be a string or an array of strings
+ # which will be used to create a Redis instance or a +Redis::Distributed+
+ # instance.
#
# Option Class Result
# :redis Proc -> options[:redis].call
@@ -134,7 +138,7 @@ def build_redis_client(**redis_options)
#
# Race condition TTL is not set by default. This can be used to avoid
# "thundering herd" cache writes when hot cache entries are expired.
- # See ActiveSupport::Cache::Store#fetch for more.
+ # See ActiveSupport::Cache::Store#fetch for more.
#
# Setting skip_nil: true will not cache nil results:
#
@@ -244,7 +248,7 @@ def increment(name, amount = 1, options = nil)
# Decrement a cached integer value using the Redis decrby atomic operator.
# Returns the updated value.
#
- # If the key is unset or has expired, it will be set to -amount:
+ # If the key is unset or has expired, it will be set to +-amount+:
#
# cache.decrement("foo") # => -1
#