From 85e3b795320b0b8b40d215f4f216fdf24cf4a927 Mon Sep 17 00:00:00 2001
From: Paul Oyston
Date: Wed, 19 Feb 2014 21:36:16 +0000
Subject: [PATCH 1/2] Changed the redis.conf to the version bundled with 2.8

Added many more parameters to the config, also made some of the parameters
optional instead of compulsory (e.g. maxmemory)
---
 defaults/main.yml       |  16 +-
 templates/redis.conf.j2 | 560 +++++++++++++++++++++++++++++++---------
 2 files changed, 445 insertions(+), 131 deletions(-)

diff --git a/defaults/main.yml b/defaults/main.yml
index a3e10a3..d254cc0 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -1,5 +1,6 @@
 ---

+redis_pidfile: /var/run/redis.pid
 redis_bind_address: "0.0.0.0"
 redis_port: 6379
 redis_syslog_enabled: "yes"
@@ -13,13 +14,24 @@ redis_db_dir: /var/lib/redis
 redis_role: master
 redis_requirepass: false
 redis_pass: None
-redis_max_clients: 128
-redis_max_memory: 512mb
+redis_max_clients: 10000
 redis_maxmemory_policy: volatile-lru
 redis_appendfsync: everysec
+redis_timeout: 0
+redis_loglevel: notice
+redis_logfile: /var/log/redis.log
+redis_hz: 10
+redis_tcp_keepalive: 0
+redis_syslog_ident: redis

 #If role is slave set these values too
 redis_master_ip: 1.1.1.1
 redis_master_port: 6379
 redis_master_auth: None
+redis_slave_serve_stale_data: yes
+redis_slave_read_only: yes
+redis_slave_priority: 100
+redis_disable_tcp_nodelay: no
+redis_appendonly: no
+redis_appendfilename: appendonly.aof
\ No newline at end of file
diff --git a/templates/redis.conf.j2 b/templates/redis.conf.j2
index 5d4037c..425dbad 100644
--- a/templates/redis.conf.j2
+++ b/templates/redis.conf.j2
@@ -1,6 +1,6 @@
 # Redis configuration file example
-# Note on units: when memory size is needed, it is possible to specifiy
+# Note on units: when memory size is needed, it is possible to specify
 # it in the usual form of 1k 5GB 4M and so forth:
 #
 # 1k => 1000 bytes
 #
@@ -12,62 +12,120 @@
 #
 # units are case insensitive so 1GB 1Gb 1gB are all the same.

+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis server but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+
+################################ GENERAL #####################################
+
 # By default Redis does not run as a daemon. Use 'yes' if you need it.
 # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
 daemonize yes

 # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
 # default. You can specify a custom pid file location here.
-pidfile /var/run/redis/redis.pid
+pidfile {{ redis_pidfile }}

 # Accept connections on the specified port, default is 6379.
 # If port 0 is specified Redis will not listen on a TCP socket.
 port {{ redis_port }}

-# If you want you can bind a single interface, if the bind option is not
-# specified all the interfaces will listen for incoming connections.
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need an high backlog in order
+# to avoid slow clients connections issues. Note that the Linux kernel
+# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
+# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
+# in order to get the desired effect.
+tcp-backlog 511
+
+# By default Redis listens for connections from all the network interfaces
+# available on the server. It is possible to listen to just one or multiple
+# interfaces using the "bind" configuration directive, followed by one or
+# more IP addresses.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind 127.0.0.1
 bind {{ redis_bind_address }}

-# Specify the path for the unix socket that will be used to listen for
 # incoming connections. There is no default, so Redis will not listen
 # on a unix socket when not specified.
 #
 # unixsocket /tmp/redis.sock
 # unixsocketperm 755

+{% if redis_unix_socket: %}
+unixsocket {{ redis_unixsocket }}
+{% if redis_unixsocketperm %}
+unixsocketperm {{ redis_unixsocketperm }}
+{% endif %}
+{% endif %}
+
 # Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
+timeout {{ redis_timeout }}

-# Set server verbosity to 'debug'
-# it can be one of:
+# TCP keepalive.
+#
+# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
+# of communication. This is useful for two reasons:
+#
+# 1) Detect dead peers.
+# 2) Take the connection alive from the point of view of network
+#    equipment in the middle.
+#
+# On Linux, the specified value (in seconds) is the period used to send ACKs.
+# Note that to close the connection the double of the time is needed.
+# On other kernels the period depends on the kernel configuration.
+#
+# A reasonable value for this option is 60 seconds.
+tcp-keepalive {{ redis_tcp_keepalive }}
+
+# Specify the server verbosity level.
+# This can be one of:
 # debug (a lot of information, useful for development/testing)
 # verbose (many rarely useful info, but not a mess like the debug level)
 # notice (moderately verbose, what you want in production probably)
 # warning (only very important / critical messages are logged)
-loglevel notice
+loglevel {{ redis_loglevel }}

-# Specify the log file name. Also 'stdout' can be used to force
+# Specify the log file name. Also the empty string can be used to force
 # Redis to log on the standard output. Note that if you use standard
 # output for logging but daemonize, logs will be sent to /dev/null
-logfile /var/log/redis/redis.log
+logfile {{ redis_logfile }}

 # To enable logging to the system logger, just set 'syslog-enabled' to yes,
 # and optionally update the other syslog parameters to suit your needs.
 syslog-enabled {{ redis_syslog_enabled }}

 # Specify the syslog identity.
-syslog-ident redis
+syslog-ident {{redis_syslog_ident}}

-# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
-syslog-facility local0
+# Specify the syslog facility.  Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0

 # Set the number of databases. The default database is DB 0, you can select
 # a different one on a per-connection basis using SELECT <dbid> where
 # dbid is a number between 0 and 'databases'-1
 databases {{ redis_databases }}

-################################ SNAPSHOTTING #################################
+################################ SNAPSHOTTING ################################
 #
 # Save the DB on disk:
 #
@@ -82,16 +140,47 @@ databases {{ redis_databases }}
 #   after 60 sec if at least 10000 keys changed
 #
 # Note: you can disable saving at all commenting all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
+
 {% for i in redis_database_save_times %}
 save {{ i[0] }} {{ i[1] }}
 {% endfor %}

+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
 # Compress string objects using LZF when dump .rdb databases?
 # For default that's set to 'yes' as it's almost always a win.
 # If you want to save some CPU in the saving child set it to 'no' but
 # the dataset will likely be bigger if you have compressible values or keys.
 rdbcompression yes

+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
 # The filename where to dump the DB
 dbfilename {{ redis_dbfilename }}

@@ -100,7 +189,7 @@ dbfilename {{ redis_dbfilename }}
 # The DB will be written inside this directory, with the filename specified
 # above using the 'dbfilename' configuration directive.
 #
-# Also the Append Only File will be created inside this directory.
+# The Append Only File will also be created inside this directory.
 #
 # Note that you must specify a directory here, not a file name.
 dir {{ redis_db_dir }}

@@ -112,22 +201,61 @@ dir {{ redis_db_dir }}
 # so for example it is possible to configure the slave to save the DB with a
 # different interval, or to listen to another port, and so on.
 #
+# slaveof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the slave to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the slave request.
+#
+# masterauth <master-password>
+
 {% if redis_role == 'slave' %}
 slaveof {{ redis_master_ip }} {{ redis_master_port }}
 masterauth {{ redis_master_auth }}
+{% endif %}
+
+# When a slave loses its connection with the master, or when the replication
+# is still in progress, the slave can act in two different ways:
+#
+# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+#    still reply to client requests, possibly with out of date data, or the
+#    data set may just be empty if this is the first synchronization.
+#
+# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+#    an error "SYNC with master in progress" to all the kind of commands
+#    but to INFO and SLAVEOF.
+#
+slave-serve-stale-data {{ redis_slave_serve_stale_data }}

-slave-serve-stale-data yes
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only {{ redis_slave_read_only }}

-{% endif %}
 # Slaves send PINGs to server in a predefined interval. It's possible to change
 # this interval with the repl_ping_slave_period option. The default value is 10
 # seconds.
 #
 # repl-ping-slave-period 10

-# The following option sets a timeout for both Bulk transfer I/O timeout and
-# master data or ping response timeout. The default value is 60 seconds.
+# The following option sets the replication timeout for:
+#
+# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
+# 2) Master timeout from the point of view of slaves (data, pings).
+# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
 #
 # It is important to make sure that this value is greater than the value
 # specified for repl-ping-slave-period otherwise a timeout will be detected
@@ -135,6 +263,80 @@ slave-serve-stale-data yes
 #
 # repl-timeout 60

+# Disable TCP_NODELAY on the slave socket after SYNC?
+#
+# If you select "yes" Redis will use a smaller number of TCP packets and
+# less bandwidth to send data to slaves. But this can add a delay for
+# the data to appear on the slave side, up to 40 milliseconds with
+# Linux kernels using a default configuration.
+#
+# If you select "no" the delay for data to appear on the slave side will
+# be reduced but more bandwidth will be used for replication.
+#
+# By default we optimize for low latency, but in very high traffic conditions
+# or when the master and slaves are many hops away, turning this to "yes" may
+# be a good idea.
+repl-disable-tcp-nodelay {{ redis_disable_tcp_nodelay }}
+
+# Set the replication backlog size. The backlog is a buffer that accumulates
+# slave data when slaves are disconnected for some time, so that when a slave
+# wants to reconnect again, often a full resync is not needed, but a partial
+# resync is enough, just passing the portion of data the slave missed while
+# disconnected.
+#
+# The biggest the replication backlog, the longer the time the slave can be
+# disconnected and later be able to perform a partial resynchronization.
+#
+# The backlog is only allocated once there is at least a slave connected.
+#
+# repl-backlog-size 1mb
+
+# After a master has no longer connected slaves for some time, the backlog
+# will be freed. The following option configures the amount of seconds that
+# need to elapse, starting from the time the last slave disconnected, for
+# the backlog buffer to be freed.
+#
+# A value of 0 means to never release the backlog.
+#
+# repl-backlog-ttl 3600
+
+# The slave priority is an integer number published by Redis in the INFO output.
+# It is used by Redis Sentinel in order to select a slave to promote into a
+# master if the master is no longer working correctly.
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority {{ redis_slave_priority }}
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEES that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
 ################################## SECURITY ###################################

 # Require clients to issue AUTH before processing any other
@@ -148,31 +350,39 @@ slave-serve-stale-data yes
 # 150k passwords per second against a good box. This means that you should
 # use a very strong password otherwise it will be very easy to break.
 #
+# requirepass foobared
+
 {% if redis_requirepass %}
 requirepass {{ redis_pass }}
 {% endif %}

 # Command renaming.
 #
-# It is possilbe to change the name of dangerous commands in a shared
+# It is possible to change the name of dangerous commands in a shared
 # environment. For instance the CONFIG command may be renamed into something
-# of hard to guess so that it will be still available for internal-use
-# tools but not available for general clients.
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
 #
 # Example:
 #
 # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
 #
-# It is also possilbe to completely kill a command renaming it into
+# It is also possible to completely kill a command by renaming it into
 # an empty string:
 #
 # rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to slaves may cause problems.

 ################################### LIMITS ####################################

-# Set the max number of connected clients at the same time. By default there
-# is no limit, and it's up to the number of file descriptors the Redis process
-# is able to open. The special value '0' means no limits.
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
 # Once the limit is reached Redis will close all the new connections sending
 # an error 'max number of clients reached'.
 #
@@ -180,7 +390,7 @@ maxclients {{ redis_max_clients }}
 # Don't use more memory than the specified amount of bytes.
 # When the memory limit is reached Redis will try to remove keys
-# accordingly to the eviction policy selected (see maxmemmory-policy).
+# according to the eviction policy selected (see maxmemory-policy).
 #
 # If Redis can't remove keys according to the policy, or if the policy is
 # set to 'noeviction', Redis will start to reply with errors to commands
@@ -188,7 +398,7 @@ maxclients {{ redis_max_clients }}
 # to reply to read-only commands like GET.
 #
 # This option is usually useful when using Redis as an LRU cache, or to set
-# an hard memory limit for an instance (using the 'noeviction' policy).
+# a hard memory limit for an instance (using the 'noeviction' policy).
 #
 # WARNING: If you have slaves attached to an instance with maxmemory on,
 # the size of the output buffers needed to feed the slaves are subtracted
@@ -201,19 +411,23 @@ maxclients {{ redis_max_clients }}
 # limit for maxmemory so that there is some free RAM on the system for slave
 # output buffers (but this is not needed if the policy is 'noeviction').
 #
+# maxmemory <bytes>
+
+{% if redis_max_memory: %}
 maxmemory {{ redis_max_memory }}
+{% endif %}

 # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached? You can select among five behavior:
+# is reached. You can select among five behaviors:
 #
 # volatile-lru -> remove the key with an expire set using an LRU algorithm
 # allkeys-lru -> remove any key accordingly to the LRU algorithm
 # volatile-random -> remove a random key with an expire set
-# allkeys->random -> remove a random key, any key
+# allkeys-random -> remove a random key, any key
 # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
 # noeviction -> don't expire at all, just return an error on write operations
 #
-# Note: with all the kind of policies, Redis will return an error on write
+# Note: with any of the above policies, Redis will return an error on write
 # operations, when there are not suitable keys for eviction.
 #
 # At the date of writing this commands are: set setnx setex append
@@ -224,6 +438,7 @@ maxmemory {{ redis_max_memory }}
 #
 # The default is:
 #
+# maxmemory-policy volatile-lru
 maxmemory-policy {{ redis_maxmemory_policy }}

 # LRU and minimal TTL algorithms are not precise algorithms but approximated
@@ -236,26 +451,29 @@ maxmemory-policy {{ redis_maxmemory_policy }}

 ############################## APPEND ONLY MODE ###############################

-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
 #
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
 #
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# Please check http://redis.io/topics/persistence for more information.

-appendonly no
+appendonly {{ redis_appendonly }}

 # The name of the append only file (default: "appendonly.aof")
-# appendfilename appendonly.aof
+
+appendfilename "{{ redis_appendfilename }}"

 # The fsync() call tells the Operating System to actually write data on disk
 # instead to wait for more data in the output buffer. Some OS will really flush
@@ -265,16 +483,19 @@ appendonly no
 #
 # no: don't fsync, just let the OS flush the data when it wants. Faster.
 # always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only one time every second. Compromise.
 #
-# The default is "everysec" that's usually the right compromise between
+# The default is "everysec", as that's usually the right compromise between
 # speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will will let the operating system flush the output buffer when
+# "no" that will let the operating system flush the output buffer when
 # it wants, for better performances (but if you can live with the idea of
 # some data loss consider the default persistence mode that's snapshotting),
 # or on the contrary, use "always" that's very slow but a bit safer than
 # everysec.
 #
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
 # If unsure, use "everysec".

 # appendfsync always
 appendfsync {{ redis_appendfsync }}
 # appendfsync no
@@ -292,21 +513,22 @@ appendfsync {{ redis_appendfsync }}
 # that will prevent fsync() from being called in the main process while a
 # BGSAVE or BGREWRITEAOF is in progress.
 #
-# This means that while another child is saving the durability of Redis is
-# the same as "appendfsync none", that in pratical terms means that it is
-# possible to lost up to 30 seconds of log in the worst scenario (with the
+# This means that while another child is saving, the durability of Redis is
+# the same as "appendfsync none". In practical terms, this means that it is
+# possible to lose up to 30 seconds of log in the worst scenario (with the
 # default Linux settings).
 #
 # If you have latency problems turn this to "yes". Otherwise leave it as
 # "no" that is the safest pick from the point of view of durability.
+
 no-appendfsync-on-rewrite no

 # Automatic rewrite of the append only file.
 # Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size will growth by the specified percentage.
+# BGREWRITEAOF when the AOF log size grows by the specified percentage.
 #
 # This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (or if no rewrite happened since the restart, the size of
+# latest rewrite (if no rewrite has happened since the restart, the size of
 # the AOF at startup is used).
 #
 # This base size is compared to the current size. If the current size is
@@ -315,9 +537,29 @@ no-appendfsync-on-rewrite no
 # is useful to avoid rewriting the AOF file even if the percentage increase
 # is reached but it is still pretty small.
 #
-# Specify a precentage of zero in order to disable the automatic AOF
+# Specify a percentage of zero in order to disable the automatic AOF
 # rewrite feature.
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceed the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write commands was
+# already issue by the script but the user don't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000

 ################################## SLOW LOG ###################################
@@ -341,98 +583,158 @@ slowlog-log-slower-than 10000
 # There is no limit to this length. Just be aware that it will consume memory.
 # You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 1024
-
-################################ VIRTUAL MEMORY ###############################
+slowlog-max-len 128

-### WARNING! Virtual Memory is deprecated in Redis 2.4
-### The use of Virtual Memory is strongly discouraged.
+############################# Event notification ##############################

-# Virtual Memory allows Redis to work with datasets bigger than the actual
-# amount of RAM needed to hold the whole dataset in memory.
-# In order to do so very used keys are taken in memory while the other keys
-# are swapped into a swap file, similarly to what operating systems do
-# with memory pages.
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at http://redis.io/topics/keyspace-events
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
 #
-# To enable VM just set 'vm-enabled' to yes, and set the following three
-# VM parameters accordingly to your needs.
-
-vm-enabled no
-# vm-enabled yes
-
-# This is the path of the Redis swap file. As you can guess, swap files
-# can't be shared by different Redis instances, so make sure to use a swap
-# file for every redis process you are running. Redis will complain if the
-# swap file is already in use.
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
 #
-# The best kind of storage for the Redis swap file (that's accessed at random)
-# is a Solid State Disk (SSD).
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
 #
-# *** WARNING *** if you are using a shared hosting the default of putting
-# the swap file under /tmp is not secure. Create a dir with access granted
-# only to Redis user and configure Redis to create the swap file there.
-vm-swap-file /tmp/redis.swap
-
-# vm-max-memory configures the VM to use at max the specified amount of
-# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
-# is, if there is still enough contiguous space in the swap file.
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
 #
-# With vm-max-memory 0 the system will swap everything it can. Not a good
-# default, just specify the max amount of RAM you can in bytes, but it's
-# better to leave some margin. For instance specify an amount of RAM
-# that's more or less between 60 and 80% of your free RAM.
-vm-max-memory 0
-
-# Redis swap files is split into pages. An object can be saved using multiple
-# contiguous pages, but pages can't be shared between different objects.
-# So if your page is too big, small objects swapped out on disk will waste
-# a lot of space. If you page is too small, there is less space in the swap
-# file (assuming you configured the same number of total swap file pages).
+# The "notify-keyspace-events" takes as argument a string that is composed
+# by zero or multiple characters. The empty string means that notifications
+# are disabled at all.
 #
-# If you use a lot of small objects, use a page size of 64 or 32 bytes.
-# If you use a lot of big objects, use a bigger page size.
-# If unsure, use the default :)
-vm-page-size 32
-
-# Number of total memory pages in the swap file.
-# Given that the page table (a bitmap of free/used pages) is taken in memory,
-# every 8 pages on disk will consume 1 byte of RAM.
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
 #
-# The total swap size is vm-page-size * vm-pages
+# notify-keyspace-events Elg
 #
-# With the default of 32-bytes memory pages and 134217728 pages Redis will
-# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
 #
-# It's better to use the smallest acceptable value for your application,
-# but the default is large in order to work in most conditions.
-vm-pages 134217728
-
-# Max number of VM I/O threads running at the same time.
-# This threads are used to read/write data from/to swap file, since they
-# also encode and decode objects from disk to memory or the reverse, a bigger
-# number of threads can help with big objects even if they can't help with
-# I/O itself as the physical device may not be able to couple with many
-# reads/writes operations at the same time.
+# notify-keyspace-events Ex
 #
-# The special value of 0 turn off threaded I/O and enables the blocking
-# Virtual Memory implementation.
-vm-max-threads 4
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""

 ############################### ADVANCED CONFIG ###############################

-{% if ansible_distribution == 'RedHat' %}
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
-hash-max-zipmap-entries 512
-hash-max-zipmap-value 64
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+
+# Similarly to hashes, small lists are also encoded in a special way in order
+# to save a lot of space. The special representation is only used when
+# you are under the following limits:
 list-max-ziplist-entries 512
 list-max-ziplist-value 64
+
+# Sets have a special encoding in just one case: when a set is composed
+# of just strings that happens to be integers in radix 10 in the range
+# of 64 bit signed integers.
+# The following configuration setting sets the limit in the size of the
+# set in order to use this special memory saving encoding.
 set-max-intset-entries 512
+
+# Similarly to hashes and lists, sorted sets are also specially encoded in
+# order to save a lot of space. This encoding is only used when the length and
+# elements of a sorted set are below the following limits:
 zset-max-ziplist-entries 128
 zset-max-ziplist-value 64
-activerehashing yes
-{% endif %}
-
+# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+# order to help rehashing the main Redis hash table (the one mapping top-level
+# keys to values). The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operation you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# active rehashing the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply form time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave  -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform accordingly to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz {{ redis_hz }}
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 32 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes

From d8c71523b1d2ebb550467e38e9c1720980c58b02 Mon Sep 17 00:00:00 2001
From: Paul Oyston
Date: Wed, 19 Feb 2014 23:01:24 +0000
Subject: [PATCH 2/2] Fixed issue with some unquoted parameters in the config.

Updated the Readme with new parameters.
---
 README.md               | 53 ++++++++++++++++++++++++++---------
 defaults/main.yml       | 17 +++++++------
 templates/redis.conf.j2 |  6 ++---
 3 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/README.md b/README.md
index 4ad8d6d..9556187 100644
--- a/README.md
+++ b/README.md
@@ -17,28 +17,43 @@ Role Variables
 The variables that can be passed to this role and a brief description about them are as follows.
 See the documentation for Redis for details:

-    redis_bind_address                   # The network address for redis to bind to
-    redis_port: 6379                     # Port for redis server
-    redis_syslog_enabled: "yes"          # enable_syslog
-    redis_databases: 16                  # Set number of databases
-    redis_database_save_times:           # Save the DB on disk (seconds changes)
+    redis_pidfile: /var/run/redis.pid    # PID File
+    redis_bind_address: "0.0.0.0"        # The network address for redis to bind to
+    redis_port: 6379                     # Port for the Redis server
+    redis_unixsocket: null               # (Optional) Socket file
+    redis_unixsocketperm: null           # (Optional) Socket permissions
+    redis_syslog_enabled: "yes"          # enable_syslog
+    redis_databases: 16                  # Set number of databases
+    redis_database_save_times:           # Save the DB on disk (seconds changes)
       - [900, 1]
       - [300, 10]
       - [60, 10000]
-    redis_dbfilename: dump.rdb           # Filename for the db
-    redis_db_dir: /var/lib/redis         # DB directory
-    redis_role: master                   # The role for this redis deployment (master/slave)
-    redis_requirepass: false             # If password is required for querying
-    redis_pass: None                     # Password if require_pass is enabled
-    redis_max_clients: 128
-    redis_max_memory: 512mb
-    redis_maxmemory_policy: volatile-lru
-    redis_appendfsync: everysec          # How often to sync the filesystem
-
-    # If redis_role is "slave", set these values too
-    redis_master_ip: 1.1.1.1             # The master's IP
-    redis_master_port: 6379              # master port
-    redis_master_auth: None              # master auth
+    redis_dbfilename: dump.rdb           # Filename for the db
+    redis_db_dir: /var/lib/redis         # DB Directory
+    redis_role: master                   # Role for redis deployment (master/slave)
+    redis_requirepass: false             # If a password is required
+    redis_pass: None                     # Password if requirepass is enabled
+    redis_max_clients: 10000             # Maximum number of clients allowed to connect
+    redis_max_memory: null               # (Optional) Maximum memory that Redis can use
+    redis_maxmemory_policy: volatile-lru
+    redis_appendfsync: everysec
+    redis_timeout: 0
+    redis_loglevel: notice               # Loglevel
+    redis_logfile: /var/log/redis.log    # File to log to
+    redis_hz: 10
+    redis_tcp_keepalive: 0
+    redis_syslog_ident: redis
+    redis_disable_tcp_nodelay: no
+    redis_appendonly: no
+    redis_appendfilename: appendonly.aof
+
+    If role is slave set these values too
+    redis_master_ip: 1.1.1.1
+    redis_master_port: 6379
+    redis_master_auth: None
+    redis_slave_serve_stale_data: yes
+    redis_slave_read_only: yes
+    redis_slave_priority: 100

 Examples
 --------

diff --git a/defaults/main.yml b/defaults/main.yml
index d254cc0..73c7e3f 100644
--- a/defaults/main.yml
+++ b/defaults/main.yml
@@ -2,6 +2,9 @@

 redis_pidfile: /var/run/redis.pid
 redis_bind_address: "0.0.0.0"
+redis_unixsocket: null
+redis_unixsocketperm: null
+redis_max_memory: null
 redis_port: 6379
 redis_syslog_enabled: "yes"
 redis_databases: 16
@@ -13,25 +16,25 @@ redis_dbfilename: dump.rdb
 redis_db_dir: /var/lib/redis
 redis_role: master
 redis_requirepass: false
-redis_pass: None
+redis_pass: null
 redis_max_clients: 10000
 redis_maxmemory_policy: volatile-lru
 redis_appendfsync: everysec
 redis_timeout: 0
 redis_loglevel: notice
-redis_logfile: /var/log/redis.log
+redis_logfile: /var/log/redis/redis.log
 redis_hz: 10
 redis_tcp_keepalive: 0
 redis_syslog_ident: redis

 #If role is slave set these values too
 redis_master_ip: 1.1.1.1
 redis_master_port: 6379
-redis_master_auth: None
-redis_slave_serve_stale_data: yes
-redis_slave_read_only: yes
+redis_master_auth: null
+redis_slave_serve_stale_data: "yes"
+redis_slave_read_only: "yes"
 redis_slave_priority: 100
-redis_disable_tcp_nodelay: no
-redis_appendonly: no
+redis_disable_tcp_nodelay: "no"
+redis_appendonly: "no"
 redis_appendfilename: appendonly.aof
\ No newline at end of file
diff --git a/templates/redis.conf.j2 b/templates/redis.conf.j2
index 425dbad..f4ac97a 100644
--- a/templates/redis.conf.j2
+++ b/templates/redis.conf.j2
@@ -51,7 +51,7 @@ port {{ redis_port }}
 # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
 # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
 # in order to get the desired effect.
-tcp-backlog 511
+# tcp-backlog 511

 # By default Redis listens for connections from all the network interfaces
 # available on the server. It is possible to listen to just one or multiple
@@ -71,7 +71,7 @@ bind {{ redis_bind_address }}
 # unixsocket /tmp/redis.sock
 # unixsocketperm 755

-{% if redis_unix_socket: %}
+{% if redis_unixsocket: %}
 unixsocket {{ redis_unixsocket }}
 {% if redis_unixsocketperm %}
 unixsocketperm {{ redis_unixsocketperm }}
@@ -108,7 +108,7 @@ loglevel {{ redis_loglevel }}
 # Specify the log file name. Also the empty string can be used to force
 # Redis to log on the standard output. Note that if you use standard
 # output for logging but daemonize, logs will be sent to /dev/null
-logfile {{ redis_logfile }}
+logfile "{{ redis_logfile }}"

 # To enable logging to the system logger, just set 'syslog-enabled' to yes,
 # and optionally update the other syslog parameters to suit your needs.
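Taken together, the two commits make several directives optional and driven entirely by role variables. As a quick illustration of how the new variables are meant to be consumed, here is a minimal playbook sketch. The role name (`redis`), the host group, and the chosen values are illustrative assumptions rather than part of the patch; the variable names themselves come from `defaults/main.yml` and `README.md` as changed above.

    ---
    # Hypothetical playbook exercising the optional variables added by this patch.
    # "redis" as the role name and "redis-servers" as the host group are assumptions.
    - hosts: redis-servers
      roles:
        - role: redis
          redis_bind_address: "127.0.0.1"
          redis_port: 6379
          # Optional: when left at its default of null, the maxmemory directive is
          # omitted from the rendered redis.conf; here it is set explicitly.
          redis_max_memory: "256mb"
          redis_maxmemory_policy: allkeys-lru
          # Optional unix socket block, rendered only when redis_unixsocket is set.
          redis_unixsocket: /var/run/redis/redis.sock
          redis_unixsocketperm: 755
          # Values that land verbatim in redis.conf are passed as quoted strings,
          # matching the quoting introduced in the second commit.
          redis_appendonly: "yes"
          redis_appendfsync: everysec

With `redis_max_memory` and `redis_unixsocket` left at their `null` defaults, the corresponding sections are simply skipped in the rendered `redis.conf`, which is the "optional instead of compulsory" behaviour described in the first commit message.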