# maxmemory.tcl — tests for Redis maxmemory limits, eviction policies,
# and replica buffer memory accounting.
  1. start_server {tags {"maxmemory external:skip"}} {
  2. test "Without maxmemory small integers are shared" {
  3. r config set maxmemory 0
  4. r set a 1
  5. assert {[r object refcount a] > 1}
  6. }
  7. test "With maxmemory and non-LRU policy integers are still shared" {
  8. r config set maxmemory 1073741824
  9. r config set maxmemory-policy allkeys-random
  10. r set a 1
  11. assert {[r object refcount a] > 1}
  12. }
  13. test "With maxmemory and LRU policy integers are not shared" {
  14. r config set maxmemory 1073741824
  15. r config set maxmemory-policy allkeys-lru
  16. r set a 1
  17. r config set maxmemory-policy volatile-lru
  18. r set b 1
  19. assert {[r object refcount a] == 1}
  20. assert {[r object refcount b] == 1}
  21. r config set maxmemory 0
  22. }
  23. foreach policy {
  24. allkeys-random allkeys-lru allkeys-lfu volatile-lru volatile-lfu volatile-random volatile-ttl
  25. } {
  26. test "maxmemory - is the memory limit honoured? (policy $policy)" {
  27. # make sure to start with a blank instance
  28. r flushall
  29. # Get the current memory limit and calculate a new limit.
  30. # We just add 100k to the current memory size so that it is
  31. # fast for us to reach that limit.
  32. set used [s used_memory]
  33. set limit [expr {$used+100*1024}]
  34. r config set maxmemory $limit
  35. r config set maxmemory-policy $policy
  36. # Now add keys until the limit is almost reached.
  37. set numkeys 0
  38. while 1 {
  39. r setex [randomKey] 10000 x
  40. incr numkeys
  41. if {[s used_memory]+4096 > $limit} {
  42. assert {$numkeys > 10}
  43. break
  44. }
  45. }
  46. # If we add the same number of keys already added again, we
  47. # should still be under the limit.
  48. for {set j 0} {$j < $numkeys} {incr j} {
  49. r setex [randomKey] 10000 x
  50. }
  51. assert {[s used_memory] < ($limit+4096)}
  52. }
  53. }
  54. foreach policy {
  55. allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl
  56. } {
  57. test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" {
  58. # make sure to start with a blank instance
  59. r flushall
  60. # Get the current memory limit and calculate a new limit.
  61. # We just add 100k to the current memory size so that it is
  62. # fast for us to reach that limit.
  63. set used [s used_memory]
  64. set limit [expr {$used+100*1024}]
  65. r config set maxmemory $limit
  66. r config set maxmemory-policy $policy
  67. # Now add keys until the limit is almost reached.
  68. set numkeys 0
  69. while 1 {
  70. r set [randomKey] x
  71. incr numkeys
  72. if {[s used_memory]+4096 > $limit} {
  73. assert {$numkeys > 10}
  74. break
  75. }
  76. }
  77. # If we add the same number of keys already added again and
  78. # the policy is allkeys-* we should still be under the limit.
  79. # Otherwise we should see an error reported by Redis.
  80. set err 0
  81. for {set j 0} {$j < $numkeys} {incr j} {
  82. if {[catch {r set [randomKey] x} e]} {
  83. if {[string match {*used memory*} $e]} {
  84. set err 1
  85. }
  86. }
  87. }
  88. if {[string match allkeys-* $policy]} {
  89. assert {[s used_memory] < ($limit+4096)}
  90. } else {
  91. assert {$err == 1}
  92. }
  93. }
  94. }
  95. foreach policy {
  96. volatile-lru volatile-lfu volatile-random volatile-ttl
  97. } {
  98. test "maxmemory - policy $policy should only remove volatile keys." {
  99. # make sure to start with a blank instance
  100. r flushall
  101. # Get the current memory limit and calculate a new limit.
  102. # We just add 100k to the current memory size so that it is
  103. # fast for us to reach that limit.
  104. set used [s used_memory]
  105. set limit [expr {$used+100*1024}]
  106. r config set maxmemory $limit
  107. r config set maxmemory-policy $policy
  108. # Now add keys until the limit is almost reached.
  109. set numkeys 0
  110. while 1 {
  111. # Odd keys are volatile
  112. # Even keys are non volatile
  113. if {$numkeys % 2} {
  114. r setex "key:$numkeys" 10000 x
  115. } else {
  116. r set "key:$numkeys" x
  117. }
  118. if {[s used_memory]+4096 > $limit} {
  119. assert {$numkeys > 10}
  120. break
  121. }
  122. incr numkeys
  123. }
  124. # Now we add the same number of volatile keys already added.
  125. # We expect Redis to evict only volatile keys in order to make
  126. # space.
  127. set err 0
  128. for {set j 0} {$j < $numkeys} {incr j} {
  129. catch {r setex "foo:$j" 10000 x}
  130. }
  131. # We should still be under the limit.
  132. assert {[s used_memory] < ($limit+4096)}
  133. # However all our non volatile keys should be here.
  134. for {set j 0} {$j < $numkeys} {incr j 2} {
  135. assert {[r exists "key:$j"]}
  136. }
  137. }
  138. }
  139. }
  140. # Calculate query buffer memory of slave
  141. proc slave_query_buffer {srv} {
  142. set clients [split [$srv client list] "\r\n"]
  143. set c [lsearch -inline $clients *flags=S*]
  144. if {[string length $c] > 0} {
  145. assert {[regexp {qbuf=([0-9]+)} $c - qbuf]}
  146. assert {[regexp {qbuf-free=([0-9]+)} $c - qbuf_free]}
  147. return [expr $qbuf + $qbuf_free]
  148. }
  149. return 0
  150. }
  151. proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
  152. start_server {tags {"maxmemory external:skip"}} {
  153. start_server {} {
  154. set slave_pid [s process_id]
  155. test "$test_name" {
  156. set slave [srv 0 client]
  157. set slave_host [srv 0 host]
  158. set slave_port [srv 0 port]
  159. set master [srv -1 client]
  160. set master_host [srv -1 host]
  161. set master_port [srv -1 port]
  162. # Disable slow log for master to avoid memory growth in slow env.
  163. $master config set slowlog-log-slower-than -1
  164. # add 100 keys of 100k (10MB total)
  165. for {set j 0} {$j < 100} {incr j} {
  166. $master setrange "key:$j" 100000 asdf
  167. }
  168. # make sure master doesn't disconnect slave because of timeout
  169. $master config set repl-timeout 1200 ;# 20 minutes (for valgrind and slow machines)
  170. $master config set maxmemory-policy allkeys-random
  171. $master config set client-output-buffer-limit "replica 100000000 100000000 300"
  172. $master config set repl-backlog-size [expr {10*1024}]
  173. $slave slaveof $master_host $master_port
  174. wait_for_condition 50 100 {
  175. [s 0 master_link_status] eq {up}
  176. } else {
  177. fail "Replication not started."
  178. }
  179. # measure used memory after the slave connected and set maxmemory
  180. set orig_used [s -1 used_memory]
  181. set orig_client_buf [s -1 mem_clients_normal]
  182. set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  183. set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
  184. set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 32*1024}]
  185. if {$limit_memory==1} {
  186. $master config set maxmemory $limit
  187. }
  188. # put the slave to sleep
  189. set rd_slave [redis_deferring_client]
  190. exec kill -SIGSTOP $slave_pid
  191. # send some 10mb worth of commands that don't increase the memory usage
  192. if {$pipeline == 1} {
  193. set rd_master [redis_deferring_client -1]
  194. for {set k 0} {$k < $cmd_count} {incr k} {
  195. $rd_master setrange key:0 0 [string repeat A $payload_len]
  196. }
  197. for {set k 0} {$k < $cmd_count} {incr k} {
  198. #$rd_master read
  199. }
  200. } else {
  201. for {set k 0} {$k < $cmd_count} {incr k} {
  202. $master setrange key:0 0 [string repeat A $payload_len]
  203. }
  204. }
  205. set new_used [s -1 used_memory]
  206. set slave_buf [s -1 mem_clients_slaves]
  207. set client_buf [s -1 mem_clients_normal]
  208. set mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  209. set used_no_repl [expr {$new_used - $mem_not_counted_for_evict - [slave_query_buffer $master]}]
  210. # we need to exclude replies buffer and query buffer of replica from used memory.
  211. # removing the replica (output) buffers is done so that we are able to measure any other
  212. # changes to the used memory and see that they're insignificant (the test's purpose is to check that
  213. # the replica buffers are counted correctly, so the used memory growth after deducting them
  214. # should be nearly 0).
  215. # we remove the query buffers because on slow test platforms, they can accumulate many ACKs.
  216. set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}]
  217. assert {[$master dbsize] == 100}
  218. assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers
  219. set delta_max [expr {$cmd_count / 2}] ;# 1 byte unaccounted for, with 1M commands will consume some 1MB
  220. assert {$delta < $delta_max && $delta > -$delta_max}
  221. $master client kill type slave
  222. set killed_used [s -1 used_memory]
  223. set killed_slave_buf [s -1 mem_clients_slaves]
  224. set killed_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
  225. # we need to exclude replies buffer and query buffer of slave from used memory after kill slave
  226. set killed_used_no_repl [expr {$killed_used - $killed_mem_not_counted_for_evict - [slave_query_buffer $master]}]
  227. set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
  228. assert {$killed_slave_buf == 0}
  229. assert {$delta_no_repl > -$delta_max && $delta_no_repl < $delta_max}
  230. }
  231. # unfreeze slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server
  232. exec kill -SIGCONT $slave_pid
  233. }
  234. }
  235. }
  236. # test that slave buffer are counted correctly
  237. # we wanna use many small commands, and we don't wanna wait long
  238. # so we need to use a pipeline (redis_deferring_client)
  239. # that may cause query buffer to fill and induce eviction, so we disable it
  240. test_slave_buffers {slave buffer are counted correctly} 1000000 10 0 1
  241. # test that slave buffer don't induce eviction
  242. # test again with fewer (and bigger) commands without pipeline, but with eviction
  243. test_slave_buffers "replica buffer don't induce eviction" 100000 100 1 0
  244. start_server {tags {"maxmemory external:skip"}} {
  245. test {Don't rehash if used memory exceeds maxmemory after rehash} {
  246. r config set maxmemory 0
  247. r config set maxmemory-policy allkeys-random
  248. # Next rehash size is 8192, that will eat 64k memory
  249. populate 4096 "" 1
  250. set used [s used_memory]
  251. set limit [expr {$used + 10*1024}]
  252. r config set maxmemory $limit
  253. r set k1 v1
  254. # Next writing command will trigger evicting some keys if last
  255. # command trigger DB dict rehash
  256. r set k2 v2
  257. # There must be 4098 keys because redis doesn't evict keys.
  258. r dbsize
  259. } {4098}
  260. }
  261. start_server {tags {"maxmemory external:skip"}} {
  262. test {client tracking don't cause eviction feedback loop} {
  263. r config set maxmemory 0
  264. r config set maxmemory-policy allkeys-lru
  265. r config set maxmemory-eviction-tenacity 100
  266. # 10 clients listening on tracking messages
  267. set clients {}
  268. for {set j 0} {$j < 10} {incr j} {
  269. lappend clients [redis_deferring_client]
  270. }
  271. foreach rd $clients {
  272. $rd HELLO 3
  273. $rd read ; # Consume the HELLO reply
  274. $rd CLIENT TRACKING on
  275. $rd read ; # Consume the CLIENT reply
  276. }
  277. # populate 300 keys, with long key name and short value
  278. for {set j 0} {$j < 300} {incr j} {
  279. set key $j[string repeat x 1000]
  280. r set $key x
  281. # for each key, enable caching for this key
  282. foreach rd $clients {
  283. $rd get $key
  284. $rd read
  285. }
  286. }
  287. # we need to wait one second for the client querybuf excess memory to be
  288. # trimmed by cron, otherwise the INFO used_memory and CONFIG maxmemory
  289. # below (on slow machines) won't be "atomic" and won't trigger eviction.
  290. after 1100
  291. # set the memory limit which will cause a few keys to be evicted
  292. # we need to make sure to evict keynames of a total size of more than
  293. # 16kb since the (PROTO_REPLY_CHUNK_BYTES), only after that the
  294. # invalidation messages have a chance to trigger further eviction.
  295. set used [s used_memory]
  296. set limit [expr {$used - 40000}]
  297. r config set maxmemory $limit
  298. # make sure some eviction happened
  299. set evicted [s evicted_keys]
  300. if {$::verbose} { puts "evicted: $evicted" }
  301. # make sure we didn't drain the database
  302. assert_range [r dbsize] 200 300
  303. assert_range $evicted 10 50
  304. foreach rd $clients {
  305. $rd read ;# make sure we have some invalidation message waiting
  306. $rd close
  307. }
  308. # eviction continues (known problem described in #8069)
  309. # for now this test only make sures the eviction loop itself doesn't
  310. # have feedback loop
  311. set evicted [s evicted_keys]
  312. if {$::verbose} { puts "evicted: $evicted" }
  313. }
  314. }