Test script (memory cost per plain string key):
#!/usr/bin/env python
import time
import uuid

import redis

r = redis.Redis(host='localhost', port=6379, db=0)

for num_strings in (100000,):
    r.flushall()
    time.sleep(1.0)
    initial_size = r.dbsize()
    initial_info = r.info()
    for i in range(num_strings):
        r.set(str(uuid.uuid4()), time.time())
        # Variant with a TTL (modern redis-py argument order: name, time, value):
        # r.setex(str(uuid.uuid4()), 100000, time.time())
    final_size = r.dbsize()
    final_info = r.info()
    print("For %s strings." % (num_strings,))
    print("Keys: %s => %s" % (initial_size, final_size))
    print("Memory: %s => %s" % (initial_info['used_memory'],
                                final_info['used_memory']))
    print("Memory per key: %d" % ((int(final_info['used_memory']) -
                                   int(initial_info['used_memory'])) // num_strings))
Test script (packing integer values into intset-encoded sets):
#!/usr/bin/env python
import math
import time

import redis

r = redis.Redis(host='localhost', port=6379, db=0)
# Largest set that Redis will keep in the compact intset encoding.
set_capacity = int(r.config_get("set-max-intset-entries")["set-max-intset-entries"])

def set_name(i, num_strings, set_capacity):
    # Spread the values over just enough sets that each stays within the limit.
    set_num = int(math.ceil(num_strings / float(set_capacity)))
    return "s%d" % (i % set_num)

for num_strings in (100000,):
    r.flushall()
    time.sleep(1.0)
    initial_size = r.dbsize()
    initial_info = r.info()
    for i in range(num_strings):
        # r.sadd("s", str(i))  # naive variant: one oversized set
        r.sadd(set_name(i, num_strings, set_capacity), str(i))
    final_size = r.dbsize()
    final_info = r.info()
    print("For %s strings." % (num_strings,))
    print("Keys: %s => %s" % (initial_size, final_size))
    print("Memory: %s => %s" % (initial_info['used_memory'],
                                final_info['used_memory']))
    print("Memory per value: %d" % ((int(final_info['used_memory']) -
                                     int(initial_info['used_memory'])) // num_strings))
Note: Redis only applies the compact intset encoding to a set when every member is an integer.
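You can verify this from the client: a set holding only integers reports intset, and a single non-integer member is enough to break the compact encoding. A minimal sketch using redis-py's object() helper (the fallback encoding is hashtable on older servers, listpack on recent ones):

import redis

r = redis.Redis(host='localhost', port=6379, db=0)
r.delete("enc_test")
r.sadd("enc_test", "1", "2", "3")
print(r.object("encoding", "enc_test"))  # b'intset': every member is an integer
r.sadd("enc_test", "abc")
print(r.object("encoding", "enc_test"))  # compact encoding is lost after a non-integer member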
Unless you can guarantee that the machine always has half of its memory free, do not persist data with snapshots or compact the AOF file with BGREWRITEAOF. When Redis runs bgsave it forks, and the child process writes the in-memory data to disk. Because fork uses copy-on-write, the two Redis processes initially share all memory pages. Whenever the parent updates a value, the affected shared page is duplicated before the write, so under a sufficiently heavy write load the shared pages are copied rapidly, physical memory is exhausted, and the system is forced into swap. The Redis service then becomes extremely unstable, with the whole system stalled on disk I/O.
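If you still rely on fork-based persistence, the usual mitigation is to disable the automatic triggers and run BGSAVE/BGREWRITEAOF yourself during off-peak hours. A minimal sketch of turning the triggers off at runtime; treat the settings as illustrative, not a recommendation:

import redis

r = redis.Redis(host='localhost', port=6379, db=0)
# Remove all RDB save points so Redis never forks for a snapshot on its own.
r.config_set("save", "")
# A percentage of 0 disables automatic AOF rewrites (a manual BGREWRITEAOF still works).
r.config_set("auto-aof-rewrite-percentage", "0")
print(r.config_get("save"))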