mirror of
https://github.com/fluencelabs/redis
synced 2025-04-25 10:32:14 +00:00
Changed the redis-sha1.rb utility to make it more robust against non-meaningful changes in the dataset.
This commit is contained in:
parent
5ad3c8c852
commit
b32627cdc1
2
TODO
2
TODO
@ -61,6 +61,8 @@ it's not a guarantee they'll ever get implemented ;)
|
|||||||
* zmalloc() should avoid to add a private header for archs where there is some other kind of libc-specific way to get the size of a malloced block. Already done for Mac OS X.
|
* zmalloc() should avoid to add a private header for archs where there is some other kind of libc-specific way to get the size of a malloced block. Already done for Mac OS X.
|
||||||
* Read-only mode.
|
* Read-only mode.
|
||||||
* Pattern-matching replication.
|
* Pattern-matching replication.
|
||||||
|
* Don't save empty lists / sets / zsets on disk with snapshotting.
|
||||||
|
* Remove keys when a list / set / zset reaches length of 0.
|
||||||
|
|
||||||
DOCUMENTATION WISHLIST
|
DOCUMENTATION WISHLIST
|
||||||
======================
|
======================
|
||||||
|
2
redis.c
2
redis.c
@ -923,7 +923,7 @@ void backgroundRewriteDoneHandler(int statloc) {
|
|||||||
close(fd);
|
close(fd);
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
redisLog(REDIS_WARNING,"Parent diff flushed into the new append log file with success");
|
redisLog(REDIS_NOTICE,"Parent diff flushed into the new append log file with success (%lu bytes)",sdslen(server.bgrewritebuf));
|
||||||
/* Now our work is to rename the temp file into the stable file. And
|
/* Now our work is to rename the temp file into the stable file. And
|
||||||
* switch the file descriptor used by the server for append only. */
|
* switch the file descriptor used by the server for append only. */
|
||||||
if (rename(tmpfile,server.appendfilename) == -1) {
|
if (rename(tmpfile,server.appendfilename) == -1) {
|
||||||
|
@ -16,17 +16,31 @@ def redisSha1(opts={})
|
|||||||
sha1=""
|
sha1=""
|
||||||
r = Redis.new(opts)
|
r = Redis.new(opts)
|
||||||
r.keys('*').sort.each{|k|
|
r.keys('*').sort.each{|k|
|
||||||
sha1 = Digest::SHA1.hexdigest(sha1+k)
|
|
||||||
vtype = r.type?(k)
|
vtype = r.type?(k)
|
||||||
if vtype == "string"
|
if vtype == "string"
|
||||||
|
len = 1
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+k)
|
||||||
sha1 = Digest::SHA1.hexdigest(sha1+r.get(k))
|
sha1 = Digest::SHA1.hexdigest(sha1+r.get(k))
|
||||||
elsif vtype == "list"
|
elsif vtype == "list"
|
||||||
sha1 = Digest::SHA1.hexdigest(sha1+r.list_range(k,0,-1).join("\x01"))
|
len = r.llen(k)
|
||||||
|
if len != 0
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+k)
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+r.list_range(k,0,-1).join("\x01"))
|
||||||
|
end
|
||||||
elsif vtype == "set"
|
elsif vtype == "set"
|
||||||
sha1 = Digest::SHA1.hexdigest(sha1+r.set_members(k).to_a.sort.join("\x02"))
|
len = r.scard(k)
|
||||||
|
if len != 0
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+k)
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+r.set_members(k).to_a.sort.join("\x02"))
|
||||||
|
end
|
||||||
elsif vtype == "zset"
|
elsif vtype == "zset"
|
||||||
sha1 = Digest::SHA1.hexdigest(sha1+r.zrange(k,0,-1).join("\x01"))
|
len = r.zcard(k)
|
||||||
|
if len != 0
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+k)
|
||||||
|
sha1 = Digest::SHA1.hexdigest(sha1+r.zrange(k,0,-1).join("\x01"))
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
# puts "#{k} => #{sha1}" if len != 0
|
||||||
}
|
}
|
||||||
sha1
|
sha1
|
||||||
end
|
end
|
||||||
|
Loading…
x
Reference in New Issue
Block a user