Chunked loading of RDB to prevent Redis from stalling while reading very large keys.

This commit is contained in:
yoav
2012-12-12 15:59:22 +02:00
committed by antirez
parent 112e763618
commit 9f6f436a51
5 changed files with 45 additions and 15 deletions
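
The change is confined to the rio I/O abstraction: instead of issuing a single read or write call for the full length of a value, rioRead() and rioWrite() now loop, clamping each pass to max_processing_chunk bytes (a value of 0 disables chunking), so one multi-gigabyte key no longer turns into one blocking I/O call. A minimal sketch of the clamp in isolation (clamp_chunk is a hypothetical name for illustration, not part of the commit):

    /* Derive the size of one pass from the configured cap. */
    size_t clamp_chunk(size_t cap, size_t len) {
        /* cap == 0 disables chunking: process everything at once. */
        return (cap && cap < len) ? cap : len;
    }

For example, clamp_chunk(8<<20, 1<<30) returns 8 MB, so a 1 GB value is processed in 128 bounded passes rather than one.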


@@ -53,6 +53,12 @@ struct _rio {
     /* The current checksum */
     uint64_t cksum;
 
+    /* number of bytes read or written */
+    size_t processed_bytes;
+
+    /* maximum single read or write chunk size */
+    size_t max_processing_chunk;
+
     /* Backend-specific vars. */
     union {
         struct {
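
Callers opt in by setting the cap after initializing the rio object; a zero cap keeps the old single-call behavior. A minimal sketch, assuming the existing rioInitWithFile() initializer from rio.c; the 16 MB cap and the helper name below are illustrative, and the commit's real wiring lives in the other changed files not shown in this hunk:

    #include <stdio.h>
    #include "rio.h"

    /* Hypothetical helper: open an RDB file for chunked reading. */
    void open_rdb_chunked(rio *rdb, const char *path) {
        FILE *fp = fopen(path, "r");
        if (fp == NULL) return;
        rioInitWithFile(rdb, fp);
        rdb->max_processing_chunk = 16*1024*1024; /* illustrative 16 MB cap */
    }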
@@ -74,16 +74,29 @@ typedef struct _rio rio;
  * if needed. */
 static inline size_t rioWrite(rio *r, const void *buf, size_t len) {
-    if (r->update_cksum) r->update_cksum(r,buf,len);
-    return r->write(r,buf,len);
+    while (len) {
+        size_t bytes_to_write = (r->max_processing_chunk && r->max_processing_chunk < len) ? r->max_processing_chunk : len;
+        if (r->update_cksum) r->update_cksum(r,buf,bytes_to_write);
+        if (r->write(r,buf,bytes_to_write) == 0)
+            return 0;
+        buf = (char*)buf + bytes_to_write;
+        len -= bytes_to_write;
+        r->processed_bytes += bytes_to_write;
+    }
+    return 1;
 }
 
 static inline size_t rioRead(rio *r, void *buf, size_t len) {
-    if (r->read(r,buf,len) == 1) {
-        if (r->update_cksum) r->update_cksum(r,buf,len);
-        return 1;
-    }
-    return 0;
+    while (len) {
+        size_t bytes_to_read = (r->max_processing_chunk && r->max_processing_chunk < len) ? r->max_processing_chunk : len;
+        if (r->read(r,buf,bytes_to_read) == 0)
+            return 0;
+        if (r->update_cksum) r->update_cksum(r,buf,bytes_to_read);
+        buf = (char*)buf + bytes_to_read;
+        len -= bytes_to_read;
+        r->processed_bytes += bytes_to_read;
+    }
+    return 1;
 }
 
 static inline off_t rioTell(rio *r) {
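
Both loops follow the same pattern; only the checksum ordering differs (a write checksums before writing, a read checksums after a successful read). A self-contained toy re-implementation of the read side (independent of rio.h, all names hypothetical) that shows one logical 1 MB read turning into many bounded backend calls:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static size_t backend_calls = 0;

    /* Mock backend: pretends to read len bytes and counts invocations. */
    static size_t mock_read(void *buf, size_t len) {
        memset(buf, 0xAB, len);
        backend_calls++;
        return 1;
    }

    /* Chunked read mirroring the rioRead() loop above. */
    static size_t chunked_read(void *buf, size_t len, size_t max_chunk) {
        while (len) {
            size_t n = (max_chunk && max_chunk < len) ? max_chunk : len;
            if (mock_read(buf, n) == 0) return 0;
            buf = (char*)buf + n;
            len -= n;
        }
        return 1;
    }

    int main(void) {
        static char big[1024*1024];            /* one "very large" value  */
        chunked_read(big, sizeof(big), 8192);  /* cap each pass at 8 KB   */
        printf("%zu backend calls\n", backend_calls); /* prints: 128 */
        return 0;
    }

The caller still gets a single 1/0 success result, but between passes the backend (and, via processed_bytes, any progress accounting layered on top) regains control, which is what keeps Redis responsive while loading huge keys.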