AOF流程全解析

272 阅读18分钟

在serverCron中,或者redis的启动流程中。可以看到包含了大量aof相关的逻辑。现在来梳理一下aof的相关流程。

首先要了解的是aof的写入分为2条线。一条是在主线程执行aof的数据写入和刷盘,一条线是在后台进程生成瘦身后的aof文件。 这两条线可以同时进行也可以分开进行。当后台进程完成任务后会使用这个aof文件强制覆盖之前的aof文件(就恢复过程来讲,瘦身后的aof恢复更快,所以优先级更高)。 这里就不说aof的数据恢复流程了,在redis的启动流程中有提到,虽然没有细到代码级别。 存储aof数据的缓冲区有2个,一个是aof_buf,它是普通的aof数据存储缓冲区,每当执行command时,根据command类型会触发propagate()方法,这个方法就是整个aof的入口,会间接触发feedAppendOnlyFile方法,它的主要逻辑就是将数据写入到aof_buf中

/**
 * 作为整个aof写入的入口  首先要清楚触发该方法的时机是什么 redis在执行一些command时,如果该命令应当被追加到aof中且此时aof处于开启状态
 * 就会自动的将数据追加到aof中
 * @param cmd 本次执行的command
 * @param dictid 本次操作的redisObject所在的db
 * @param argv 执行本次command的所有参数
 * @param argc 参数数量
 */
/**
 * Entry point of every AOF append. Called after a command has been executed,
 * when the command should be propagated to the AOF and AOF is enabled. The
 * command is serialized into RESP form and appended to server.aof_buf; if a
 * background AOF rewrite child is running, the same bytes are also appended
 * to the rewrite buffer so they can be piped to the child.
 *
 * @param cmd    the command that was just executed
 * @param dictid index of the db the command operated on
 * @param argv   full argument vector of the command
 * @param argc   number of arguments in argv
 */
void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int argc) {
    sds buf = sdsempty();
    robj *tmpargv[3];

    /* The DB this command was targeting is not the same as the last command
     * we appended. To issue a SELECT command is needed.
     * Emit a SELECT first so replay lands in the right db. */
    if (dictid != server.aof_selected_db) {
        char seldb[64];

        snprintf(seldb,sizeof(seldb),"%d",dictid);
        buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
            (unsigned long)strlen(seldb),seldb);
        server.aof_selected_db = dictid;
    }

    /* Serialize the command. Commands with relative expires are rewritten
     * into an absolute PEXPIREAT so replaying the AOF later is time-safe. */
    if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
        cmd->proc == expireatCommand) {
        /* Translate EXPIRE/PEXPIRE/EXPIREAT into PEXPIREAT */
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
        /* SETEX/PSETEX: split into a plain SET plus an absolute expire. */
    } else if (cmd->proc == setexCommand || cmd->proc == psetexCommand) {
        /* Translate SETEX/PSETEX to SET and PEXPIREAT */
        tmpargv[0] = createStringObject("SET",3);
        tmpargv[1] = argv[1];
        tmpargv[2] = argv[3];
        buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
        decrRefCount(tmpargv[0]);
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else if (cmd->proc == setCommand && argc > 3) {
        int i;
        robj *exarg = NULL, *pxarg = NULL;
        for (i = 3; i < argc; i ++) {
            if (!strcasecmp(argv[i]->ptr, "ex")) exarg = argv[i+1];
            if (!strcasecmp(argv[i]->ptr, "px")) pxarg = argv[i+1];
        }
        serverAssert(!(exarg && pxarg));

        if (exarg || pxarg) {
            /* Translate SET [EX seconds][PX milliseconds] to SET and PEXPIREAT */
            buf = catAppendOnlyGenericCommand(buf,3,argv);
            if (exarg)
                buf = catAppendOnlyExpireAtCommand(buf,server.expireCommand,argv[1],
                                                   exarg);
            if (pxarg)
                buf = catAppendOnlyExpireAtCommand(buf,server.pexpireCommand,argv[1],
                                                   pxarg);
        } else {
            buf = catAppendOnlyGenericCommand(buf,argc,argv);
        }
    } else {
        /* All the other commands don't need translation or need the
         * same translation already operated in the command vector
         * for the replication itself. */
        buf = catAppendOnlyGenericCommand(buf,argc,argv);
    }

    /* At this point 'buf' holds the fully serialized command(s). */

    /* Append to the AOF buffer. This will be flushed on disk just before
     * of re-entering the event loop, so before the client will get a
     * positive reply about the operation performed.
     * The actual file write happens later via flushAppendOnlyFile(). */
    if (server.aof_state == AOF_ON)
        server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf));

    /* If a background append only file rewriting is in progress we want to
     * accumulate the differences between the child DB and the current one
     * in a buffer, so that when the child process will do its work we
     * can append the differences to the new append only file.
     * These bytes will be sent to the child through a pipe. */
    if (server.aof_child_pid != -1)
        aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));

    sdsfree(buf);
}

在最后我们看到如果存在aof子进程,会执行aofRewriteBufferAppend方法,这个方法又是在做什么呢?

/* Append data to the AOF rewrite buffer, allocating new blocks if needed.
 * 当此时aof子进程正在运行, 同时又执行了新的command(会产生新的aof数据) 那么会在此时执行该方法 将数据也写入一份到aof_rewrite_buf_blocks
 * 这部分数据通过pipe进行传输 所以能保证进程间的通信安全
 * */
/* Append data to the AOF rewrite buffer, allocating new blocks if needed.
 *
 * Called while an AOF rewrite child is running and new commands keep
 * producing AOF data: the bytes are copied into the block list
 * server.aof_rewrite_buf_blocks, and a writable file event is installed on
 * the pipe so the accumulated diff is streamed to the child.
 *
 * @param s   bytes to append (serialized commands)
 * @param len number of bytes in s
 */
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
    listNode *ln = listLast(server.aof_rewrite_buf_blocks);
    aofrwblock *block = ln ? ln->value : NULL;

    while(len) {
        /* If we already got at least an allocated block, try appending
         * at least some piece into it.
         * Fill the tail block first before allocating a new one. */
        if (block) {
            unsigned long thislen = (block->free < len) ? block->free : len;
            if (thislen) {  /* The current block is not already full. */
                memcpy(block->buf+block->used, s, thislen);
                block->used += thislen;
                block->free -= thislen;
                s += thislen;
                len -= thislen;
            }
        }

        /* Data still remaining: allocate a fresh block and link it at the
         * tail of the list. */
        if (len) { /* First block to allocate, or need another block. */
            int numblocks;

            block = zmalloc(sizeof(*block));
            block->free = AOF_RW_BUF_BLOCK_SIZE;
            block->used = 0;
            listAddNodeTail(server.aof_rewrite_buf_blocks,block);

            /* Log every time we cross more 10 or 100 blocks, respectively
             * as a notice or warning. */
            numblocks = listLength(server.aof_rewrite_buf_blocks);
            if (((numblocks+1) % 10) == 0) {
                int level = ((numblocks+1) % 100) == 0 ? LL_WARNING :
                                                         LL_NOTICE;
                serverLog(level,"Background AOF buffer size: %lu MB",
                    aofRewriteBufferSize()/(1024*1024));
            }
        }
    }

    /* Install a file event to send data to the rewrite child if there is
     * not one already.
     * The parent/child transfer uses a pipe: registering a writable event
     * on the write end makes the event loop push the buffered diff into the
     * kernel pipe buffer, from which the child reads. */
    if (aeGetFileEvents(server.el,server.aof_pipe_write_data_to_child) == 0) {
        aeCreateFileEvent(server.el, server.aof_pipe_write_data_to_child,
            AE_WRITABLE, aofChildWriteDiffData, NULL);
    }
}

将数据写入到了一个特殊的缓冲区,aof_rewrite_buf_blocks中,先抛开子进程的aof写入,只看主进程是如何处理aof_buf中的数据的。 在beforeSleep中会看到这样一行代码

  /* Send the invalidation messages to clients participating to the
     * client side caching protocol in broadcasting (BCAST) mode.
     * TODO 忽略链路相关的
     * */
    trackingBroadcastInvalidationMessages();

    /* Write the AOF buffer on disk aof文件刷盘 */
    flushAppendOnlyFile(0);

    /* Handle writes with pending output buffers.
     * 处理client_pending_writes队列中的任务
     * */
    handleClientsWithPendingWritesUsingThreads();

    /* Close clients that need to be closed asynchronous
     * 处理to_close队列中的client
     * */
    freeClientsInAsyncFreeQueue();

以及serverCron的

  /* AOF write errors: in this case we have a buffer to flush as well and
     * clear the AOF error in case of success to make the DB writable again,
     * however to try every second is enough in case of 'hz' is set to
     * a higher frequency.
     * 每间隔一段时间 发现上一次aof的写入失败了 就在此时执行aof写入以及刷盘
     * */
    run_with_period(1000) {
        if (server.aof_last_write_status == C_ERR)
            flushAppendOnlyFile(0);
    }

  /* AOF postponed flush: Try at every cron cycle if the slow fsync
     * completed.
     * 当某次尝试对aof文件进行刷盘时 发现已经存在一个刷盘任务了 需要延迟执行刷盘操作就会在此时执行
     * */
    if (server.aof_flush_postponed_start) flushAppendOnlyFile(0);

来看看这个方法是如何处理aof_buf中的数据的吧

/**
 * 在服务器即将被关闭 或者beforeSleep中 会执行该方法 如果上次刷盘失败也会尝试刷盘 当服务器即将被关闭时是强制刷盘 其他情况非强制
 * 从这里可以看出redis单节点是存在数据丢失的情况的 如何通过副本解决这个问题是之后要关注的
 * @param force
 */
/**
 * Write the content of server.aof_buf to the AOF file and fsync it when the
 * configured policy requires it. Called from beforeSleep(), from serverCron()
 * (retry after a failed write, or a postponed flush), and on shutdown.
 *
 * Note that with AOF_FSYNC_EVERYSEC a non-forced flush may be postponed for
 * up to 2 seconds while a background fsync is in progress, so a single Redis
 * node can lose the most recent writes on a crash.
 *
 * @param force when non-zero, write even if a background fsync is still in
 *              progress (blocking on the slow disk); when zero the write may
 *              be postponed.
 *
 * Fix vs. the transcribed original: the bare `return` in the postponement
 * branch was missing its semicolon (a syntax error); restored to `return;`.
 */
void flushAppendOnlyFile(int force) {
    ssize_t nwritten;
    int sync_in_progress = 0;
    mstime_t latency;

    /* Commands append to aof_buf as they execute; this function drains the
     * buffer into the file. Even with an empty buffer we may still owe the
     * OS an fsync, so check for that first. */
    if (sdslen(server.aof_buf) == 0) {
        /* Check if we need to do fsync even the aof buffer is empty,
         * because previously in AOF_FSYNC_EVERYSEC mode, fsync is
         * called only when aof buffer is not empty, so if users
         * stop write commands before fsync called in one second,
         * the data in page cache cannot be flushed in time.
         *
         * Conditions: written bytes not yet fully fsynced, at least one
         * second since the last fsync, and no fsync job already queued. */
        if (server.aof_fsync == AOF_FSYNC_EVERYSEC &&
            server.aof_fsync_offset != server.aof_current_size &&
            server.unixtime > server.aof_last_fsync &&
            !(sync_in_progress = aofFsyncInProgress())) {
            goto try_fsync;
        } else {
            /* Nothing to write and no fsync needed: done. */
            return;
        }
    }

    /* AOF_FSYNC_EVERYSEC fsyncs in a background (bio) thread; check whether
     * one is already pending. */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
        sync_in_progress = aofFsyncInProgress();

    /* Non-forced mode: we may postpone the write. */
    if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) {
        /* With this append fsync policy we do background fsyncing.
         * If the fsync is still in progress we can try to delay
         * the write for a couple of seconds. */
        if (sync_in_progress) {
            if (server.aof_flush_postponed_start == 0) {
                /* No previous write postponing, remember that we are
                 * postponing the flush and return. */
                server.aof_flush_postponed_start = server.unixtime;
                return;
                /* Within 2 seconds, repeated non-forced calls are no-ops. */
            } else if (server.unixtime - server.aof_flush_postponed_start < 2) {
                /* We were already waiting for fsync to finish, but for less
                 * than two seconds this is still ok. Postpone again. */
                return;
            }
            /* Otherwise fall trough, and go write since we can't wait
             * over two seconds.
             * We waited more than 2 seconds: write anyway on this thread. */
            server.aof_delayed_fsync++;
            serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
        }
    }
    /* We want to perform a single write. This should be guaranteed atomic
     * at least if the filesystem we are writing is a real physical one.
     * While this will save us against the server being killed I don't think
     * there is much to do about the whole server stopping for power problems
     * or alike */
    /* Optional configured sleep before the write (testing/latency knob). */
    if (server.aof_flush_sleep && sdslen(server.aof_buf)) {
        usleep(server.aof_flush_sleep);
    }

    latencyStartMonitor(latency);
    /* Write the whole buffer to the AOF file descriptor. */
    nwritten = aofWrite(server.aof_fd,server.aof_buf,sdslen(server.aof_buf));
    latencyEndMonitor(latency);
    /* We want to capture different events for delayed writes:
     * when the delay happens with a pending fsync, or with a saving child
     * active, and when the above two conditions are missing.
     * We also use an additional event name to save all samples which is
     * useful for graphing / monitoring purposes. */
    if (sync_in_progress) {
        latencyAddSampleIfNeeded("aof-write-pending-fsync",latency);
    } else if (hasActiveChildProcess()) {
        latencyAddSampleIfNeeded("aof-write-active-child",latency);
    } else {
        latencyAddSampleIfNeeded("aof-write-alone",latency);
    }
    latencyAddSampleIfNeeded("aof-write",latency);

    /* We performed the write so reset the postponed flush sentinel to zero.
     * serverCron no longer needs to re-invoke us for a postponed flush. */
    server.aof_flush_postponed_start = 0;

    /* Short write or write error: handle the partial-write case. */
    if (nwritten != (ssize_t)sdslen(server.aof_buf)) {
        static time_t last_write_error_log = 0;
        int can_log = 0;

        /* Limit logging rate to 1 line per AOF_WRITE_LOG_ERROR_RATE seconds. */
        if ((server.unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) {
            can_log = 1;
            last_write_error_log = server.unixtime;
        }

        /* Log the AOF write error and record the error code. */
        if (nwritten == -1) {
            if (can_log) {
                serverLog(LL_WARNING,"Error writing to the AOF file: %s",
                    strerror(errno));
                server.aof_last_write_errno = errno;
            }
        } else {
            if (can_log) {
                serverLog(LL_WARNING,"Short write while writing to "
                                       "the AOF file: (nwritten=%lld, "
                                       "expected=%lld)",
                                       (long long)nwritten,
                                       (long long)sdslen(server.aof_buf));
            }

            /* Try to undo the partial write by truncating back to the last
             * known-good size, so the file never holds a half command. */
            if (ftruncate(server.aof_fd, server.aof_current_size) == -1) {
                if (can_log) {
                    serverLog(LL_WARNING, "Could not remove short write "
                             "from the append-only file.  Redis may refuse "
                             "to load the AOF the next time it starts.  "
                             "ftruncate: %s", strerror(errno));
                }
            } else {
                /* If the ftruncate() succeeded we can set nwritten to
                 * -1 since there is no longer partial data into the AOF. */
                nwritten = -1;
            }
            server.aof_last_write_errno = ENOSPC;
        }

        /* Handle the AOF write error. */
        if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
            /* We can't recover when the fsync policy is ALWAYS since the
             * reply for the client is already in the output buffers, and we
             * have the contract with the user that on acknowledged write data
             * is synced on disk. */
            serverLog(LL_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting...");
            exit(1);
        } else {
            /* Partial write: keep the unwritten tail in aof_buf and retry
             * on the next call. */

            /* Recover from failed write leaving data into the buffer. However
             * set an error to stop accepting writes as long as the error
             * condition is not cleared. */
            server.aof_last_write_status = C_ERR;

            /* Trim the sds buffer if there was a partial write, and there
             * was no way to undo it with ftruncate(2). */
            if (nwritten > 0) {
                server.aof_current_size += nwritten;
                sdsrange(server.aof_buf,nwritten,-1);
            }
            return; /* We'll try again on the next call... */
        }
    } else {
        /* Successful write(2). If AOF was in error state, restore the
         * OK state and log the event.
         * Everything in aof_buf reached the file: clear the error flag so
         * serverCron stops retrying. */
        if (server.aof_last_write_status == C_ERR) {
            serverLog(LL_WARNING,
                "AOF write error looks solved, Redis can write again.");
            server.aof_last_write_status = C_OK;
        }
    }
    server.aof_current_size += nwritten;

    /* Re-use AOF buffer when it is small enough. The maximum comes from the
     * arena size of 4k minus some overhead (but is otherwise arbitrary). */
    if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) {
        sdsclear(server.aof_buf);
    } else {
        sdsfree(server.aof_buf);
        server.aof_buf = sdsempty();
    }

    /* Data is in the page cache; decide whether to fsync now. */
try_fsync:
    /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are
     * children doing I/O in the background.
     * (aof_no_fsync_on_rewrite defaults to 0; when enabled, skip fsync while
     * a child is saving, since the file will soon be replaced anyway.) */
    if (server.aof_no_fsync_on_rewrite && hasActiveChildProcess())
        return;

    /* Perform the fsync if needed.
     * ALWAYS: fsync synchronously on every flush. */
    if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
        /* redis_fsync is defined as fdatasync() for Linux in order to avoid
         * flushing metadata. */
        latencyStartMonitor(latency);
        redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-fsync-always",latency);
        server.aof_fsync_offset = server.aof_current_size;
        server.aof_last_fsync = server.unixtime;
        /* EVERYSEC: hand the fsync to a bio background thread at most once
         * per second. */
    } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC &&
                server.unixtime > server.aof_last_fsync)) {
        if (!sync_in_progress) {
            aof_background_fsync(server.aof_fd);
            server.aof_fsync_offset = server.aof_current_size;
        }
        server.aof_last_fsync = server.unixtime;
    }
}

在主线程中会将aof_buf的数据写入到aof文件并尝试刷盘。这是主线程处理aof的这条线。 而在某些情况下会触发后台子进程生成aof文件。 目前触发点有2种,第一种是在serverCron中检测到aof文件足够大,需要进行瘦身,还有一种是外部命令强制触发任务。

serverCron

/* Trigger an AOF rewrite if needed.
         * 根据此时aof内数据的大小 开启子进程生成aof文件
         * */
        if (server.aof_state == AOF_ON &&
            !hasActiveChildProcess() &&
            server.aof_rewrite_perc &&
            // 如果aof文件内数据很少 rewrite(瘦身)的效果就会比较差 所以有一个最小的限制值
            server.aof_current_size > server.aof_rewrite_min_size)
        {
            // 判断是否满足条件
            long long base = server.aof_rewrite_base_size ?
                server.aof_rewrite_base_size : 1;
            long long growth = (server.aof_current_size*100/base) - 100;
            if (growth >= server.aof_rewrite_perc) {
                serverLog(LL_NOTICE,"Starting automatic rewriting of AOF on %lld%% growth",growth);
                rewriteAppendOnlyFileBackground();
            }
        }

外部命令

/**
 * 使用子进程基于当前数据生成aof 同时还会进行持久化
 * @param c
 */
/* BGREWRITEAOF command implementation.
 *
 * Starts (or schedules) a background AOF rewrite: a forked child writes a
 * compacted AOF based on the current dataset while the parent keeps serving
 * clients and accumulates the diff for the child. */
void bgrewriteaofCommand(client *c) {
    /* A rewrite child is already at work: refuse a second one. */
    if (server.aof_child_pid != -1) {
        addReplyError(c,"Background append only file rewriting already in progress");
        return;
    }

    /* Some other child (RDB save, module fork) is running: set the
     * scheduled flag so the rewrite starts once that child terminates. */
    if (hasActiveChildProcess()) {
        server.aof_rewrite_scheduled = 1;
        addReplyStatus(c,"Background append only file rewriting scheduled");
        return;
    }

    /* No child is active: fork the rewrite child now. The parent returns
     * immediately; the child does the actual work. */
    if (rewriteAppendOnlyFileBackground() == C_OK) {
        addReplyStatus(c,"Background append only file rewriting started");
    } else {
        addReplyError(c,"Can't execute an AOF background rewriting. "
                        "Please check the server logs for more information.");
    }
}

子进程的处理逻辑是这样

/* Write a sequence of commands able to fully rebuild the dataset into
 * "filename". Used both by REWRITEAOF and BGREWRITEAOF.
 *
 * In order to minimize the number of commands needed in the rewritten
 * log Redis uses variadic commands when possible, such as RPUSH, SADD
 * and ZADD. However at max AOF_REWRITE_ITEMS_PER_CMD items per time
 * are inserted using a single command.
 * 将重做(瘦身)后的数据写入到aof 执行本次任务的是子进程
 * */
/* Write a sequence of commands able to fully rebuild the dataset into
 * "filename". Used both by REWRITEAOF and BGREWRITEAOF.
 *
 * In order to minimize the number of commands needed in the rewritten
 * log Redis uses variadic commands when possible, such as RPUSH, SADD
 * and ZADD. However at max AOF_REWRITE_ITEMS_PER_CMD items per time
 * are inserted using a single command.
 *
 * This runs in the forked rewrite child; it also drains the diff the
 * parent keeps sending over the pipe while the rewrite is in progress.
 *
 * @param filename final destination path for the rewritten AOF
 * @return C_OK on success, C_ERR on any write/protocol failure
 */
int rewriteAppendOnlyFile(char *filename) {
    rio aof;
    FILE *fp = NULL;
    char tmpfile[256];
    char byte;

    /* Note that we have to use a different temp name here compared to the
     * one used by rewriteAppendOnlyFileBackground() function.
     * Write into a temp file first; rename atomically at the end. */
    snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) getpid());
    fp = fopen(tmpfile,"w");
    if (!fp) {
        serverLog(LL_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno));
        return C_ERR;
    }

    /* Reset the accumulated parent diff before starting. */
    server.aof_child_diff = sdsempty();
    /* Wrap the temp file in a rio stream and write through the rio API. */
    rioInitWithFile(&aof,fp);

    /* Incremental fsync while writing, to avoid one huge final flush. */
    if (server.aof_rewrite_incremental_fsync)
        rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES);

    /* Notify module listeners that a persistence operation started. */
    startSaving(RDBFLAGS_AOF_PREAMBLE);

    /* With the RDB preamble option the AOF starts with an RDB snapshot,
     * which makes loading the file much faster. */
    if (server.aof_use_rdb_preamble) {
        int error;
        /* Save the snapshot in RDB format first. With RDBFLAGS_AOF_PREAMBLE
         * the RDB writer also periodically drains the diff the parent keeps
         * sending over the pipe. */
        if (rdbSaveRio(&aof,&error,RDBFLAGS_AOF_PREAMBLE,NULL) == C_ERR) {
            errno = error;
            goto werr;
        }
    } else {
        /* Emit the most compact command stream that rebuilds the current
         * dataset: this is the actual "slimmed down" AOF. */
        if (rewriteAppendOnlyFileRio(&aof) == C_ERR) goto werr;
    }

    /* Do an initial slow fsync here while the parent is still sending
     * data, in order to make the next final fsync faster. */
    if (fflush(fp) == EOF) goto werr;
    if (fsync(fileno(fp)) == -1) goto werr;

    /* Read again a few times to get more data from the parent.
     * We can't read forever (the server may receive data from clients
     * faster than it is able to send data to the child), so we try to read
     * some more data in a loop as soon as there is a good chance more data
     * will come. If it looks like we are wasting time, we abort (this
     * happens after 20 ms without new data). */

    /* Drain the latest diff from the parent into aof_child_diff. */
    int nodata = 0;
    mstime_t start = mstime();
    /* Stop after 20 consecutive 1ms timeouts with no data, or after 1000ms
     * total. */
    while(mstime()-start < 1000 && nodata < 20) {
        if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0)
        {
            nodata++;
            continue;
        }
        nodata = 0; /* Start counting from zero, we stop on N *contiguous*
                       timeouts. */
        aofReadDiffFromParent();
    }

    /* Ask the master to stop sending diffs.
     * "!" over the ack pipe tells the parent to stop streaming. */
    if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr;
    if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK)
        goto werr;
    /* We read the ACK from the server using a 10 seconds timeout. Normally
     * it should reply ASAP, but just in case we lose its reply, we are sure
     * the child will eventually get terminated.
     * NOTE(review): the comment above says 10 seconds but the syncRead
     * timeout below is 5000 ms — upstream comment/code mismatch. */
    if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 ||
        byte != '!') goto werr;
    serverLog(LL_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF...");

    /* Read the final diff if any.
     * Between the child sending "!" and the parent acknowledging it, more
     * diff may have been written to the pipe; collect it here. Commands
     * executed after this point are NOT included in the rewritten file
     * (they remain only in the parent's regular AOF path). */
    aofReadDiffFromParent();

    /* Write the received diff to the file. */
    serverLog(LL_NOTICE,
        "Concatenating %.2f MB of AOF diff received from parent.",
        (double) sdslen(server.aof_child_diff) / (1024*1024));

    /* Append every command captured from the parent, so the final file is
     * (compacted dataset commands) + (commands executed during rewrite). */
    if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0)
        goto werr;

    /* Make sure data will not remain on the OS's output buffers
     * — the file content is complete at this point. */
    if (fflush(fp)) goto werr;
    if (fsync(fileno(fp))) goto werr;
    if (fclose(fp)) { fp = NULL; goto werr; }
    fp = NULL;

    /* Use RENAME to make sure the DB file is changed atomically only
     * if the generate DB file is ok. */
    if (rename(tmpfile,filename) == -1) {
        serverLog(LL_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno));
        unlink(tmpfile);
        stopSaving(0);
        return C_ERR;
    }
    serverLog(LL_NOTICE,"SYNC append only file rewrite performed");
    /* Persistence finished: notify module listeners of success. */
    stopSaving(1);
    return C_OK;

werr:
    serverLog(LL_WARNING,"Write error writing append only file on disk: %s", strerror(errno));
    if (fp) fclose(fp);
    unlink(tmpfile);
    stopSaving(0);
    return C_ERR;
}

在子进程生成aof数据的同时,主线程可能还会继续处理新的command,这些数据除了会写入到aof_buf中,还会写入到aof_rewrite_buf_blocks中,当aof的瘦身command生成后,就会读取aof_rewrite_buf_blocks的数据并追加到aof文件。当子进程向父进程发送终止符号"!"后,父进程就不会继续传输数据了,如果此时宕机,数据是会有丢失的。我们再考虑一点。当瘦身完成后,之前存储在aof_buf中的数据还有必要么(假设这部分数据还没有写入到aof文件),已经没有必要了。因为瘦身后的aof文件优先级更高,之前的数据必然被丢弃。这部分逻辑就是在backgroundRewriteDoneHandler,该方法的触发点在serverCron.checkChildrenDone中

/**
 * 检查某个子进程是否完成了任务
 * 在server的serverCron中 每隔一段时间就会检测此时后台进程是否完成了任务
 */
/**
 * Reap a terminated background child (RDB save, AOF rewrite, or module
 * fork) and dispatch to its completion handler.
 * Called periodically from serverCron.
 */
void checkChildrenDone(void) {
    int statloc;
    pid_t pid;

    /* If we have a diskless rdb child (note that we support only one concurrent
     * child), we want to avoid collecting it's exit status and acting on it
     * as long as we didn't finish to drain the pipe, since then we're at risk
     * of starting a new fork and a new pipe before we're done with the previous
     * one. */
    if (server.rdb_child_pid != -1 && server.rdb_pipe_conns)
        return;

    /* Reap any terminated child without blocking.
     * WNOHANG: return 0 immediately when no child has exited yet.
     * Third argument NULL: we don't need resource-usage info.
     * Return value is the reaped child's pid, 0 if none, -1 on error. */
    if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) {
        /* Decode how the child terminated. */
        int exitcode = WEXITSTATUS(statloc);
        int bysignal = 0;

        /* Terminated by a signal: WTERMSIG gives the signal number. */
        if (WIFSIGNALED(statloc)) bysignal = WTERMSIG(statloc);

        /* sigKillChildHandler catches the signal and calls exit(), but we
         * must make sure not to flag lastbgsave_status, etc incorrectly.
         * We could directly terminate the child process via SIGUSR1
         * without handling it, but in this case Valgrind will log an
         * annoying error.
         * Treat the special retval as "killed by SIGUSR1". */
        if (exitcode == SERVER_CHILD_NOERROR_RETVAL) {
            bysignal = SIGUSR1;
            exitcode = 1;
        }

        /* wait3 itself failed. */
        if (pid == -1) {
            serverLog(LL_WARNING,"wait3() returned an error: %s. "
                "rdb_child_pid = %d, aof_child_pid = %d, module_child_pid = %d",
                strerror(errno),
                (int) server.rdb_child_pid,
                (int) server.aof_child_pid,
                (int) server.module_child_pid);

            /* On a normal reap, dispatch by which child terminated. */

            /* RDB save child finished. */
        } else if (pid == server.rdb_child_pid) {
            /* Run the RDB completion logic. */
            backgroundSaveDoneHandler(exitcode,bysignal);
            /* Collect child stats (e.g. copy-on-write size) on success. */
            if (!bysignal && exitcode == 0) receiveChildInfo();
            /* AOF rewrite child finished: finalize and clean up. */
        } else if (pid == server.aof_child_pid) {
            backgroundRewriteDoneHandler(exitcode,bysignal);
            if (!bysignal && exitcode == 0) receiveChildInfo();
        } else if (pid == server.module_child_pid) {
            ModuleForkDoneHandler(exitcode,bysignal);
            if (!bysignal && exitcode == 0) receiveChildInfo();
        } else {
            /* Unknown pid: assume it was an ldb (Lua debugger) child. */
            if (!ldbRemoveChild(pid)) {
                serverLog(LL_WARNING,
                    "Warning, detected child with unmatched pid: %ld",
                    (long)pid);
            }
        }
        /* No child is running anymore: dict resizing is allowed again. */
        updateDictResizePolicy();
        /* Tear down the parent/child info pipe. */
        closeChildInfoPipe();
    }
}
/* A background append only file rewriting (BGREWRITEAOF) terminated its work.
 * Handle this.
 * 在serverCron中 会检测子进程是否完成了任务 当子进程在执行aof相关的任务时 完成后就是执行该方法
 * */
/* A background append only file rewriting (BGREWRITEAOF) terminated its work.
 * Handle this.
 *
 * Invoked from serverCron/checkChildrenDone when the reaped child was the
 * AOF rewrite child: flush the remaining rewrite-buffer diff into the
 * child's temp file, atomically rename it over the configured AOF, and swap
 * the server's AOF file descriptor.
 *
 * @param exitcode child exit status
 * @param bysignal signal that terminated the child, or 0
 */
void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
    /* Child exited normally with success. */
    if (!bysignal && exitcode == 0) {
        int newfd, oldfd;
        char tmpfile[256];
        long long now = ustime();
        mstime_t latency;

        serverLog(LL_NOTICE,
            "Background AOF rewrite terminated with success");

        /* Flush the differences accumulated by the parent to the
         * rewritten AOF. */
        latencyStartMonitor(latency);
        snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof",
            (int)server.aof_child_pid);
        newfd = open(tmpfile,O_WRONLY|O_APPEND);
        if (newfd == -1) {
            serverLog(LL_WARNING,
                "Unable to open the temporary AOF produced by the child: %s", strerror(errno));
            goto cleanup;
        }

        /* The child wrote the compacted dataset; append the diff that was
         * never delivered to it (the tail of aof_rewrite_buf_blocks). The
         * old aof_buf content becomes irrelevant once the file is swapped. */
        if (aofRewriteBufferWrite(newfd) == -1) {
            serverLog(LL_WARNING,
                "Error trying to flush the parent diff to the rewritten AOF: %s", strerror(errno));
            close(newfd);
            goto cleanup;
        }
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency);

        serverLog(LL_NOTICE,
            "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));

        /* The only remaining thing to do is to rename the temporary file to
         * the configured file and switch the file descriptor used to do AOF
         * writes. We don't want close(2) or rename(2) calls to block the
         * server on old file deletion.
         *
         * There are two possible scenarios:
         *
         * 1) AOF is DISABLED and this was a one time rewrite. The temporary
         * file will be renamed to the configured file. When this file already
         * exists, it will be unlinked, which may block the server.
         *
         * 2) AOF is ENABLED and the rewritten AOF will immediately start
         * receiving writes. After the temporary file is renamed to the
         * configured file, the original AOF file descriptor will be closed.
         * Since this will be the last reference to that file, closing it
         * causes the underlying file to be unlinked, which may block the
         * server.
         *
         * To mitigate the blocking effect of the unlink operation (either
         * caused by rename(2) in scenario 1, or by close(2) in scenario 2), we
         * use a background thread to take care of this. First, we
         * make scenario 1 identical to scenario 2 by opening the target file
         * when it exists. The unlink operation after the rename(2) will then
         * be executed upon calling close(2) for its descriptor. Everything to
         * guarantee atomicity for this switch has already happened by then, so
         * we don't care what the outcome or duration of that close operation
         * is, as long as the file descriptor is released again. */
        if (server.aof_fd == -1) {
            /* AOF disabled */

            /* Don't care if this fails: oldfd will be -1 and we handle that.
             * One notable case of -1 return is if the old file does
             * not exist. */
            oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK);
        } else {
            /* AOF enabled */
            oldfd = -1; /* We'll set this to the current AOF filedes later. */
        }

        /* Rename the temporary file. This will not unlink the target file if
         * it exists, because we reference it with "oldfd". */
        latencyStartMonitor(latency);
        /* Promote the rewritten file to be the new AOF, discarding the old
         * one. This runs on the main thread (from serverCron), mutually
         * exclusive with AOF writes, so no extra synchronization is needed. */
        if (rename(tmpfile,server.aof_filename) == -1) {
            serverLog(LL_WARNING,
                "Error trying to rename the temporary AOF file %s into %s: %s",
                tmpfile,
                server.aof_filename,
                strerror(errno));
            close(newfd);
            if (oldfd != -1) close(oldfd);
            goto cleanup;
        }
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-rename",latency);

        /* AOF was disabled: nothing else references the new file, close it. */
        if (server.aof_fd == -1) {
            /* AOF disabled, we don't need to set the AOF file descriptor
             * to this new file, so we can close it. */
            close(newfd);
        } else {
            /* AOF enabled, replace the old fd with the new one.
             * From now on all AOF writes go to the rewritten file. */
            oldfd = server.aof_fd;
            server.aof_fd = newfd;
            /* fsync the new file according to the configured policy. */
            if (server.aof_fsync == AOF_FSYNC_ALWAYS)
                redis_fsync(newfd);
            else if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
                aof_background_fsync(newfd);
            server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
            aofUpdateCurrentSize();
            server.aof_rewrite_base_size = server.aof_current_size;
            server.aof_fsync_offset = server.aof_current_size;

            /* Clear regular AOF buffer since its contents was just written to
             * the new AOF from the background rewrite buffer. */
            sdsfree(server.aof_buf);
            /* aof_buf can now safely be discarded. */
            server.aof_buf = sdsempty();
        }

        /* Record that the last background rewrite succeeded. */
        server.aof_lastbgrewrite_status = C_OK;

        serverLog(LL_NOTICE, "Background AOF rewrite finished successfully");
        /* Change state from WAIT_REWRITE to ON if needed
         * — the whole rewrite cycle is complete. */
        if (server.aof_state == AOF_WAIT_REWRITE)
            server.aof_state = AOF_ON;

        /* Asynchronously close the overwritten AOF.
         * A bio background job closes (and thereby unlinks) the old file. */
        if (oldfd != -1) bioCreateBackgroundJob(BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL);

        serverLog(LL_VERBOSE,
            "Background AOF rewrite signal handler took %lldus", ustime()-now);
        /* Child exited normally but reported an error. */
    } else if (!bysignal && exitcode != 0) {
        server.aof_lastbgrewrite_status = C_ERR;

        serverLog(LL_WARNING,
            "Background AOF rewrite terminated with error");
    } else {
        /* SIGUSR1 is whitelisted, so we have a way to kill a child without
         * triggering an error condition. */
        if (bysignal != SIGUSR1)
            server.aof_lastbgrewrite_status = C_ERR;

        serverLog(LL_WARNING,
            "Background AOF rewrite terminated by signal %d", bysignal);
    }

cleanup:
    aofClosePipes();
    aofRewriteBufferReset();
    aofRemoveTempFile(server.aof_child_pid);
    server.aof_child_pid = -1;
    server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start;
    server.aof_rewrite_time_start = -1;
    /* Schedule a new rewrite if we are waiting for it to switch the AOF ON. */
    if (server.aof_state == AOF_WAIT_REWRITE)
        server.aof_rewrite_scheduled = 1;
}

这里用子进程瘦身后的aof临时文件替换原始的aof文件。

以上,aof的解析已经全部完成了。