kafka源码:生产者main线程初始化过程

124 阅读1分钟

源码版本:kafka 3.0

image.png

入口

创建kafka生产者对象

image.png

连续跳过三个this构造函数

// The three delegating constructors: each normalizes the configuration one
// step further (Properties -> Map<String,Object> -> ProducerConfig) before
// the main constructor runs.
public KafkaProducer(Properties properties) {
    this(properties, null, null);
}
// Explicitly supplied serializers (may be null) are merged into the config
// via appendSerializerToConfig in the next constructor.
public KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
    this(Utils.propsToMap(properties), keySerializer, valueSerializer);
}
public KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
    this(new ProducerConfig(ProducerConfig.appendSerializerToConfig(configs, keySerializer, valueSerializer)),
            keySerializer, valueSerializer, null, null, null, Time.SYSTEM);
}

见到真身 KafkaProducer

image.png

此构造方法里面主要做了如下操作:

获取事务id

// Read transactional.id from the config (presumably null when transactions
// are not configured — confirm against ProducerConfig defaults).
String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG);

获取客户端id

// client.id — reused below when configuring JMX reporting, the partitioner,
// serializers and interceptors.
this.clientId = config.getString(ProducerConfig.CLIENT_ID_CONFIG);

监控相关配置

// Register a JMX metrics reporter, configured with the original settings
// plus the client id, alongside any other reporters already collected.
JmxReporter jmxReporter = new JmxReporter();
jmxReporter.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)));
reporters.add(jmxReporter);

分区器配置

// Instantiate the configured partitioner (partitioner.class), passing the
// client id as an extra config entry.
this.partitioner = config.getConfiguredInstance(
        ProducerConfig.PARTITIONER_CLASS_CONFIG,
        Partitioner.class,
        Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId));

序列化配置

// Resolve the key serializer: instantiate from key.serializer unless one was
// passed in explicitly, in which case the config entry is marked as ignored.
if (keySerializer == null) {
    this.keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                                                                             Serializer.class);
    this.keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true);// true -> configuring the key side
} else {
    config.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    this.keySerializer = keySerializer;
}
// Same resolution for the value serializer; configure(..., false) marks it
// as the value side.
if (valueSerializer == null) {
    this.valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                                                                               Serializer.class);
    this.valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false);
} else {
    config.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    this.valueSerializer = valueSerializer;
}

拦截器配置

// Build the interceptor list from interceptor.classes; an explicitly
// supplied ProducerInterceptors instance takes precedence over it.
List<ProducerInterceptor<K, V>> interceptorList = (List) config.getConfiguredInstances(
        ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,
        ProducerInterceptor.class,
        Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId));
if (interceptors != null)
    this.interceptors = interceptors;
else
    this.interceptors = new ProducerInterceptors<>(interceptorList);// multiple interceptors form an interceptor chain
// Serializers, interceptors and reporters may listen for cluster resource
// (metadata) changes; gather those listeners here.
ClusterResourceListeners clusterResourceListeners = configureClusterResourceListeners(keySerializer,
        valueSerializer, interceptorList, reporters);

单条消息最大值配置

// max.request.size — upper bound on the size of a single request sent to the broker.
this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG);

缓存大小配置

// buffer.memory — total bytes available to buffer records awaiting send.
this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG);

压缩策略设置

// compression.type — parsed into a CompressionType enum by forName.
this.compressionType = CompressionType.forName(config.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG));

创建缓存队列

// Create the record accumulator — the in-memory buffer where records are
// batched before the sender thread drains and transmits them.
this.accumulator = new RecordAccumulator(logContext,// overall buffer defaults to 32MB (buffer.memory)
        config.getInt(ProducerConfig.BATCH_SIZE_CONFIG),// batch.size, default 16KB
        this.compressionType,
        lingerMs(config),// linger.ms, default 0
        retryBackoffMs,
        deliveryTimeoutMs,
        metrics,
        PRODUCER_METRIC_GROUP_NAME,
        time,
        apiVersions,
        transactionManager,
        new BufferPool(this.totalMemorySize, config.getInt(ProducerConfig.BATCH_SIZE_CONFIG), metrics, time, PRODUCER_METRIC_GROUP_NAME));// memory pool backing the batches

连接kafka集群

List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
        config.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG),// 连接kafka集群
        config.getString(ProducerConfig.CLIENT_DNS_LOOKUP_CONFIG));
if (metadata != null) {// 获取元数据

获取元数据

// Use caller-supplied metadata when given; otherwise build a fresh
// ProducerMetadata and seed it with the bootstrap addresses.
if (metadata != null) {// metadata injected by the caller — presumably for tests; confirm against other constructors
    this.metadata = metadata;
} else {
    this.metadata = new ProducerMetadata(retryBackoffMs,
            config.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG),
            config.getLong(ProducerConfig.METADATA_MAX_IDLE_CONFIG),
            logContext,
            clusterResourceListeners,
            Time.SYSTEM);
    this.metadata.bootstrap(addresses);// seed with the parsed bootstrap server addresses
}

创建sender线程

// Create the Sender (the network-send Runnable) and wrap it in a named
// KafkaThread so it runs in the background.
this.sender = newSender(logContext, kafkaClient, this.metadata);// sending logic
String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId;
this.ioThread = new KafkaThread(ioThreadName, this.sender, true);// the Sender is wrapped in a KafkaThread; true -> daemon thread

// Constructs a named thread wrapping the given Runnable, then applies the
// daemon flag and uncaught-exception handler.
public KafkaThread(final String name, Runnable runnable, boolean daemon) {
    super(runnable, name);
    configureThread(name, daemon);
}

// Applies common thread settings: daemon status plus an uncaught-exception
// handler that logs the error together with the thread name.
private void configureThread(final String name, boolean daemon) {
    setDaemon(daemon);// run as a daemon thread (original comment said "process" — it is a thread)
    setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception in thread '{}':", name, e));
}

启动sender线程

this.ioThread.start();// start the background sender I/O thread

实际调用的是 Sender的run方法


// main loop, runs until close is called
// Excerpt from Sender.run(): keeps invoking runOnce(); exceptions are logged
// and the loop continues, so a single failure does not kill the I/O thread.
while (running) {
    try {
        runOnce();
    } catch (Exception e) {
        log.error("Uncaught error in kafka producer I/O thread: ", e);
    }
}

run里面调用的是runOnce();


/**
 * Run a single iteration of sending.
 *
 * Performs transactional bookkeeping first (when a transaction manager is
 * present), then drains ready batches via sendProducerData and polls the
 * network client for I/O.
 */
void runOnce() {
    if (transactionManager != null) {
        try {
            transactionManager.maybeResolveSequences();

            // do not continue sending if the transaction manager is in a failed state
            if (transactionManager.hasFatalError()) {
                RuntimeException lastError = transactionManager.lastError();
                if (lastError != null)
                    maybeAbortBatches(lastError);
                client.poll(retryBackoffMs, time.milliseconds());// still poll so pending network responses are processed
                return;
            }

            // Check whether we need a new producerId. If so, we will enqueue an InitProducerId
            // request which will be sent below
            transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();

            if (maybeSendAndPollTransactionalRequest()) {
                // a transactional request was handled this iteration; skip the normal send path
                return;
            }
        } catch (AuthenticationException e) {
            // This is already logged as error, but propagated here to perform any clean ups.
            log.trace("Authentication exception while processing transactional request", e);
            transactionManager.authenticationFailed(e);
        }
    }

    // Normal path: hand ready batches to the network client, then poll for I/O.
    long currentTimeMs = time.milliseconds();
    long pollTimeout = sendProducerData(currentTimeMs);
    client.poll(pollTimeout, currentTimeMs);
}