############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
# Unique ID of this broker within the Kafka cluster
broker.id=1
############################# Socket Server Settings #############################
listeners=PLAINTEXT://:9092
# Port the socket server listens on
port=9092
# Hostname the broker binds to
host.name=192.168.30.65
# Hostname the broker will advertise to producers and consumers. If not set, it uses the
# value for "host.name" if configured. Otherwise, it will use the value returned from
# java.net.InetAddress.getCanonicalHostName().
advertised.host.name=192.168.30.65
# The port to publish to ZooKeeper for clients to use. If this is not set,
# it will publish the same port that the broker binds to.
#advertised.port=<port accessible by clients>
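# Example (illustrative; the topic name "test" is an assumption): clients reach the broker through
# the advertised address above, so a producer on another host would connect with something like:
#   bin/kafka-console-producer.sh --broker-list 192.168.30.65:9092 --topic test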
# Number of threads handling network requests
num.network.threads=3
# Number of threads handling disk I/O
num.io.threads=8
# Send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# Receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# Maximum size (bytes) of a request the socket server will accept
socket.request.max.bytes=104857600
############################# Log Basics #############################
# Directory where log data is stored
log.dirs=/u01/kafka/kafka-logs
# Default number of partitions per topic.
num.partitions=3
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
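# Example (illustrative; the topic name "demo" is an assumption): topics auto-created on first use
# get num.partitions partitions; an explicit creation with the bundled tool would look like:
#   bin/kafka-topics.sh --create --zookeeper 192.168.30.65:2188 --topic demo --partitions 3 --replication-factor 1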
"server.properties" [只读] 137L, 6294C 1,1 顶端
num.recovery.threads.per.data.dir=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
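# Example (illustrative; the topic name "demo" is an assumption): the per-topic override mentioned
# above can be applied with the topic tool, e.g. to flush a single topic every 10000 messages:
#   bin/kafka-topics.sh --alter --zookeeper 192.168.30.65:2188 --topic demo --config flush.messages=10000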
############################# Log Storage Policy #############################
# Flush data to the log file on disk once this many messages have accumulated
log.flush.interval.messages=10000
# Force a flush after this much time (ms) has passed; a flush is triggered when either
# interval.ms or interval.messages is reached, whichever comes first
log.flush.interval.ms=1000
# Interval (ms) at which the flusher checks whether any log needs to be flushed to disk
log.flush.scheduler.interval.ms=3000
# How long to keep a log (hours|minutes); defaults to 7 days (168 hours). Older data is handled
# according to the cleanup policy. Cleanup is triggered by whichever of the size (bytes) and time
# (hours/minutes) limits is reached first.
log.retention.hours=24
# Maximum number of bytes of log data to retain per partition; data beyond this size is handled according to the cleanup policy.
log.retention.bytes=21474836480
# Maximum size of a single log segment file; once exceeded, a new segment is rolled (-1 means no limit)
log.segment.bytes=1073741824
# Force a new segment to be rolled after this many hours, even if the size limit has not been reached
log.roll.hours=168
# Interval (ms) at which log segments are checked against the retention settings (log.retention.hours or log.retention.bytes)
log.retention.check.interval.ms=60000
# Whether to enable the log cleaner (required for log compaction)
log.cleaner.enable=false
# Log cleanup policy (delete|compact)
log.cleanup.policy=delete
# How long delete records (tombstones) are retained in compacted logs
log.cleaner.delete.retention.ms=86400000
# Maximum size of the offset index file of a log segment
log.index.size.max.bytes=10485760
# Interval (in bytes) at which an entry is added to the offset index; usually does not need changing.
log.index.interval.bytes=4096
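# Example (illustrative; the topic name "demo" is an assumption): with the values above each
# partition keeps at most 24 hours or 20 GiB (21474836480 bytes) of data, whichever limit is hit
# first, in 1 GiB segments. A single topic can opt into compaction instead of deletion (this also
# requires log.cleaner.enable=true) with:
#   bin/kafka-topics.sh --alter --zookeeper 192.168.30.65:2188 --topic demo --config cleanup.policy=compact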
############################# Zookeeper #############################
# ZooKeeper connection string (comma-separated host:port list)
#zookeeper.connect=192.168.30.65:2188,192.168.30.66:2188,192.168.30.67:2188
zookeeper.connect=192.168.30.65:2188
# Timeout (ms) for establishing a connection to ZooKeeper
zookeeper.connection.timeout.ms=6000
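# Example (illustrative; the topic name "demo" is an assumption): the old high-level console
# consumer bootstraps through the same ZooKeeper ensemble configured above:
#   bin/kafka-console-consumer.sh --zookeeper 192.168.30.65:2188 --topic demo --from-beginning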