# Kafka 4.0 是最新版 Kafka,可在 Kafka 官網下載;依賴的 JDK 版本要求為 JDK 17 及以上
# Unpack the Kafka distribution, rename it to a version-neutral
# directory, and enter it. All later commands run from /opt/kafka.
kafka_pkg="kafka_2.13-4.0.0"
tar -xzf "${kafka_pkg}.tgz"
mv "${kafka_pkg}" kafka
cd kafka
# Run this ONCE, on any single node, to generate a random cluster UUID.
# Every node must later be formatted with this same UUID.
# (Original line was garbled by encoding corruption; the command takes no
# extra arguments — the UUID is printed to stdout.)
bin/kafka-storage.sh random-uuid   # example output: IyyjPwZcTa2LHKkV1rj5pg
# On EVERY node, set the following keys in config/server.properties.
# node.id must be unique per node and must match that node's id in
# controller.quorum.voters (1, 2, or 3 below). The original line had no
# value, which is not a valid properties entry.
node.id=1
controller.quorum.voters=1@192.168.10.10:9093,2@192.168.10.20:9093,3@192.168.10.30:9093
log.dirs=/opt/kafka/log/kraft-combined-logs
num.partitions=16
# Format the log directories on EVERY node, using the cluster UUID
# generated above ($KAFKA_CLUSTER_ID); each node must use the same UUID.
# Long-form options are equivalent to -t / -c.
bin/kafka-storage.sh format --cluster-id IyyjPwZcTa2LHKkV1rj5pg --config config/server.properties
# Start the Kafka broker as a daemon on every node.
# (Removed a stray garbled '?' character that preceded the path in the
# original line and would have been passed as a bogus argument.)
bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
# Smoke-test the Kafka cluster.
# Create a topic named cluster-topic1 (3 partitions, replication factor 3).
bin/kafka-topics.sh --bootstrap-server 192.168.10.10:9092 --create --topic cluster-topic1 --partitions 3 --replication-factor 3
# Describe the topic via a different node (port 9092) to confirm it is
# visible cluster-wide.
bin/kafka-topics.sh --describe --topic cluster-topic1 --bootstrap-server 192.168.10.20:9092
# Write events to the topic: type lines at the producer's '>' prompt, e.g.:
bin/kafka-console-producer.sh --topic cluster-topic1 --bootstrap-server 192.168.10.10:9092
This is my first event
This is my second event
# Read the events back from the beginning.
# (Fixed: original used --topic cluster-topic, which does not exist — the
# topic created above is cluster-topic1.)
bin/kafka-console-consumer.sh --topic cluster-topic1 --from-beginning --bootstrap-server 192.168.10.10:9092
# Delete the topic (same topic-name fix as above).
bin/kafka-topics.sh --bootstrap-server 192.168.10.10:9092 --delete --topic cluster-topic1
# Before switching to systemd management, stop the manually started broker.
# kafka-server-stop.sh takes no properties argument — it signals the running
# Kafka process directly, so the config path in the original was ignored
# (and was a relative path that only worked from /opt/kafka anyway).
/opt/kafka/bin/kafka-server-stop.sh
# Install the systemd unit so Kafka is managed as a service.
# Fixes vs. the original: the heredoc and systemctl commands were collapsed
# onto a single line (the 'EOF' terminator must stand alone on its own line,
# and the unit file needs real newlines between sections/keys), and 'cat >>'
# would append a duplicate unit on every re-run — use '>' to overwrite.
# The quoted 'EOF' delimiter prevents any shell expansion inside the unit.
cat > /etc/systemd/system/kafka.service << 'EOF'
[Unit]
Description=Apache Kafka Service (KRaft Mode)
After=network.target

[Service]
Type=simple
User=root
Group=root
ExecStart=/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
WorkingDirectory=/opt/kafka
Environment="JAVA_HOME=/usr/local/jdk-17.0.12"

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kafka
systemctl status kafka
# Optional: 'systemctl enable kafka' to start the service at boot.