环境如下:

CentOS-7-x86_64
zookeeper-3.4.11
kafka_2.12-1.1.0

一.zookeeper下载与安装

1)下载zookeeper

[root@localhost opt]# cd /opt/
[root@localhost opt]# wget https://mirrors.cnnic.cn/apache/zookeeper/zookeeper-3.4.11/zookeeper-3.4.11.tar.gz
(注:国内镜像通常只保留最新版本,若上述地址已失效,请改用官方归档地址 https://archive.apache.org/dist/zookeeper/zookeeper-3.4.11/zookeeper-3.4.11.tar.gz)

2)解压

[root@localhost opt]# tar zxvf zookeeper-3.4.11.tar.gz 
[root@localhost opt]# ls
zookeeper-3.4.11 zookeeper-3.4.11.tar.gz

3)配置

[root@localhost opt]# cd zookeeper-3.4.11
[root@localhost zookeeper-3.4.11]# ll
total 1596
drwxr-xr-x. 2 502 games 149 Nov 1 14:52 bin
-rw-r--r--. 1 502 games 87943 Nov 1 14:47 build.xml
drwxr-xr-x. 2 502 games 77 Nov 1 14:52 conf
drwxr-xr-x. 10 502 games 130 Nov 1 14:47 contrib
drwxr-xr-x. 2 502 games 4096 Nov 1 14:54 dist-maven
drwxr-xr-x. 6 502 games 4096 Nov 1 14:52 docs
-rw-r--r--. 1 502 games 1709 Nov 1 14:47 ivysettings.xml
-rw-r--r--. 1 502 games 8197 Nov 1 14:47 ivy.xml
drwxr-xr-x. 4 502 games 4096 Nov 1 14:52 lib
-rw-r--r--. 1 502 games 11938 Nov 1 14:47 LICENSE.txt
-rw-r--r--. 1 502 games 3132 Nov 1 14:47 NOTICE.txt
-rw-r--r--. 1 502 games 1585 Nov 1 14:47 README.md
-rw-r--r--. 1 502 games 1770 Nov 1 14:47 README_packaging.txt
drwxr-xr-x. 5 502 games 47 Nov 1 14:47 recipes
drwxr-xr-x. 8 502 games 211 Nov 1 14:52 src
-rw-r--r--. 1 502 games 1478279 Nov 1 14:49 zookeeper-3.4.11.jar
-rw-r--r--. 1 502 games 195 Nov 1 14:52 zookeeper-3.4.11.jar.asc
-rw-r--r--. 1 502 games 33 Nov 1 14:49 zookeeper-3.4.11.jar.md5
-rw-r--r--. 1 502 games 41 Nov 1 14:49 zookeeper-3.4.11.jar.sha1
[root@localhost zookeeper-3.4.11]# cp -rf conf/zoo_sample.cfg conf/zoo.cfg
[root@localhost zookeeper-3.4.11]# vi conf/zoo.cfg

修改或添加zoo.cfg文件中如下两个配置项:

# 注意:zoo.cfg 不支持行内注释,行中出现的 # 及其后内容会被当作配置值的一部分,
# 因此说明文字必须单独成行。以下两个目录需要预先创建(创建命令见下文):
dataDir=/opt/zookeeper-3.4.11/zkdata
dataLogDir=/opt/zookeeper-3.4.11/zkdatalog

创建zk数据存储和zk日志存储目录:

[root@localhost zookeeper-3.4.11]# mkdir /opt/zookeeper-3.4.11/zkdata
[root@localhost zookeeper-3.4.11]# mkdir /opt/zookeeper-3.4.11/zkdatalog
[root@localhost zookeeper-3.4.11]# ll
total 1596
drwxr-xr-x. 2 502 games 149 Nov 1 14:52 bin
-rw-r--r--. 1 502 games 87943 Nov 1 14:47 build.xml
drwxr-xr-x. 2 502 games 92 Mar 31 11:12 conf
drwxr-xr-x. 10 502 games 130 Nov 1 14:47 contrib
drwxr-xr-x. 2 502 games 4096 Nov 1 14:54 dist-maven
drwxr-xr-x. 6 502 games 4096 Nov 1 14:52 docs
-rw-r--r--. 1 502 games 1709 Nov 1 14:47 ivysettings.xml
-rw-r--r--. 1 502 games 8197 Nov 1 14:47 ivy.xml
drwxr-xr-x. 4 502 games 4096 Nov 1 14:52 lib
-rw-r--r--. 1 502 games 11938 Nov 1 14:47 LICENSE.txt
-rw-r--r--. 1 502 games 3132 Nov 1 14:47 NOTICE.txt
-rw-r--r--. 1 502 games 1585 Nov 1 14:47 README.md
-rw-r--r--. 1 502 games 1770 Nov 1 14:47 README_packaging.txt
drwxr-xr-x. 5 502 games 47 Nov 1 14:47 recipes
drwxr-xr-x. 8 502 games 211 Nov 1 14:52 src
drwxr-xr-x. 2 root root 6 Mar 31 11:13 zkdata
drwxr-xr-x. 2 root root 6 Mar 31 11:13 zkdatalog
-rw-r--r--. 1 502 games 1478279 Nov 1 14:49 zookeeper-3.4.11.jar
-rw-r--r--. 1 502 games 195 Nov 1 14:52 zookeeper-3.4.11.jar.asc
-rw-r--r--. 1 502 games 33 Nov 1 14:49 zookeeper-3.4.11.jar.md5
-rw-r--r--. 1 502 games 41 Nov 1 14:49 zookeeper-3.4.11.jar.sha1

4)配置环境变量

[root@localhost zookeeper-3.4.11]# vi /etc/profile

配置项如下:

# config zookeeper install path(必须先定义,后面的 CLASSPATH/PATH 才能引用到)
export ZOOKEEPER_HOME=/opt/zookeeper-3.4.11

# config java class path
export JAVA_HOME=/usr/local/java/jdk1.8.0_161
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:${ZOOKEEPER_HOME}/lib
export PATH=${JAVA_HOME}/bin:${ZOOKEEPER_HOME}/bin:$PATH

保存后执行 source /etc/profile 使配置在当前会话生效。
(说明:原文把 ZOOKEEPER_HOME 的定义放在引用它的 CLASSPATH/PATH 之后,会导致这两个变量展开为空;CLASSPATH 末尾多余的冒号相当于把当前目录加入类路径,也一并去掉。)

5)启动zookeeper

[root@localhost bin]# cd /opt/zookeeper-3.4.11/bin
[root@localhost bin]# ll
total 36
-rwxr-xr-x. 1 502 games 232 Nov 1 14:47 README.txt
-rwxr-xr-x. 1 502 games 1937 Nov 1 14:47 zkCleanup.sh
-rwxr-xr-x. 1 502 games 1056 Nov 1 14:47 zkCli.cmd
-rwxr-xr-x. 1 502 games 1534 Nov 1 14:47 zkCli.sh
-rwxr-xr-x. 1 502 games 1628 Nov 1 14:47 zkEnv.cmd
-rwxr-xr-x. 1 502 games 2696 Nov 1 14:47 zkEnv.sh
-rwxr-xr-x. 1 502 games 1089 Nov 1 14:47 zkServer.cmd
-rwxr-xr-x. 1 502 games 6773 Nov 1 14:47 zkServer.sh
[root@localhost bin]# ./zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper-3.4.11/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED

二.kafka下载与安装

1).下载kafka:

[root@localhost bin]# cd /opt/
[root@localhost opt]# wget http://apache.fayea.com/kafka/1.1.0/kafka_2.12-1.1.0.tgz
(注:该第三方镜像可能已失效,可改用官方归档地址 https://archive.apache.org/dist/kafka/1.1.0/kafka_2.12-1.1.0.tgz)
--2018-03-31 11:21:52-- http://apache.fayea.com/kafka/1.1.0/kafka_2.12-1.1.0.tgz
Resolving apache.fayea.com (apache.fayea.com)... 202.115.175.188, 202.115.175.187
Connecting to apache.fayea.com (apache.fayea.com)|202.115.175.188|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 50326212 (48M) [application/x-gzip]
Saving to: ‘kafka_2.12-1.1.0.tgz’

100%[=============================================================================================================================>] 50,326,212 442KB/s in 1m 44s

2018-03-31 11:23:36 (473 KB/s) - ‘kafka_2.12-1.1.0.tgz’ saved [50326212/50326212]

[root@localhost opt]# ll
total 84964
-rw-r--r--. 1 root root 50326212 Mar 28 08:05 kafka_2.12-1.1.0.tgz
drwxr-xr-x. 15 502 games 4096 Mar 31 11:20 zookeeper-3.4.11
-rw-r--r--. 1 root root 36668066 Nov 8 13:24 zookeeper-3.4.11.tar.gz

2) 解压:

tar -zxvf kafka_2.12-1.1.0.tgz

3) 配置:

进入kafka安装工程根目录编辑config/server.properties

[root@localhost opt]# cd /opt/kafka_2.12-1.1.0/config/
[root@localhost config]# ll
total 64
-rw-r--r--. 1 root root 906 Mar 23 18:51 connect-console-sink.properties
-rw-r--r--. 1 root root 909 Mar 23 18:51 connect-console-source.properties
-rw-r--r--. 1 root root 5807 Mar 23 18:51 connect-distributed.properties
-rw-r--r--. 1 root root 883 Mar 23 18:51 connect-file-sink.properties
-rw-r--r--. 1 root root 881 Mar 23 18:51 connect-file-source.properties
-rw-r--r--. 1 root root 1111 Mar 23 18:51 connect-log4j.properties
-rw-r--r--. 1 root root 2730 Mar 23 18:51 connect-standalone.properties
-rw-r--r--. 1 root root 1221 Mar 23 18:51 consumer.properties
-rw-r--r--. 1 root root 4727 Mar 23 18:51 log4j.properties
-rw-r--r--. 1 root root 1919 Mar 23 18:51 producer.properties
-rw-r--r--. 1 root root 6851 Mar 23 18:51 server.properties
-rw-r--r--. 1 root root 1032 Mar 23 18:51 tools-log4j.properties
-rw-r--r--. 1 root root 1023 Mar 23 18:51 zookeeper.properties
[root@localhost config]# mkdir /opt/kafka_2.12-1.1.0/kafka_log

添加或者修改以下两个配置项(注意:properties 文件不支持行内注释,说明文字必须单独成行,否则会被当作配置值的一部分):

# kafka_log 目录需提前创建(创建命令见上文 mkdir)
log.dirs=/opt/kafka_2.12-1.1.0/kafka_log
listeners=PLAINTEXT://192.168.0.111:9092

config/server.properties修改后:

  1 [root@localhost config]# more server.properties 
  2 # Licensed to the Apache Software Foundation (ASF) under one or more
  3 # contributor license agreements. See the NOTICE file distributed with
  4 # this work for additional information regarding copyright ownership.
  5 # The ASF licenses this file to You under the Apache License, Version 2.0
  6 # (the "License"); you may not use this file except in compliance with
  7 # the License. You may obtain a copy of the License at
  8 #
  9 # http://www.apache.org/licenses/LICENSE-2.0
 10 #
 11 # Unless required by applicable law or agreed to in writing, software
 12 # distributed under the License is distributed on an "AS IS" BASIS,
 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 14 # See the License for the specific language governing permissions and
 15 # limitations under the License.
 16 
 17 # see kafka.server.KafkaConfig for additional details and defaults
 18 
 19 ############################# Server Basics #############################
 20 
 21 # The id of the broker. This must be set to a unique integer for each broker.
 22 broker.id=0
 23 
 24 ############################# Socket Server Settings #############################
 25 
 26 # The address the socket server listens on. It will get the value returned from 
 27 # java.net.InetAddress.getCanonicalHostName() if not configured.
 28 # FORMAT:
 29 # listeners = listener_name://host_name:port
 30 # EXAMPLE:
 31 # listeners = PLAINTEXT://your.host.name:9092
 32 #listeners=PLAINTEXT://:9092
 33 listeners=PLAINTEXT://192.168.0.111:9092
 34 
 35 # Hostname and port the broker will advertise to producers and consumers. If not set, 
 36 # it uses the value for "listeners" if configured. Otherwise, it will use the value
 37 # returned from java.net.InetAddress.getCanonicalHostName().
 38 #advertised.listeners=PLAINTEXT://your.host.name:9092
 39 
 40 # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
 41 #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
 42 
 43 # The number of threads that the server uses for receiving requests from the network and sending responses to the network
 44 num.network.threads=3
 45 
 46 # The number of threads that the server uses for processing requests, which may include disk I/O
 47 num.io.threads=8
 48 
 49 # The send buffer (SO_SNDBUF) used by the socket server
 50 socket.send.buffer.bytes=102400
 51 
 52 # The receive buffer (SO_RCVBUF) used by the socket server
 53 socket.receive.buffer.bytes=102400
 54 
 55 # The maximum size of a request that the socket server will accept (protection against OOM)
 56 socket.request.max.bytes=104857600
 57 
 58 
 59 ############################# Log Basics #############################
 60 
 61 # A comma separated list of directories under which to store log files
 62 #log.dirs=/tmp/kafka-logs
 63 log.dirs=/opt/kafka_2.12-1.1.0/kafka_log
 64 
 65 # The default number of log partitions per topic. More partitions allow greater
 66 # parallelism for consumption, but this will also result in more files across
 67 # the brokers.
 68 num.partitions=1
 69 
 70 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
 71 # This value is recommended to be increased for installations with data dirs located in RAID array.
 72 num.recovery.threads.per.data.dir=1
 73 
 74 ############################# Internal Topic Settings #############################
 75 # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
 76 # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3.
 77 offsets.topic.replication.factor=1
 78 transaction.state.log.replication.factor=1
 79 transaction.state.log.min.isr=1
 80 
 81 ############################# Log Flush Policy #############################
 82 
 83 # Messages are immediately written to the filesystem but by default we only fsync() to sync
 84 # the OS cache lazily. The following configurations control the flush of data to disk.
 85 # There are a few important trade-offs here:
 86 # 1. Durability: Unflushed data may be lost if you are not using replication.
 87 # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
 88 # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
 89 # The settings below allow one to configure the flush policy to flush data after a period of time or
 90 # every N messages (or both). This can be done globally and overridden on a per-topic basis.
 91 
 92 # The number of messages to accept before forcing a flush of data to disk
 93 #log.flush.interval.messages=10000
 94 
 95 # The maximum amount of time a message can sit in a log before we force a flush
 96 #log.flush.interval.ms=1000
 97 
 98 ############################# Log Retention Policy #############################
 99 
100 # The following configurations control the disposal of log segments. The policy can
101 # be set to delete segments after a period of time, or after a given size has accumulated.
102 # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
103 # from the end of the log.
104 
105 # The minimum age of a log file to be eligible for deletion due to age
106 log.retention.hours=168
107 
108 # A size-based retention policy for logs. Segments are pruned from the log unless the remaining
109 # segments drop below log.retention.bytes. Functions independently of log.retention.hours.
110 #log.retention.bytes=1073741824
111 
112 # The maximum size of a log segment file. When this size is reached a new log segment will be created.
113 log.segment.bytes=1073741824
114 
115 # The interval at which log segments are checked to see if they can be deleted according
116 # to the retention policies
117 log.retention.check.interval.ms=300000
118 
119 ############################# Zookeeper #############################
120 
121 # Zookeeper connection string (see zookeeper docs for details).
122 # This is a comma separated host:port pairs, each corresponding to a zk
123 # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
124 # You can also append an optional chroot string to the urls to specify the
125 # root directory for all kafka znodes.
126 zookeeper.connect=localhost:2181
127 
128 # Timeout in ms for connecting to zookeeper
129 zookeeper.connection.timeout.ms=6000
130 
131 
132 ############################# Group Coordinator Settings #############################
133 
134 # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
135 # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
136 # The default value for this is 3 seconds.
137 # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
138 # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during
139 # application startup.
140 group.initial.rebalance.delay.ms=0
View Code

相关文章:

  • 2022-01-09
  • 2022-12-23
  • 2022-12-23
  • 2021-12-29
  • 2022-12-23
  • 2021-10-20
  • 2021-07-19
  • 2021-12-09
猜你喜欢
  • 2022-12-23
  • 2021-11-07
  • 2021-08-25
  • 2021-12-19
  • 2021-11-29
  • 2021-06-07
相关资源
相似解决方案