kafka install

java install

Refer to the Java environment installation.

zookeeper install

Refer to the ZooKeeper installation.

kafka standalone install

  1. kafka installation

    [root@10 ~]# cd /mnt/ops/app/
    [root@10 app]# tar xzf kafka_2.11-0.9.0.0.tgz
    [root@10 app]# mv kafka_2.11-0.9.0.0 /mnt/app/kafka
    [root@10 app]# chown -R wisdom.wisdom /mnt/app/kafka

    [root@10 app]# mkdir -p /mnt/{data,log}/kafka
    [root@10 app]# chown -R wisdom.wisdom /mnt/{data,log}/kafka
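
    An optional sanity check that the directories exist and are owned by the wisdom user:
    [root@10 app]# ls -ld /mnt/app/kafka /mnt/data/kafka /mnt/log/kafka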
  2. kafka configuration file

    [root@10 app]# cp /mnt/app/kafka/config/{server.properties,server.properties.bak}
    [root@10 app]# cat > /mnt/app/kafka/config/server.properties <<EOF
    > broker.id=113
    > advertised.host.name=10.0.2.113
    > advertised.port=9092
    > delete.topic.enable=true
    > listeners=PLAINTEXT://:9092
    > num.network.threads=9
    > num.io.threads=16
    > socket.send.buffer.bytes=102400
    > socket.receive.buffer.bytes=102400
    > socket.request.max.bytes=104857600
    > log.dirs=/mnt/data/kafka
    > num.partitions=3
    > num.recovery.threads.per.data.dir=2
    > default.replication.factor=1
    > replica.fetch.max.bytes=20000000
    > num.replica.fetchers=2
    > message.max.bytes=10000000
    > log.flush.interval.messages=10000
    > log.flush.interval.ms=1000
    > log.retention.hours=48
    > log.segment.bytes=1073741824
    > log.retention.check.interval.ms=300000
    > zookeeper.connect=10.0.2.113:2181
    > zookeeper.connection.timeout.ms=6000
    > EOF
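
    In this file, broker.id must be unique per broker and advertised.host.name should be an address clients can actually reach. An optional check of the key settings after writing the file:
    [root@10 app]# grep -E '^(broker\.id|advertised\.host\.name|listeners|log\.dirs|zookeeper\.connect)=' /mnt/app/kafka/config/server.properties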
  3. kafka log directory

    [root@10 app]# vim /mnt/app/kafka/bin/kafka-run-class.sh
    # Log directory to use
    if [ "x$LOG_DIR" = "x" ]; then
    LOG_DIR="/mnt/log/kafka"
    JMX_PORT=8092
    else
    LOG_DIR="/mnt/log/kafka"
    JMX_PORT=8092
    fi
  4. kafka JVM settings

    [root@10 app]# vim /mnt/app/kafka/bin/kafka-server-start.sh
    if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx4G -Xms4G -Xmn2G -XX:PermSize=64m -XX:MaxPermSize=128m -XX:SurvivorRatio=6 -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
    fi
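
    Note: -XX:PermSize and -XX:MaxPermSize only apply to JDK 7; JDK 8 removed PermGen and ignores them with a warning. On JDK 8, a roughly equivalent setting (a sketch, sizes are assumptions to adjust for your heap budget) would be:
    export KAFKA_HEAP_OPTS="-Xmx4G -Xms4G -Xmn2G -XX:MetaspaceSize=64m -XX:MaxMetaspaceSize=128m -XX:SurvivorRatio=6 -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"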
  5. kafka startup

    [root@10 app]# su - wisdom
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-server-start.sh -daemon /mnt/app/kafka/config/server.properties
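
    An optional smoke test with the scripts shipped in Kafka 0.9 (the topic name "test" is just an example):
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-topics.sh --create --zookeeper 10.0.2.113:2181 --replication-factor 1 --partitions 3 --topic test
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-topics.sh --describe --zookeeper 10.0.2.113:2181 --topic test
    [wisdom@10 ~]$ echo "hello" | /mnt/app/kafka/bin/kafka-console-producer.sh --broker-list 10.0.2.113:9092 --topic test
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-console-consumer.sh --zookeeper 10.0.2.113:2181 --topic test --from-beginning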

kafka cluster install

  1. kafka installation

    [root@10 ~]# cd /mnt/ops/app/
    [root@10 app]# tar xzf kafka_2.11-0.9.0.0.tgz
    [root@10 app]# mv kafka_2.11-0.9.0.0 /mnt/app/kafka
    [root@10 app]# chown -R wisdom.wisdom /mnt/app/kafka

    [root@10 app]# mkdir -p /mnt/{data,log}/kafka
    [root@10 app]# chown -R wisdom.wisdom /mnt/{data,log}/kafka
  2. kafka configuration files (one per broker)

    [root@10 app]# cp /mnt/app/kafka/config/{server.properties,server.properties.bak}

    kafka cluster-1:
    [root@10 app]# cat > /mnt/app/kafka/config/server.properties <<EOF
    > broker.id=113
    > advertised.host.name=10.0.2.113
    > advertised.port=9092
    > delete.topic.enable=true
    > listeners=PLAINTEXT://:9092
    > num.network.threads=4
    > num.io.threads=8
    > socket.send.buffer.bytes=102400
    > socket.receive.buffer.bytes=102400
    > socket.request.max.bytes=104857600
    > log.dirs=/mnt/data/kafka
    > num.partitions=3
    > num.recovery.threads.per.data.dir=2
    > default.replication.factor=1
    > replica.fetch.max.bytes=20000000
    > num.replica.fetchers=2
    > message.max.bytes=10000000
    > log.flush.interval.messages=10000
    > log.flush.interval.ms=1000
    > log.retention.hours=48
    > log.segment.bytes=1073741824
    > log.retention.check.interval.ms=300000
    > zookeeper.connect=10.0.2.113:2181,10.0.2.114:2181,10.0.2.115:2181
    > zookeeper.connection.timeout.ms=6000
    > EOF

    kafka cluster-2:
    [root@10 app]# cat > /mnt/app/kafka/config/server.properties <<EOF
    > broker.id=114
    > advertised.host.name=10.0.2.114
    > advertised.port=9092
    > delete.topic.enable=true
    > listeners=PLAINTEXT://:9092
    > num.network.threads=4
    > num.io.threads=8
    > socket.send.buffer.bytes=102400
    > socket.receive.buffer.bytes=102400
    > socket.request.max.bytes=104857600
    > log.dirs=/mnt/data/kafka
    > num.partitions=5
    > num.recovery.threads.per.data.dir=2
    > default.replication.factor=1
    > replica.fetch.max.bytes=20000000
    > num.replica.fetchers=2
    > message.max.bytes=10000000
    > log.flush.interval.messages=10000
    > log.flush.interval.ms=1000
    > log.retention.hours=48
    > log.segment.bytes=1073741824
    > log.retention.check.interval.ms=300000
    > zookeeper.connect=10.0.2.113:2181,10.0.2.114:2181,10.0.2.115:2181
    > zookeeper.connection.timeout.ms=6000
    > EOF

    kafka cluster-3:
    [root@10 app]# cat > /mnt/app/kafka/config/server.properties <<EOF
    > broker.id=115
    > advertised.host.name=10.0.2.115
    > advertised.port=9092
    > delete.topic.enable=true
    > listeners=PLAINTEXT://:9092
    > num.network.threads=4
    > num.io.threads=8
    > socket.send.buffer.bytes=102400
    > socket.receive.buffer.bytes=102400
    > socket.request.max.bytes=104857600
    > log.dirs=/mnt/data/kafka
    > num.partitions=5
    > num.recovery.threads.per.data.dir=2
    > default.replication.factor=1
    > replica.fetch.max.bytes=20000000
    > num.replica.fetchers=2
    > message.max.bytes=10000000
    > log.flush.interval.messages=10000
    > log.flush.interval.ms=1000
    > log.retention.hours=48
    > log.segment.bytes=1073741824
    > log.retention.check.interval.ms=300000
    > zookeeper.connect=10.0.2.113:2181,10.0.2.114:2181,10.0.2.115:2181
    > zookeeper.connection.timeout.ms=6000
    > EOF
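
    The three broker files differ only in broker.id, advertised.host.name and (here) num.partitions; note that num.partitions is the default partition count for new topics and is normally kept identical across brokers. If rolling the config out to each host, a small sketch that derives the per-broker values from the local IP (assuming, as above, the last octet is used as broker.id):
    IP=$(hostname -I | awk '{print $1}')
    sed -i "s/^broker.id=.*/broker.id=${IP##*.}/" /mnt/app/kafka/config/server.properties
    sed -i "s/^advertised.host.name=.*/advertised.host.name=${IP}/" /mnt/app/kafka/config/server.properties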
  3. kafka log directory

    [root@10 app]# vim /mnt/app/kafka/bin/kafka-run-class.sh
    # Log directory to use
    if [ "x$LOG_DIR" = "x" ]; then
    LOG_DIR="/mnt/log/kafka"
    JMX_PORT=8092
    else
    LOG_DIR="/mnt/log/kafka"
    JMX_PORT=8092
    fi
  4. kafka JVM settings

    [root@10 app]# vim /mnt/app/kafka/bin/kafka-server-start.sh
    if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx4G -Xms4G -Xmn2G -XX:PermSize=64m -XX:MaxPermSize=128m -XX:SurvivorRatio=6 -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly"
    fi
  5. kafka startup

    [root@10 app]# su - wisdom
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-server-start.sh -daemon /mnt/app/kafka/config/server.properties
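
    After all three brokers are up, an optional check that they registered in ZooKeeper and that a replicated topic can be created (the topic name is just an example; the id list should show 113, 114 and 115):
    [wisdom@10 ~]$ /mnt/app/kafka/bin/zookeeper-shell.sh 10.0.2.113:2181 ls /brokers/ids
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-topics.sh --create --zookeeper 10.0.2.113:2181,10.0.2.114:2181,10.0.2.115:2181 --replication-factor 3 --partitions 3 --topic cluster-test
    [wisdom@10 ~]$ /mnt/app/kafka/bin/kafka-topics.sh --describe --zookeeper 10.0.2.113:2181,10.0.2.114:2181,10.0.2.115:2181 --topic cluster-test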