From 642c620d0074c8547fc2165bb95490d2fce04ce4 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 14:50:01 +0100 Subject: [PATCH 01/93] Uses latest jre image --- docker-kafka-persistent/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-kafka-persistent/Dockerfile b/docker-kafka-persistent/Dockerfile index 81200562..4db24ede 100644 --- a/docker-kafka-persistent/Dockerfile +++ b/docker-kafka-persistent/Dockerfile @@ -1,5 +1,5 @@ -FROM openjdk:8u102-jre +FROM openjdk:8u121-jre ENV kafka_version=0.10.1.1 ENV scala_version=2.11.8 From 42ea2182f63541ffbe256bdd353e300bf73ba003 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 14:50:21 +0100 Subject: [PATCH 02/93] Upgrades Kafka to latest release --- docker-kafka-persistent/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-kafka-persistent/Dockerfile b/docker-kafka-persistent/Dockerfile index 4db24ede..8e593ce4 100644 --- a/docker-kafka-persistent/Dockerfile +++ b/docker-kafka-persistent/Dockerfile @@ -1,7 +1,7 @@ FROM openjdk:8u121-jre -ENV kafka_version=0.10.1.1 +ENV kafka_version=0.10.2.0 ENV scala_version=2.11.8 ENV kafka_bin_version=2.11-$kafka_version From 3739b24219d6850dcf7a08b5aac8f7e9a9e39c76 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 14:50:52 +0100 Subject: [PATCH 03/93] Upgrades Scala to latest, supported as of Kafka 0.10.2 --- docker-kafka-persistent/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-kafka-persistent/Dockerfile b/docker-kafka-persistent/Dockerfile index 8e593ce4..678f6f2c 100644 --- a/docker-kafka-persistent/Dockerfile +++ b/docker-kafka-persistent/Dockerfile @@ -2,8 +2,8 @@ FROM openjdk:8u121-jre ENV kafka_version=0.10.2.0 -ENV scala_version=2.11.8 -ENV kafka_bin_version=2.11-$kafka_version +ENV scala_version=2.12.1 +ENV kafka_bin_version=2.12-$kafka_version RUN curl -SLs "http://www.scala-lang.org/files/archive/scala-$scala_version.deb" -o scala.deb \ && dpkg -i scala.deb \ From a385740ddf05532f48c73f2aba60a4ab44b61b6e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 15:02:59 +0100 Subject: [PATCH 04/93] Adds namespace to kubectl commands, matching that in yamls --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 492af465..8cfaecdf 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Alternatively create [PV](http://kubernetes.io/docs/user-guide/persistent-volume ./bootstrap/pv.sh kubectl create -f ./bootstrap/pvc.yml # check that claims are bound -kubectl get pvc +kubectl -n kafka get pvc ``` ## Set up Zookeeper @@ -49,7 +49,7 @@ kubectl create -f ./ You might want to verify in logs that Kafka found its own DNS name(s) correctly. Look for records like: ``` -kubectl logs kafka-0 | grep "Registered broker" +kubectl -n kafka logs kafka-0 | grep "Registered broker" # INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka-0.broker.kafka.svc.cluster.local,9092,PLAINTEXT) ``` @@ -79,5 +79,5 @@ kubectl create -f test/21consumer-test1.yml Testing and retesting... delete the namespace. PVs are outside namespaces so delete them too. 
``` kubectl delete namespace kafka -rm -R ./data/ && kubectl delete pv datadir-kafka-0 datadir-kafka-1 datadir-kafka-2 +rm -R ./data/ && kubectl -n kafka delete pv datadir-kafka-0 datadir-kafka-1 datadir-kafka-2 ``` From 1110d598c8dfe705f78fc101cf697738e65d97c6 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 15:08:43 +0100 Subject: [PATCH 05/93] Makes PV match PVC, avoiding small initial storage becase resize is difficult in GKE --- README.md | 2 +- bootstrap/pv-template.yml | 6 +++--- bootstrap/pv.sh | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 8cfaecdf..11175391 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ Alternatively create [PV](http://kubernetes.io/docs/user-guide/persistent-volume ``` ./bootstrap/pv.sh -kubectl create -f ./bootstrap/pvc.yml +kubectl create -f ./10pvc.yml # check that claims are bound kubectl -n kafka get pvc ``` diff --git a/bootstrap/pv-template.yml b/bootstrap/pv-template.yml index e58bfb2b..befb6b6c 100644 --- a/bootstrap/pv-template.yml +++ b/bootstrap/pv-template.yml @@ -10,7 +10,7 @@ spec: accessModes: - ReadWriteOnce capacity: - storage: 100Mi + storage: 200Gi hostPath: path: /tmp/k8s-data/datadir-kafka-0 --- @@ -25,7 +25,7 @@ spec: accessModes: - ReadWriteOnce capacity: - storage: 100Mi + storage: 200Gi hostPath: path: /tmp/k8s-data/datadir-kafka-1 --- @@ -40,6 +40,6 @@ spec: accessModes: - ReadWriteOnce capacity: - storage: 100Mi + storage: 200Gi hostPath: path: /tmp/k8s-data/datadir-kafka-2 diff --git a/bootstrap/pv.sh b/bootstrap/pv.sh index 78bf7f5d..c12787bc 100755 --- a/bootstrap/pv.sh +++ b/bootstrap/pv.sh @@ -1,6 +1,6 @@ #!/bin/bash -echo "Note that in for example GKE a PetSet will have PersistentVolume(s) and PersistentVolumeClaim(s) created for it automatically" +echo "Note that in for example GKE a StatefulSet will have PersistentVolume(s) and PersistentVolumeClaim(s) created for it automatically" dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. 
&& pwd )" path="$dir/data" From b9340fe5d45b9aca8a0c11b924698624666e941e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 1 Mar 2017 15:40:13 +0100 Subject: [PATCH 06/93] 0.10.2 adds to the confusion about consumer args, #21 --- test/21consumer-test1.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index aff5944f..33218738 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -17,8 +17,8 @@ spec: image: solsson/kafka:0.10.0.1 command: - ./bin/kafka-console-consumer.sh - - --zookeeper - - zookeeper:2181 + - --bootstrap-server + - kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 - --topic - test1 - --from-beginning From cfe5cd7ab3c0cd26e87969219d2c626ff85addb4 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 20:16:22 +0200 Subject: [PATCH 07/93] Use a generic kafka image and explicitly override "eligible for deletion" --- 50kafka.yml | 4 +- docker-kafka-persistent/Dockerfile | 19 --- .../config/server.properties | 124 ------------------ 3 files changed, 2 insertions(+), 145 deletions(-) delete mode 100644 docker-kafka-persistent/Dockerfile delete mode 100644 docker-kafka-persistent/config/server.properties diff --git a/50kafka.yml b/50kafka.yml index 8a262dff..e6d33eb7 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -14,13 +14,13 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: broker - image: solsson/kafka-persistent:0.10.1@sha256:0719b4688b666490abf4b32a3cc5c5da7bb2d6276b47377b35de5429f783e9c2 + image: solsson/kafka:0.10.2.0-alpine@sha256:a5256a1026750b5cad5a127dfe685f8b4b8053d06443392150c208ad84deaf48 ports: - containerPort: 9092 command: - sh - -c - - "./bin/kafka-server-start.sh config/server.properties --override broker.id=$(hostname | awk -F'-' '{print $2}')" + - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override broker.id=$(hostname | awk -F'-' '{print $2}')" volumeMounts: - name: datadir mountPath: /opt/kafka/data diff --git a/docker-kafka-persistent/Dockerfile b/docker-kafka-persistent/Dockerfile deleted file mode 100644 index 678f6f2c..00000000 --- a/docker-kafka-persistent/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ - -FROM openjdk:8u121-jre - -ENV kafka_version=0.10.2.0 -ENV scala_version=2.12.1 -ENV kafka_bin_version=2.12-$kafka_version - -RUN curl -SLs "http://www.scala-lang.org/files/archive/scala-$scala_version.deb" -o scala.deb \ - && dpkg -i scala.deb \ - && rm scala.deb \ - && curl -SLs "http://www.apache.org/dist/kafka/$kafka_version/kafka_$kafka_bin_version.tgz" | tar -xzf - -C /opt \ - && mv /opt/kafka_$kafka_bin_version /opt/kafka - -WORKDIR /opt/kafka -ENTRYPOINT ["bin/kafka-server-start.sh"] - -ADD config/server.properties config/ - -CMD ["config/server.properties"] diff --git a/docker-kafka-persistent/config/server.properties b/docker-kafka-persistent/config/server.properties deleted file mode 100644 index 649a2619..00000000 --- a/docker-kafka-persistent/config/server.properties +++ /dev/null @@ -1,124 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# see kafka.server.KafkaConfig for additional details and defaults - -############################# Server Basics ############################# - -# The id of the broker. This must be set to a unique integer for each broker. -broker.id=0 - -# Use https://github.com/Yolean/kafka-topic-client instead -auto.create.topics.enable=false - -# Switch to enable topic deletion or not, default value is false -delete.topic.enable=false - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. It will get the value returned from -# java.net.InetAddress.getCanonicalHostName() if not configured. -# FORMAT: -# listeners = security_protocol://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -#listeners=PLAINTEXT://:9092 - -# Hostname and port the broker will advertise to producers and consumers. If not set, -# it uses the value for "listeners" if configured. Otherwise, it will use the value -# returned from java.net.InetAddress.getCanonicalHostName(). -#advertised.listeners=PLAINTEXT://your.host.name:9092 - -# The number of threads handling network requests -num.network.threads=3 - -# The number of threads doing disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma seperated list of directories under which to store log files -log.dirs=/opt/kafka/data/topics - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. 
- -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion -log.retention.hours=-1 - -# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining -# segments don't drop below log.retention.bytes. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 - -############################# Zookeeper ############################# - -# Zookeeper connection string (see zookeeper docs for details). -# This is a comma separated host:port pairs, each corresponding to a zk -# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". -# You can also append an optional chroot string to the urls to specify the -# root directory for all kafka znodes. -zookeeper.connect=zookeeper:2181 - -# Timeout in ms for connecting to zookeeper -zookeeper.connection.timeout.ms=6000 - - From 5b8e94a0791a2bceedabcb70805924ae145ff015 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 20:19:06 +0200 Subject: [PATCH 08/93] Upgrade to development build of the latest 0.11 RC --- 50kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index e6d33eb7..02780c82 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -14,7 +14,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: broker - image: solsson/kafka:0.10.2.0-alpine@sha256:a5256a1026750b5cad5a127dfe685f8b4b8053d06443392150c208ad84deaf48 + image: solsson/kafka:0.11.0.0-rc2@sha256:713869ff8d04840f3dfd6d002463124dbad0a5b7182fcdb5f15b200b35dbe65d ports: - containerPort: 9092 command: From 5ad15506d7e1b16627d73f893175d8e0741fc5e0 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 20:21:57 +0200 Subject: [PATCH 09/93] Modern kafka clients use a bootstrap servers list to connect... which should be resolvable to individual brokers. Thus we should avoid the gotcha with a regular kubernetes service that you need to replicate everything to every broker (#21). See 20dns.yml for how to resolve brokers. --- 30service.yml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 30service.yml diff --git a/30service.yml b/30service.yml deleted file mode 100644 index 5403da29..00000000 --- a/30service.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: kafka - namespace: kafka -spec: - ports: - - port: 9092 - selector: - app: kafka From 6e8cab0a86f6a9e52f9a90015d4e2438eaab91df Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 21:18:37 +0200 Subject: [PATCH 10/93] Reuses the statefulset's image in test pods, without tag duplication... 
Works when scheduled on the same node(s) --- test/11topic-create-test1.yml | 3 ++- test/12topic-create-test2.yml | 3 ++- test/21consumer-test1.yml | 3 ++- test/99testclient.yml | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index fdb805e3..e03c6c9c 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,8 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.10.0.1 + image: solsson/kafka + imagePullPolicy: Never command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index 45d98816..9abc77f3 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,8 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.10.0.1 + image: solsson/kafka + imagePullPolicy: Never command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index 33218738..7faf80bb 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,8 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.10.0.1 + image: solsson/kafka + imagePullPolicy: Never command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/99testclient.yml b/test/99testclient.yml index 3ffa63a3..97e3dea0 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,8 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka-persistent:0.10.1@sha256:0719b4688b666490abf4b32a3cc5c5da7bb2d6276b47377b35de5429f783e9c2 + image: solsson/kafka + imagePullPolicy: Never command: - sh - -c From 15bcb8742a2937a6ff881a9b2610e8031cd1ca5e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sat, 24 Jun 2017 08:19:16 +0200 Subject: [PATCH 11/93] Borrows string trick from https://github.com/kubernetes/charts/ --- 50kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index 02780c82..d24e5bc9 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -20,7 +20,7 @@ spec: command: - sh - -c - - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override broker.id=$(hostname | awk -F'-' '{print $2}')" + - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override broker.id=${HOSTNAME##*-}" volumeMounts: - name: datadir mountPath: /opt/kafka/data From a2b658ab4e432d7568b5dd82839dbfebbdb5a18b Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 21:15:45 +0200 Subject: [PATCH 12/93] Upgrades zookeeper to latest --- zookeeper/50zoo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index 5cb7c024..e67c7151 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -14,7 +14,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: zookeeper - image: solsson/zookeeper-statefulset:3.4.9@sha256:d32b44b32009a69b3450a5216f459e504f1041f587596895219fc04cf22f5546 + image: solsson/zookeeper-statefulset:3.4.10@sha256:d07376612cee33706e308ba7fd174df6b778989d26e49e24537f71b2d1699ab6 env: - name: ZOO_SERVERS value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant From 344df6eeb2738763255c3223e9545358667421a1 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 
21:20:08 +0200 Subject: [PATCH 13/93] Uses zookeeper without the bind address sed --- zookeeper/50zoo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index e67c7151..c3be35ac 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -14,7 +14,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: zookeeper - image: solsson/zookeeper-statefulset:3.4.10@sha256:d07376612cee33706e308ba7fd174df6b778989d26e49e24537f71b2d1699ab6 + image: solsson/zookeeper-statefulset:3.4.10@sha256:dbedb438671dfe6bf92dbe630c4ea070e7c60c3e4d10c276c701442574a76427 env: - name: ZOO_SERVERS value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant From 8897c054abc0e8b0c8de6ee2bc54d4775bb395c4 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 23 Jun 2017 21:40:19 +0200 Subject: [PATCH 14/93] We do need the bind address sed. This image logs the at start, to reduce the element of surprise. --- zookeeper/50zoo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index c3be35ac..3ac4ce96 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -14,7 +14,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: zookeeper - image: solsson/zookeeper-statefulset:3.4.10@sha256:dbedb438671dfe6bf92dbe630c4ea070e7c60c3e4d10c276c701442574a76427 + image: solsson/zookeeper-statefulset:3.4.10@sha256:0ad93c98d5165b4eb747c4b0dd04a7a448a5c4b4cbcaa4bffc15018b76b81bb5 env: - name: ZOO_SERVERS value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant From a9b7a2220a6daaaa8a4e014734ab86384fb7d9de Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sat, 24 Jun 2017 09:55:16 +0200 Subject: [PATCH 15/93] Removes out-of-date zookeeper info --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 11175391..13d6032f 100644 --- a/README.md +++ b/README.md @@ -24,18 +24,15 @@ kubectl -n kafka get pvc ## Set up Zookeeper -There is a Zookeeper+StatefulSet [blog post](http://blog.kubernetes.io/2016/12/statefulset-run-scale-stateful-applications-in-kubernetes.html) and [example](https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper), -but it appears tuned for workloads heavier than Kafka topic metadata. - -The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster, +The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances, so we use the [official docker image](https://hub.docker.com/_/zookeeper/) but with a [startup script change to guess node id from hostname](https://github.com/solsson/zookeeper-docker/commit/df9474f858ad548be8a365cb000a4dd2d2e3a217). -Zookeeper runs as a [Deployment](http://kubernetes.io/docs/user-guide/deployments/) without persistent storage: ``` kubectl create -f ./zookeeper/ ``` +Despite being a StatefulSet, there is no persistent volume by default. If you lose your zookeeper cluster, kafka will be unaware that persisted topics exist. The data is still there, but you need to re-create topics. 
From 07fc0d17e5ca600be6cf0c87c09670d4c984bc4b Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 06:13:08 +0200 Subject: [PATCH 16/93] Uses the new small image, with only selected jars from the build --- 50kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index d24e5bc9..c614c8a2 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -14,7 +14,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: broker - image: solsson/kafka:0.11.0.0-rc2@sha256:713869ff8d04840f3dfd6d002463124dbad0a5b7182fcdb5f15b200b35dbe65d + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 ports: - containerPort: 9092 command: From df88b0de32006e6db20a4b2116a83b9be3b90146 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 9 Nov 2016 15:33:06 +0100 Subject: [PATCH 17/93] Default log dir is /tmp/kafka-logs so it should be changed to be inside the persistent volume mount --- 50kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index c614c8a2..1dd24bfc 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -20,7 +20,7 @@ spec: command: - sh - -c - - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override broker.id=${HOSTNAME##*-}" + - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override log.dirs=/opt/kafka/data/topics --override broker.id=${HOSTNAME##*-}" volumeMounts: - name: datadir mountPath: /opt/kafka/data From 5919d454d84739edf90d3478afaec9fc559e9f2f Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 12:47:14 +0200 Subject: [PATCH 18/93] Support for generic image results in a verbose startup command, make it git friendly --- 50kafka.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index 1dd24bfc..c99630b5 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -20,7 +20,12 @@ spec: command: - sh - -c - - "./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 --override log.dirs=/opt/kafka/data/topics --override broker.id=${HOSTNAME##*-}" + - > + ./bin/kafka-server-start.sh + config/server.properties + --override log.retention.hours=-1 + --override log.dirs=/opt/kafka/data/topics + --override broker.id=${HOSTNAME##*-} volumeMounts: - name: datadir mountPath: /opt/kafka/data From 3ab9938d85b4119354ec10de9ed572da1570af0b Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 15:34:31 +0200 Subject: [PATCH 19/93] Adds metrics exporter for Prometheus --- 50kafka.yml | 10 ++++++++++ README.md | 8 ++++++++ 2 files changed, 18 insertions(+) diff --git a/50kafka.yml b/50kafka.yml index c99630b5..1dd48046 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -10,11 +10,21 @@ spec: metadata: labels: app: kafka + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 containers: + - name: metrics + image: solsson/kafka-prometheus-jmx-exporter + ports: + - containerPort: 5556 - name: broker image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + env: + - name: JMX_PORT + value: "5555" ports: - containerPort: 9092 command: diff --git a/README.md b/README.md index 13d6032f..5b9dbad2 100644 --- a/README.md +++ b/README.md @@ -78,3 +78,11 @@ Testing and retesting... delete the namespace. 
PVs are outside namespaces so del kubectl delete namespace kafka rm -R ./data/ && kubectl -n kafka delete pv datadir-kafka-0 datadir-kafka-1 datadir-kafka-2 ``` + +## Metrics, Prometheus style + +Is the metrics system up and running? +``` +kubectl logs -c metrics kafka-0 +kubectl exec -c broker kafka-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' +``` From a1823a8f21149b6b82a91593c1a35117a0aee1bc Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 15:38:07 +0200 Subject: [PATCH 20/93] Use a specific build, github.com/solsson/dockerfiles/commit/81e8e4c20be19835d4036db1c975ff17455fe7e7 --- 50kafka.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index 1dd48046..831dc607 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -17,7 +17,7 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: metrics - image: solsson/kafka-prometheus-jmx-exporter + image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d ports: - containerPort: 5556 - name: broker From 42972715f1738acd4af350da2a34cf4bd100a43e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 20:36:14 +0200 Subject: [PATCH 21/93] Reindents yaml to match the rest of this repo, and most examples out there --- zookeeper/50zoo.yml | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index 3ac4ce96..33ea7b8a 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -13,26 +13,26 @@ spec: spec: terminationGracePeriodSeconds: 10 containers: - - name: zookeeper - image: solsson/zookeeper-statefulset:3.4.10@sha256:0ad93c98d5165b4eb747c4b0dd04a7a448a5c4b4cbcaa4bffc15018b76b81bb5 - env: - - name: ZOO_SERVERS - value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant - ports: - - containerPort: 2181 - name: client - - containerPort: 2888 - name: peer - - containerPort: 3888 - name: leader-election - volumeMounts: - - name: datadir - mountPath: /data - # There's defaults in this folder, such as logging config - #- name: conf - # mountPath: /conf - volumes: - #- name: conf - # emptyDir: {} + - name: zookeeper + image: solsson/zookeeper-statefulset:3.4.10@sha256:0ad93c98d5165b4eb747c4b0dd04a7a448a5c4b4cbcaa4bffc15018b76b81bb5 + env: + - name: ZOO_SERVERS + value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election + volumeMounts: - name: datadir - emptyDir: {} + mountPath: /data + # There's defaults in this folder, such as logging config + #- name: conf + # mountPath: /conf + volumes: + #- name: conf + # emptyDir: {} + - name: datadir + emptyDir: {} From ccb9e5df12e5dbe70f243432d1613bd101b35630 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 18:18:35 +0200 Subject: [PATCH 22/93] Uses the kafka image as Zookeeper service... to reduce image download times and use a supported version. 
Initial config is from https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#sc_zkMulitServerSetup and the fixes for stateful set are from https://github.com/solsson/zookeeper-docker --- zookeeper/10zookeeper-config.yml | 24 ++++++++++++++++++++++++ zookeeper/50zoo.yml | 27 +++++++++++++++++---------- 2 files changed, 41 insertions(+), 10 deletions(-) create mode 100644 zookeeper/10zookeeper-config.yml diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml new file mode 100644 index 00000000..e9402cbd --- /dev/null +++ b/zookeeper/10zookeeper-config.yml @@ -0,0 +1,24 @@ +kind: ConfigMap +metadata: + name: zookeeper-config + namespace: kafka +apiVersion: v1 +data: + zookeeper.properties: |- + tickTime=2000 + dataDir=/var/lib/zookeeper/data + dataLogDir=/var/lib/zookeeper/log + clientPort=2181 + initLimit=5 + syncLimit=2 + server.1=zoo-0.zoo:2888:3888:participant + server.2=zoo-1.zoo:2888:3888:participant + server.3=zoo-2.zoo:2888:3888:participant + server.4=zoo-3.zoo:2888:3888:participant + server.5=zoo-4.zoo:2888:3888:participant + + log4j.properties: |- + log4j.rootLogger=INFO, stdout + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index 33ea7b8a..27e34388 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -14,10 +14,17 @@ spec: terminationGracePeriodSeconds: 10 containers: - name: zookeeper - image: solsson/zookeeper-statefulset:3.4.10@sha256:0ad93c98d5165b4eb747c4b0dd04a7a448a5c4b4cbcaa4bffc15018b76b81bb5 - env: - - name: ZOO_SERVERS - value: server.1=zoo-0.zoo:2888:3888:participant server.2=zoo-1.zoo:2888:3888:participant server.3=zoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + command: + - sh + - -c + - > + set -e; + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; + cat config/zookeeper.properties; + ./bin/zookeeper-server-start.sh config/zookeeper.properties ports: - containerPort: 2181 name: client @@ -26,13 +33,13 @@ spec: - containerPort: 3888 name: leader-election volumeMounts: + - name: config + mountPath: /usr/local/kafka/config - name: datadir - mountPath: /data - # There's defaults in this folder, such as logging config - #- name: conf - # mountPath: /conf + mountPath: /var/lib/zookeeper/data volumes: - #- name: conf - # emptyDir: {} + - name: config + configMap: + name: zookeeper-config - name: datadir emptyDir: {} From 3344799b4d6eb27885a0d0225ee999711c94c507 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 18:24:07 +0200 Subject: [PATCH 23/93] Uses the same data path convention as zookeeper, from Confluent Platform --- 50kafka.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 831dc607..db7092db 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -34,11 +34,11 @@ spec: ./bin/kafka-server-start.sh config/server.properties --override log.retention.hours=-1 - --override log.dirs=/opt/kafka/data/topics + --override log.dirs=/var/lib/kafka/data/topics --override broker.id=${HOSTNAME##*-} volumeMounts: - name: 
datadir - mountPath: /opt/kafka/data + mountPath: /var/lib/kafka/data volumeClaimTemplates: - metadata: name: datadir From 4351e7c1779190cc264a49947ebc346d6ce3729c Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 21:16:51 +0200 Subject: [PATCH 24/93] Uses a named storage class so you can select volume type specifically for zoo --- bootstrap/storageclass-kafka-zookeeper-gke.yml | 7 +++++++ zookeeper/50zoo.yml | 14 +++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 bootstrap/storageclass-kafka-zookeeper-gke.yml diff --git a/bootstrap/storageclass-kafka-zookeeper-gke.yml b/bootstrap/storageclass-kafka-zookeeper-gke.yml new file mode 100644 index 00000000..44891bac --- /dev/null +++ b/bootstrap/storageclass-kafka-zookeeper-gke.yml @@ -0,0 +1,7 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: kafka-zookeeper +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-ssd diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index 27e34388..9251ce10 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -35,11 +35,19 @@ spec: volumeMounts: - name: config mountPath: /usr/local/kafka/config - - name: datadir + - name: data mountPath: /var/lib/zookeeper/data volumes: - name: config configMap: name: zookeeper-config - - name: datadir - emptyDir: {} + volumeClaimTemplates: + - metadata: + name: data + annotations: + volume.beta.kubernetes.io/storage-class: kafka-zookeeper + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi From 9479e819475f128e7fb1d82c5df0abae6c9cfa76 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 21:31:24 +0200 Subject: [PATCH 25/93] Verified the volume setup with Minikube --- bootstrap/storageclass-kafka-zookeeper-minikube.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 bootstrap/storageclass-kafka-zookeeper-minikube.yml diff --git a/bootstrap/storageclass-kafka-zookeeper-minikube.yml b/bootstrap/storageclass-kafka-zookeeper-minikube.yml new file mode 100644 index 00000000..ba89eb46 --- /dev/null +++ b/bootstrap/storageclass-kafka-zookeeper-minikube.yml @@ -0,0 +1,5 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: kafka-zookeeper +provisioner: k8s.io/minikube-hostpath From a8c8a39713cbe5ae6199f0733454ae05cfd4eb20 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 21:40:12 +0200 Subject: [PATCH 26/93] Updates the readme --- README.md | 15 ++++++++------- .../storageclass-zookeeper-gke.yml | 0 .../storageclass-zookeeper-minikube.yml | 0 3 files changed, 8 insertions(+), 7 deletions(-) rename bootstrap/storageclass-kafka-zookeeper-gke.yml => configure-gke/storageclass-zookeeper-gke.yml (100%) rename bootstrap/storageclass-kafka-zookeeper-minikube.yml => configure-minikube/storageclass-zookeeper-minikube.yml (100%) diff --git a/README.md b/README.md index 5b9dbad2..cb401d10 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,12 @@ To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`) kubectl create -f 00namespace.yml ``` +## Prepare your cluster + +For Minikube run `kubectl create -f configure-minikube/`. + +There's a similar setup for gke, in `configure-gke` of course. You might want to tweak it before creating. 
+ ## Set up volume claims You may add [storage class](http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) @@ -24,18 +30,13 @@ kubectl -n kafka get pvc ## Set up Zookeeper -The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances, -so we use the [official docker image](https://hub.docker.com/_/zookeeper/) -but with a [startup script change to guess node id from hostname](https://github.com/solsson/zookeeper-docker/commit/df9474f858ad548be8a365cb000a4dd2d2e3a217). +The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. +We use the zookeeper build that comes with the Kafka distribution, and tweak the startup command to support StatefulSet. ``` kubectl create -f ./zookeeper/ ``` -Despite being a StatefulSet, there is no persistent volume by default. -If you lose your zookeeper cluster, kafka will be unaware that persisted topics exist. -The data is still there, but you need to re-create topics. - ## Start Kafka Assuming you have your PVCs `Bound`, or enabled automatic provisioning (see above), go ahead and: diff --git a/bootstrap/storageclass-kafka-zookeeper-gke.yml b/configure-gke/storageclass-zookeeper-gke.yml similarity index 100% rename from bootstrap/storageclass-kafka-zookeeper-gke.yml rename to configure-gke/storageclass-zookeeper-gke.yml diff --git a/bootstrap/storageclass-kafka-zookeeper-minikube.yml b/configure-minikube/storageclass-zookeeper-minikube.yml similarity index 100% rename from bootstrap/storageclass-kafka-zookeeper-minikube.yml rename to configure-minikube/storageclass-zookeeper-minikube.yml From 26173af8577d3c11904196a04ca0b158f339bd64 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 25 Jun 2017 21:47:43 +0200 Subject: [PATCH 27/93] Enables metrics export to Prometheus, but they look very uninteresting. The selected config is from the jmx_exporter examples. --- README.md | 2 ++ zookeeper/50zoo.yml | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/README.md b/README.md index cb401d10..8b469cd5 100644 --- a/README.md +++ b/README.md @@ -86,4 +86,6 @@ Is the metrics system up and running? 
``` kubectl logs -c metrics kafka-0 kubectl exec -c broker kafka-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' +kubectl logs -c metrics zoo-0 +kubectl exec -c zookeeper zoo-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' ``` diff --git a/zookeeper/50zoo.yml b/zookeeper/50zoo.yml index 9251ce10..12d9c441 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50zoo.yml @@ -10,11 +10,27 @@ spec: metadata: labels: app: zookeeper + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 containers: + - name: metrics + image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d + command: + - "java" + - "-jar" + - "jmx_prometheus_httpserver.jar" + - "5556" + - example_configs/zookeeper.yaml + ports: + - containerPort: 5556 - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + env: + - name: JMX_PORT + value: "5555" command: - sh - -c From 4fd1e5ebf4196ac27d6d49d2c1a6b7b57eaab8e3 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Mon, 26 Jun 2017 13:00:12 +0200 Subject: [PATCH 28/93] Makes persistence a fundamental attribute of the statefulset --- zookeeper/10zookeeper-config.yml | 6 +++--- zookeeper/{20zoo-service.yml => 20pzoo-service.yml} | 3 ++- zookeeper/{50zoo.yml => 50pzoo.yml} | 5 +++-- 3 files changed, 8 insertions(+), 6 deletions(-) rename zookeeper/{20zoo-service.yml => 20pzoo-service.yml} (83%) rename zookeeper/{50zoo.yml => 50pzoo.yml} (96%) diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml index e9402cbd..b718ce1d 100644 --- a/zookeeper/10zookeeper-config.yml +++ b/zookeeper/10zookeeper-config.yml @@ -11,9 +11,9 @@ data: clientPort=2181 initLimit=5 syncLimit=2 - server.1=zoo-0.zoo:2888:3888:participant - server.2=zoo-1.zoo:2888:3888:participant - server.3=zoo-2.zoo:2888:3888:participant + server.1=pzoo-0.zoo:2888:3888:participant + server.2=pzoo-1.zoo:2888:3888:participant + server.3=pzoo-2.zoo:2888:3888:participant server.4=zoo-3.zoo:2888:3888:participant server.5=zoo-4.zoo:2888:3888:participant diff --git a/zookeeper/20zoo-service.yml b/zookeeper/20pzoo-service.yml similarity index 83% rename from zookeeper/20zoo-service.yml rename to zookeeper/20pzoo-service.yml index d15dcc69..00c33e1c 100644 --- a/zookeeper/20zoo-service.yml +++ b/zookeeper/20pzoo-service.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: zoo + name: pzoo namespace: kafka spec: ports: @@ -12,3 +12,4 @@ spec: clusterIP: None selector: app: zookeeper + storage: persistent diff --git a/zookeeper/50zoo.yml b/zookeeper/50pzoo.yml similarity index 96% rename from zookeeper/50zoo.yml rename to zookeeper/50pzoo.yml index 12d9c441..925f4c50 100644 --- a/zookeeper/50zoo.yml +++ b/zookeeper/50pzoo.yml @@ -1,15 +1,16 @@ apiVersion: apps/v1beta1 kind: StatefulSet metadata: - name: zoo + name: pzoo namespace: kafka spec: - serviceName: "zoo" + serviceName: "pzoo" replicas: 5 template: metadata: labels: app: zookeeper + storage: persistent annotations: prometheus.io/scrape: "true" prometheus.io/port: "5556" From 225569f30ba7644816a3cb2c1a83b731cc3c0276 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Mon, 26 Jun 2017 13:03:03 +0200 Subject: [PATCH 29/93] Creates identical definitions for a non-persistent zoo statefulset --- zookeeper/21zoo-service.yml | 15 ++++++++ zookeeper/51zoo.yml | 70 +++++++++++++++++++++++++++++++++++++ 2 files 
changed, 85 insertions(+) create mode 100644 zookeeper/21zoo-service.yml create mode 100644 zookeeper/51zoo.yml diff --git a/zookeeper/21zoo-service.yml b/zookeeper/21zoo-service.yml new file mode 100644 index 00000000..00c33e1c --- /dev/null +++ b/zookeeper/21zoo-service.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: pzoo + namespace: kafka +spec: + ports: + - port: 2888 + name: peer + - port: 3888 + name: leader-election + clusterIP: None + selector: + app: zookeeper + storage: persistent diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml new file mode 100644 index 00000000..925f4c50 --- /dev/null +++ b/zookeeper/51zoo.yml @@ -0,0 +1,70 @@ +apiVersion: apps/v1beta1 +kind: StatefulSet +metadata: + name: pzoo + namespace: kafka +spec: + serviceName: "pzoo" + replicas: 5 + template: + metadata: + labels: + app: zookeeper + storage: persistent + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "5556" + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: metrics + image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d + command: + - "java" + - "-jar" + - "jmx_prometheus_httpserver.jar" + - "5556" + - example_configs/zookeeper.yaml + ports: + - containerPort: 5556 + - name: zookeeper + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + env: + - name: JMX_PORT + value: "5555" + command: + - sh + - -c + - > + set -e; + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; + cat config/zookeeper.properties; + ./bin/zookeeper-server-start.sh config/zookeeper.properties + ports: + - containerPort: 2181 + name: client + - containerPort: 2888 + name: peer + - containerPort: 3888 + name: leader-election + volumeMounts: + - name: config + mountPath: /usr/local/kafka/config + - name: data + mountPath: /var/lib/zookeeper/data + volumes: + - name: config + configMap: + name: zookeeper-config + volumeClaimTemplates: + - metadata: + name: data + annotations: + volume.beta.kubernetes.io/storage-class: kafka-zookeeper + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi From cb83353833645e543153cf6f9756ef3858f80442 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Mon, 26 Jun 2017 13:14:54 +0200 Subject: [PATCH 30/93] A cluster in three availability zones now get one persistent zk each, and two that can move automatically at node failures --- zookeeper/10zookeeper-config.yml | 10 +++++----- zookeeper/21zoo-service.yml | 4 ++-- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 22 +++++++--------------- 4 files changed, 15 insertions(+), 23 deletions(-) diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml index b718ce1d..58d8b6aa 100644 --- a/zookeeper/10zookeeper-config.yml +++ b/zookeeper/10zookeeper-config.yml @@ -11,11 +11,11 @@ data: clientPort=2181 initLimit=5 syncLimit=2 - server.1=pzoo-0.zoo:2888:3888:participant - server.2=pzoo-1.zoo:2888:3888:participant - server.3=pzoo-2.zoo:2888:3888:participant - server.4=zoo-3.zoo:2888:3888:participant - server.5=zoo-4.zoo:2888:3888:participant + server.1=pzoo-0.pzoo:2888:3888:participant + server.2=pzoo-1.pzoo:2888:3888:participant + server.3=pzoo-2.pzoo:2888:3888:participant + server.4=zoo-0.zoo:2888:3888:participant + 
server.5=zoo-1.zoo:2888:3888:participant log4j.properties: |- log4j.rootLogger=INFO, stdout diff --git a/zookeeper/21zoo-service.yml b/zookeeper/21zoo-service.yml index 00c33e1c..93fb3219 100644 --- a/zookeeper/21zoo-service.yml +++ b/zookeeper/21zoo-service.yml @@ -1,7 +1,7 @@ apiVersion: v1 kind: Service metadata: - name: pzoo + name: zoo namespace: kafka spec: ports: @@ -12,4 +12,4 @@ spec: clusterIP: None selector: app: zookeeper - storage: persistent + storage: ephemeral diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 925f4c50..993fd55d 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -5,7 +5,7 @@ metadata: namespace: kafka spec: serviceName: "pzoo" - replicas: 5 + replicas: 3 template: metadata: labels: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 925f4c50..f041a9a9 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -1,16 +1,16 @@ apiVersion: apps/v1beta1 kind: StatefulSet metadata: - name: pzoo + name: zoo namespace: kafka spec: - serviceName: "pzoo" - replicas: 5 + serviceName: "zoo" + replicas: 2 template: metadata: labels: app: zookeeper - storage: persistent + storage: ephemeral annotations: prometheus.io/scrape: "true" prometheus.io/port: "5556" @@ -37,7 +37,7 @@ spec: - -c - > set -e; - export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 4)); echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; cat config/zookeeper.properties; @@ -58,13 +58,5 @@ spec: - name: config configMap: name: zookeeper-config - volumeClaimTemplates: - - metadata: - name: data - annotations: - volume.beta.kubernetes.io/storage-class: kafka-zookeeper - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi + - name: data + emptyDir: {} From efb1019fd9881f316e0ee355cbafc26e3d146be2 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Mon, 26 Jun 2017 13:25:37 +0200 Subject: [PATCH 31/93] Forks can tweak storage classes, but here we want setup to be simple... and with the mix of PV and emptyDir there's no reason to make PVs faster than host disks. Use 10GB as it is the minimum for standard disks on GKE. --- README.md | 6 ------ configure-gke/storageclass-zookeeper-gke.yml | 7 ------- configure-minikube/storageclass-zookeeper-minikube.yml | 5 ----- zookeeper/50pzoo.yml | 4 +--- 4 files changed, 1 insertion(+), 21 deletions(-) delete mode 100644 configure-gke/storageclass-zookeeper-gke.yml delete mode 100644 configure-minikube/storageclass-zookeeper-minikube.yml diff --git a/README.md b/README.md index 8b469cd5..ef5db0c8 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,6 @@ To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`) kubectl create -f 00namespace.yml ``` -## Prepare your cluster - -For Minikube run `kubectl create -f configure-minikube/`. - -There's a similar setup for gke, in `configure-gke` of course. You might want to tweak it before creating. 
- ## Set up volume claims You may add [storage class](http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) diff --git a/configure-gke/storageclass-zookeeper-gke.yml b/configure-gke/storageclass-zookeeper-gke.yml deleted file mode 100644 index 44891bac..00000000 --- a/configure-gke/storageclass-zookeeper-gke.yml +++ /dev/null @@ -1,7 +0,0 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: kafka-zookeeper -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-ssd diff --git a/configure-minikube/storageclass-zookeeper-minikube.yml b/configure-minikube/storageclass-zookeeper-minikube.yml deleted file mode 100644 index ba89eb46..00000000 --- a/configure-minikube/storageclass-zookeeper-minikube.yml +++ /dev/null @@ -1,5 +0,0 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: kafka-zookeeper -provisioner: k8s.io/minikube-hostpath diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 993fd55d..25e2ceb4 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -61,10 +61,8 @@ spec: volumeClaimTemplates: - metadata: name: data - annotations: - volume.beta.kubernetes.io/storage-class: kafka-zookeeper spec: accessModes: [ "ReadWriteOnce" ] resources: requests: - storage: 1Gi + storage: 10Gi From 10543bfbd2f83a3f4c53b3d6d73e582de1e99878 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 07:34:21 +0200 Subject: [PATCH 32/93] Uses dynamically provisioned volume for Kafka too. It has matured, ... simplifies use of multi-zone clusters, works in Minikube, and volumes are deleted when the PV is deleted (by manual kubectl). --- 10pvc.yml | 48 --------------------------------------- 50kafka.yml | 4 ++-- bootstrap/pv-template.yml | 45 ------------------------------------ bootstrap/pv.sh | 11 --------- 4 files changed, 2 insertions(+), 106 deletions(-) delete mode 100644 10pvc.yml delete mode 100644 bootstrap/pv-template.yml delete mode 100755 bootstrap/pv.sh diff --git a/10pvc.yml b/10pvc.yml deleted file mode 100644 index 51de19c1..00000000 --- a/10pvc.yml +++ /dev/null @@ -1,48 +0,0 @@ ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: datadir-kafka-0 - namespace: kafka -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 200Gi - selector: - matchLabels: - app: kafka - podindex: "0" ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: datadir-kafka-1 - namespace: kafka -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 200Gi - selector: - matchLabels: - app: kafka - podindex: "1" ---- -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: datadir-kafka-2 - namespace: kafka -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 200Gi - selector: - matchLabels: - app: kafka - podindex: "2" diff --git a/50kafka.yml b/50kafka.yml index db7092db..372eba8b 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -37,11 +37,11 @@ spec: --override log.dirs=/var/lib/kafka/data/topics --override broker.id=${HOSTNAME##*-} volumeMounts: - - name: datadir + - name: data mountPath: /var/lib/kafka/data volumeClaimTemplates: - metadata: - name: datadir + name: data spec: accessModes: [ "ReadWriteOnce" ] resources: diff --git a/bootstrap/pv-template.yml b/bootstrap/pv-template.yml deleted file mode 100644 index befb6b6c..00000000 --- a/bootstrap/pv-template.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: datadir-kafka-0 - labels: - app: kafka - podindex: "0" -spec: - 
accessModes: - - ReadWriteOnce - capacity: - storage: 200Gi - hostPath: - path: /tmp/k8s-data/datadir-kafka-0 ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: datadir-kafka-1 - labels: - app: kafka - podindex: "1" -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 200Gi - hostPath: - path: /tmp/k8s-data/datadir-kafka-1 ---- -apiVersion: v1 -kind: PersistentVolume -metadata: - name: datadir-kafka-2 - labels: - app: kafka - podindex: "2" -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 200Gi - hostPath: - path: /tmp/k8s-data/datadir-kafka-2 diff --git a/bootstrap/pv.sh b/bootstrap/pv.sh deleted file mode 100755 index c12787bc..00000000 --- a/bootstrap/pv.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -echo "Note that in for example GKE a StatefulSet will have PersistentVolume(s) and PersistentVolumeClaim(s) created for it automatically" - -dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )" -path="$dir/data" -echo "Please enter a path where to store data during local testing: ($path)" -read newpath -[ -n "$newpath" ] && path=$newpath - -cat bootstrap/pv-template.yml | sed "s|/tmp/k8s-data|$path|" | kubectl create -f - From f45ced550d55f58937f27b008ec73f6b51c6bd65 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 07:58:06 +0200 Subject: [PATCH 33/93] Adds utility to update the kafka image, which we keep the same to minimize pull waits --- test/11topic-create-test1.yml | 3 +-- test/12topic-create-test2.yml | 3 +-- test/21consumer-test1.yml | 3 +-- test/99testclient.yml | 3 +-- update-kafka-image.sh | 8 ++++++++ 5 files changed, 12 insertions(+), 8 deletions(-) create mode 100755 update-kafka-image.sh diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index e03c6c9c..a8338ce2 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,8 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka - imagePullPolicy: Never + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index 9abc77f3..f0d8bcc7 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,8 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka - imagePullPolicy: Never + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index 7faf80bb..761affec 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,8 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka - imagePullPolicy: Never + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/99testclient.yml b/test/99testclient.yml index 97e3dea0..bc9eba2c 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,8 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka - imagePullPolicy: Never + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 command: - sh - -c diff --git a/update-kafka-image.sh b/update-kafka-image.sh new file mode 100755 index 00000000..1c322930 --- /dev/null +++ b/update-kafka-image.sh @@ -0,0 +1,8 
@@ +#!/bin/bash + +IMAGE=$1 +[ -z "$IMAGE" ] && echo "First argument should be the image to set" && exit 1 + +for F in ./ test/ zookeeper/; do + sed -i "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml +done From 49865bca8b3fd5fb214f4b9e88d490f9dd09104b Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 07:58:56 +0200 Subject: [PATCH 34/93] Adds a test that produces a message that you can see in the logs of 21consumer-test1 --- test/31producer-test1.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 test/31producer-test1.yml diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml new file mode 100644 index 00000000..7a2e2761 --- /dev/null +++ b/test/31producer-test1.yml @@ -0,0 +1,24 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: producer-test1 + namespace: kafka +spec: + template: + metadata: + name: producer-test1 + spec: + containers: + - name: kafka + image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + command: + - /bin/sh + - -c + - > + echo "test1 $(date)" + | + ./bin/kafka-console-producer.sh + --topic test1 + --broker-list kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 + ; sleep 1 + restartPolicy: Never From 3bc821b085281e5563be20221583870746e07ad8 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 08:31:04 +0200 Subject: [PATCH 35/93] Adds tentative resource requests, based on what idle pods use (though this includes monitoring) --- 50kafka.yml | 4 ++++ zookeeper/50pzoo.yml | 4 ++++ zookeeper/51zoo.yml | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/50kafka.yml b/50kafka.yml index 372eba8b..56460491 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -36,6 +36,10 @@ spec: --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics --override broker.id=${HOSTNAME##*-} + resources: + requests: + cpu: 100m + memory: 600Mi volumeMounts: - name: data mountPath: /var/lib/kafka/data diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 25e2ceb4..db992071 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -49,6 +49,10 @@ spec: name: peer - containerPort: 3888 name: leader-election + resources: + requests: + cpu: 10m + memory: 120Mi volumeMounts: - name: config mountPath: /usr/local/kafka/config diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index f041a9a9..caa13d6e 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -49,6 +49,10 @@ spec: name: peer - containerPort: 3888 name: leader-election + resources: + requests: + cpu: 10m + memory: 120Mi volumeMounts: - name: config mountPath: /usr/local/kafka/config From 620c4e21134ccdb20ba2e00d25f16bee5a1be6e9 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 10:27:31 +0200 Subject: [PATCH 36/93] Removes volume claims documentation, as we've gone completely dynamic --- README.md | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/README.md b/README.md index ef5db0c8..960de581 100644 --- a/README.md +++ b/README.md @@ -8,20 +8,6 @@ To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`) kubectl create -f 00namespace.yml ``` -## Set up volume claims - -You may add [storage class](http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses) -to the kafka StatefulSet declaration to enable automatic volume provisioning. 
- -Alternatively create [PV](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistent-volumes)s and [PVC](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims)s manually. For example in Minikube. - -``` -./bootstrap/pv.sh -kubectl create -f ./10pvc.yml -# check that claims are bound -kubectl -n kafka get pvc -``` - ## Set up Zookeeper The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. From f4ac28897dadadf976f862cb525726c30e4f49bc Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 10:42:19 +0200 Subject: [PATCH 37/93] A monitoring-only pod uses 0m / ~32Mi resources --- 50kafka.yml | 9 ++++++++- test/monitoring-test.yml | 35 +++++++++++++++++++++++++++++++++++ zookeeper/50pzoo.yml | 9 ++++++++- zookeeper/51zoo.yml | 15 +++++++++++---- 4 files changed, 62 insertions(+), 6 deletions(-) create mode 100644 test/monitoring-test.yml diff --git a/50kafka.yml b/50kafka.yml index 56460491..806f6500 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -20,6 +20,13 @@ spec: image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d ports: - containerPort: 5556 + resources: + requests: + cpu: 0m + memory: 40Mi + limits: + cpu: 10m + memory: 40Mi - name: broker image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: @@ -39,7 +46,7 @@ spec: resources: requests: cpu: 100m - memory: 600Mi + memory: 512Mi volumeMounts: - name: data mountPath: /var/lib/kafka/data diff --git a/test/monitoring-test.yml b/test/monitoring-test.yml new file mode 100644 index 00000000..4b5f230a --- /dev/null +++ b/test/monitoring-test.yml @@ -0,0 +1,35 @@ +# Sets up a pod that monitors itself, to test resource usage etc. +# kubectl exec test-metrics-... 
-- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: monitoring-test + namespace: kafka +spec: + replicas: 1 + template: + metadata: + labels: + app: monitoring-test + # Uncomment to test with prometheus + #annotations: + # prometheus.io/scrape: "true" + # prometheus.io/port: "5556" + spec: + containers: + - name: monitor + image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d + command: + - java + - -Dcom.sun.management.jmxremote.ssl=false + - -Dcom.sun.management.jmxremote.authenticate=false + - -Dcom.sun.management.jmxremote.port=5555 + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - example_configs/httpserver_sample_config.yml + ports: + - name: jmx + containerPort: 5555 + - name: slashmetrics + containerPort: 5556 diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index db992071..12890958 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -27,6 +27,13 @@ spec: - example_configs/zookeeper.yaml ports: - containerPort: 5556 + resources: + requests: + cpu: 0m + memory: 40Mi + limits: + cpu: 10m + memory: 40Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: @@ -52,7 +59,7 @@ spec: resources: requests: cpu: 10m - memory: 120Mi + memory: 100Mi volumeMounts: - name: config mountPath: /usr/local/kafka/config diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index caa13d6e..82cf7ee2 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -20,13 +20,20 @@ spec: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d command: - - "java" - - "-jar" - - "jmx_prometheus_httpserver.jar" + - java + - -jar + - jmx_prometheus_httpserver.jar - "5556" - example_configs/zookeeper.yaml ports: - containerPort: 5556 + resources: + requests: + cpu: 0m + memory: 40Mi + limits: + cpu: 10m + memory: 40Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: @@ -52,7 +59,7 @@ spec: resources: requests: cpu: 10m - memory: 120Mi + memory: 100Mi volumeMounts: - name: config mountPath: /usr/local/kafka/config From 1a8f2d95be8d56cdd57bb3dccb366a5c4fc2e17b Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 10:56:49 +0200 Subject: [PATCH 38/93] s --- zookeeper/50pzoo.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 12890958..7dfc1e64 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -20,9 +20,9 @@ spec: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d command: - - "java" - - "-jar" - - "jmx_prometheus_httpserver.jar" + - java + - -jar + - jmx_prometheus_httpserver.jar - "5556" - example_configs/zookeeper.yaml ports: From 13442382d2be57fc9e368ceb67f392c5494fdfa9 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 12:23:21 +0200 Subject: [PATCH 39/93] Got quite repeatable OOMKilled on pzoo pods, so I figured it must be... 
in metrics becuase nither zoo nor kafka has limits --- 50kafka.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 806f6500..2a3faf6a 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -26,7 +26,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 40Mi + memory: 80Mi - name: broker image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 7dfc1e64..da1ba818 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -33,7 +33,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 40Mi + memory: 80Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 82cf7ee2..f4f6298b 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -33,7 +33,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 40Mi + memory: 80Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: From 411192d3d6bc06984d93556fa5867225203d8aed Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 12:26:52 +0200 Subject: [PATCH 40/93] Reverts to default termination period, and uses bash for "shell form"... as Alpine's /bin/busybox (ash) does not forward signals, according to https://pracucci.com/graceful-shutdown-of-kubernetes-pods.html The reason for the termination period change is that we haven't observed any termination behavior yet so we can't know how slow it might be. --- 50kafka.yml | 4 ++-- zookeeper/50pzoo.yml | 4 ++-- zookeeper/51zoo.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 2a3faf6a..4a57f3d3 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -14,7 +14,7 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "5556" spec: - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 30 containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d @@ -35,7 +35,7 @@ spec: ports: - containerPort: 9092 command: - - sh + - /bin/bash - -c - > ./bin/kafka-server-start.sh diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index da1ba818..7b949ce6 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -15,7 +15,7 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "5556" spec: - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 30 containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d @@ -40,7 +40,7 @@ spec: - name: JMX_PORT value: "5555" command: - - sh + - /bin/bash - -c - > set -e; diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index f4f6298b..b73db298 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -15,7 +15,7 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "5556" spec: - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 30 containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d @@ -40,7 +40,7 @@ spec: - name: JMX_PORT value: "5555" command: - - sh + - /bin/bash - -c - > set -e; From 2c4b6cd96dd45d52065431ad97a034d0444100a7 Mon Sep 17 00:00:00 2001 From: Staffan Olsson 
Date: Tue, 27 Jun 2017 13:08:54 +0200 Subject: [PATCH 41/93] Adds probes, but for Kafka I don't think it indicates readiness... which might not matter because we no longer have a loadbalancing service. These probes won't catch all failure modes, but if they fail we're pretty sure the container is malfunctioning. I found some sources recommending ./bin/kafka-topics.sh for probes but to me it looks risky to introduce a dependency to some other service for such things. One such source is https://github.com/kubernetes/charts/pull/144 The zookeeper probe is from https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ An issue is that zookeeper's logs are quite verbose for every probe. --- 50kafka.yml | 6 ++++++ zookeeper/50pzoo.yml | 12 ++++++++++++ zookeeper/51zoo.yml | 12 ++++++++++++ 3 files changed, 30 insertions(+) diff --git a/50kafka.yml b/50kafka.yml index 4a57f3d3..862cbe0b 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -47,6 +47,12 @@ spec: requests: cpu: 100m memory: 512Mi + livenessProbe: + exec: + command: + - /bin/sh + - -c + - 'echo "" | nc -w 1 127.0.0.1 9092' volumeMounts: - name: data mountPath: /var/lib/kafka/data diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 7b949ce6..c8e35426 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -60,6 +60,18 @@ spec: requests: cpu: 10m memory: 100Mi + livenessProbe: + exec: + command: + - /bin/sh + - -c + - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + readinessProbe: + exec: + command: + - /bin/sh + - -c + - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' volumeMounts: - name: config mountPath: /usr/local/kafka/config diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index b73db298..50be93d9 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -60,6 +60,18 @@ spec: requests: cpu: 10m memory: 100Mi + livenessProbe: + exec: + command: + - /bin/sh + - -c + - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + readinessProbe: + exec: + command: + - /bin/sh + - -c + - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' volumeMounts: - name: config mountPath: /usr/local/kafka/config From 0ab701c7a136facb55e29b19ff67cd14b9c08cfc Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 14:42:59 +0200 Subject: [PATCH 42/93] Reduces termination grace period for zookeeper because I fail to trigger termination by signal --- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index c8e35426..82e94b49 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -15,7 +15,7 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "5556" spec: - terminationGracePeriodSeconds: 30 + terminationGracePeriodSeconds: 10 containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 50be93d9..c8b346d4 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -15,7 +15,7 @@ spec: prometheus.io/scrape: "true" prometheus.io/port: "5556" spec: - terminationGracePeriodSeconds: 30 + terminationGracePeriodSeconds: 10 containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d From b3c6cd24c45e22c48849ccdbf3d32fe5d92cab9e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 27 Jun 2017 16:19:38 +0200 Subject: 
[PATCH 43/93] Raises memory limit for metrics; got 10 OOMKilled per pod in the last 3 hours --- 50kafka.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 862cbe0b..7d3b3b11 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -26,7 +26,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 80Mi + memory: 100Mi - name: broker image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 82e94b49..ad4b6b7c 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -33,7 +33,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 80Mi + memory: 100Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index c8b346d4..856b2516 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -33,7 +33,7 @@ spec: memory: 40Mi limits: cpu: 10m - memory: 80Mi + memory: 100Mi - name: zookeeper image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 env: From 53b2cb53b5b4d75abcc26862bf4f7d66b6cfeb1e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 28 Jun 2017 16:15:35 +0200 Subject: [PATCH 44/93] Limiting metrics' JVM to match resource limits. Still getting OOMKilled though, but maybe half as often. --- README.md | 1 + zookeeper/51zoo.yml | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 960de581..25ecff2e 100644 --- a/README.md +++ b/README.md @@ -69,3 +69,4 @@ kubectl exec -c broker kafka-0 -- /bin/sh -c 'apk add --no-cache curl && curl ht kubectl logs -c metrics zoo-0 kubectl exec -c zookeeper zoo-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' ``` +Metrics containers can't be used for the curl because they're too short on memory. 
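The -Xms39M/-Xmx99M flags added below cap the exporter JVM so it stays inside the 100Mi container limit. A rough way to see whether the OOMKills actually stop is to watch restart counts and the last termination reason; the pod names here are assumed from the zoo/pzoo StatefulSets:

```
# Rough check: look for OOMKilled under Last State after the new limits are applied
kubectl -n kafka get pods
kubectl -n kafka describe pod zoo-0 | grep -A 3 'Last State'
```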
diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 856b2516..d43dc36f 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -21,6 +21,8 @@ spec: image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d command: - java + - -Xms39M + - -Xmx99M - -jar - jmx_prometheus_httpserver.jar - "5556" @@ -41,9 +43,8 @@ spec: value: "5555" command: - /bin/bash - - -c + - -euc - > - set -e; export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 4)); echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; From bccfdfa150929aadd77fec2bf9a25461f2f6b8b2 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 28 Jun 2017 16:32:19 +0200 Subject: [PATCH 45/93] Upgrades to latest build from https://github.com/solsson/dockerfiles/pull/4, with plain logging>=INFO config --- 50kafka.yml | 2 +- test/11topic-create-test1.yml | 2 +- test/12topic-create-test2.yml | 2 +- test/21consumer-test1.yml | 2 +- test/31producer-test1.yml | 2 +- test/99testclient.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 7d3b3b11..8d4560c1 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -28,7 +28,7 @@ spec: cpu: 10m memory: 100Mi - name: broker - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 env: - name: JMX_PORT value: "5555" diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index a8338ce2..d42f0eb3 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index f0d8bcc7..687480c8 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index 761affec..e1db2cf3 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml index 7a2e2761..9dfe3c8d 100644 --- a/test/31producer-test1.yml +++ b/test/31producer-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: 
solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 command: - /bin/sh - -c diff --git a/test/99testclient.yml b/test/99testclient.yml index bc9eba2c..ae71b0d8 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 command: - sh - -c diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index ad4b6b7c..037c49eb 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -35,7 +35,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 env: - name: JMX_PORT value: "5555" diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index d43dc36f..2bf1be9c 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0-rc2@sha256:c1316e0131f4ec83bc645ca2141e4fda94e0d28f4fb5f836e15e37a5e054bdf1 + image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 env: - name: JMX_PORT value: "5555" From d6b870c0a26f5575f7da4c84f7da203f6ba86667 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 28 Jun 2017 16:32:53 +0200 Subject: [PATCH 46/93] shell script is now osx, but no longer gnu :) --- update-kafka-image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/update-kafka-image.sh b/update-kafka-image.sh index 1c322930..602ccdd4 100755 --- a/update-kafka-image.sh +++ b/update-kafka-image.sh @@ -4,5 +4,5 @@ IMAGE=$1 [ -z "$IMAGE" ] && echo "First argument should be the image to set" && exit 1 for F in ./ test/ zookeeper/; do - sed -i "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml + sed -i '' "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml done From 4481b4da8263c98023679327ec43e3288c6c859a Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 28 Jun 2017 16:40:37 +0200 Subject: [PATCH 47/93] Applies the limit to persistent zookeeper pods too. They seem more prone to restarts than 51zoo. 
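To check the observation that the persistent pods restart more often than those from 51zoo, comparing restart counts is usually enough; the zoo-/pzoo- name prefixes are assumed:

```
# Compare the RESTARTS column across both zookeeper StatefulSets
kubectl -n kafka get pods | grep zoo-
```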
--- zookeeper/50pzoo.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 037c49eb..689fae1f 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -21,6 +21,8 @@ spec: image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d command: - java + - -Xms39M + - -Xmx99M - -jar - jmx_prometheus_httpserver.jar - "5556" From 07d895c3ee67ce0186d22479574af103c3e851ea Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 28 Jun 2017 16:52:49 +0200 Subject: [PATCH 48/93] Same startup as 51zoo --- zookeeper/50pzoo.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 689fae1f..e3704536 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -43,9 +43,8 @@ spec: value: "5555" command: - /bin/bash - - -c + - -euc - > - set -e; export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; From ac443a9c8dcea15b562dc49673f6722f093b41aa Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 23 Jul 2017 06:48:02 +0200 Subject: [PATCH 49/93] Fixes posix compatibility for probes --- zookeeper/50pzoo.yml | 4 ++-- zookeeper/51zoo.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index e3704536..f3f666ba 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -66,13 +66,13 @@ spec: command: - /bin/sh - -c - - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' readinessProbe: exec: command: - /bin/sh - -c - - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config mountPath: /usr/local/kafka/config diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 2bf1be9c..43199480 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -66,13 +66,13 @@ spec: command: - /bin/sh - -c - - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' readinessProbe: exec: command: - /bin/sh - -c - - '[ "imok" == $(echo "ruok" | nc -w 1 127.0.0.1 2181) ]' + - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config mountPath: /usr/local/kafka/config From 9f47cd09eda070b7f6e1b7ef907431e239883146 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 23 Jul 2017 07:02:13 +0200 Subject: [PATCH 50/93] Upgrades to current https://github.com/solsson/dockerfiles/pull/5 --- 50kafka.yml | 2 +- test/11topic-create-test1.yml | 2 +- test/12topic-create-test2.yml | 2 +- test/21consumer-test1.yml | 2 +- test/31producer-test1.yml | 2 +- test/99testclient.yml | 2 +- update-kafka-image.sh | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 8d4560c1..f53216dc 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -28,7 +28,7 @@ spec: cpu: 10m memory: 100Mi - name: broker - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 env: - name: JMX_PORT value: "5555" diff --git a/test/11topic-create-test1.yml 
b/test/11topic-create-test1.yml index d42f0eb3..536f3518 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index 687480c8..06fe4ab9 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index e1db2cf3..baac880c 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml index 9dfe3c8d..701090c4 100644 --- a/test/31producer-test1.yml +++ b/test/31producer-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 command: - /bin/sh - -c diff --git a/test/99testclient.yml b/test/99testclient.yml index ae71b0d8..8efa119c 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 command: - sh - -c diff --git a/update-kafka-image.sh b/update-kafka-image.sh index 602ccdd4..1c322930 100755 --- a/update-kafka-image.sh +++ b/update-kafka-image.sh @@ -4,5 +4,5 @@ IMAGE=$1 [ -z "$IMAGE" ] && echo "First argument should be the image to set" && exit 1 for F in ./ test/ zookeeper/; do - sed -i '' "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml + sed -i "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml done diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index f3f666ba..16c0aa54 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 env: - name: JMX_PORT value: "5555" diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 43199480..8e15a7fe 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:4c194db2ec15698aca6f1aa8a2fd5e5c566caed82b4bf43446c388f315397756 + image: 
solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 env: - name: JMX_PORT value: "5555" From 6a934de646c4d62675e604e899e583293372ba14 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 23 Jul 2017 08:50:01 +0200 Subject: [PATCH 51/93] solsson/kafka on debian restores installation path to /opt/kafka --- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 16c0aa54..166c5033 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -75,7 +75,7 @@ spec: - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config - mountPath: /usr/local/kafka/config + mountPath: /opt/kafka/config - name: data mountPath: /var/lib/zookeeper/data volumes: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 8e15a7fe..23c0c786 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -75,7 +75,7 @@ spec: - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config - mountPath: /usr/local/kafka/config + mountPath: /opt/kafka/config - name: data mountPath: /var/lib/zookeeper/data volumes: From c188f43cb8a252cd685a4944d35577ebc17a3668 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 23 Jul 2017 08:50:44 +0200 Subject: [PATCH 52/93] Default shell on debian should forward signals properly --- 50kafka.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index f53216dc..a41ad390 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -35,7 +35,7 @@ spec: ports: - containerPort: 9092 command: - - /bin/bash + - /bin/sh - -c - > ./bin/kafka-server-start.sh diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 166c5033..ddcef92f 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -42,7 +42,7 @@ spec: - name: JMX_PORT value: "5555" command: - - /bin/bash + - /bin/sh - -euc - > export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 23c0c786..da571c4c 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -42,7 +42,7 @@ spec: - name: JMX_PORT value: "5555" command: - - /bin/bash + - /bin/sh - -euc - > export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 4)); From 1758478d9117730ab0fd3d6f2e28422594026585 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sun, 23 Jul 2017 08:54:38 +0200 Subject: [PATCH 53/93] Adds yaml with the default .properties from 0.11.0.0 --- 10broker-config.yml | 239 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 239 insertions(+) create mode 100644 10broker-config.yml diff --git a/10broker-config.yml b/10broker-config.yml new file mode 100644 index 00000000..b86a0577 --- /dev/null +++ b/10broker-config.yml @@ -0,0 +1,239 @@ +kind: ConfigMap +metadata: + name: broker-config + namespace: kafka +apiVersion: v1 +data: + server.properties: |- + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. 
You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + # see kafka.server.KafkaConfig for additional details and defaults + + ############################# Server Basics ############################# + + # The id of the broker. This must be set to a unique integer for each broker. + broker.id=0 + + # Switch to enable topic deletion or not, default value is false + #delete.topic.enable=true + + ############################# Socket Server Settings ############################# + + # The address the socket server listens on. It will get the value returned from + # java.net.InetAddress.getCanonicalHostName() if not configured. + # FORMAT: + # listeners = listener_name://host_name:port + # EXAMPLE: + # listeners = PLAINTEXT://your.host.name:9092 + #listeners=PLAINTEXT://:9092 + + # Hostname and port the broker will advertise to producers and consumers. If not set, + # it uses the value for "listeners" if configured. Otherwise, it will use the value + # returned from java.net.InetAddress.getCanonicalHostName(). + #advertised.listeners=PLAINTEXT://your.host.name:9092 + + # Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details + #listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + + # The number of threads that the server uses for receiving requests from the network and sending responses to the network + num.network.threads=3 + + # The number of threads that the server uses for processing requests, which may include disk I/O + num.io.threads=8 + + # The send buffer (SO_SNDBUF) used by the socket server + socket.send.buffer.bytes=102400 + + # The receive buffer (SO_RCVBUF) used by the socket server + socket.receive.buffer.bytes=102400 + + # The maximum size of a request that the socket server will accept (protection against OOM) + socket.request.max.bytes=104857600 + + + ############################# Log Basics ############################# + + # A comma seperated list of directories under which to store log files + log.dirs=/tmp/kafka-logs + + # The default number of log partitions per topic. More partitions allow greater + # parallelism for consumption, but this will also result in more files across + # the brokers. + num.partitions=1 + + # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. + # This value is recommended to be increased for installations with data dirs located in RAID array. + num.recovery.threads.per.data.dir=1 + + ############################# Internal Topic Settings ############################# + # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" + # For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3. 
+ offsets.topic.replication.factor=1 + transaction.state.log.replication.factor=1 + transaction.state.log.min.isr=1 + + ############################# Log Flush Policy ############################# + + # Messages are immediately written to the filesystem but by default we only fsync() to sync + # the OS cache lazily. The following configurations control the flush of data to disk. + # There are a few important trade-offs here: + # 1. Durability: Unflushed data may be lost if you are not using replication. + # 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. + # 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to exceessive seeks. + # The settings below allow one to configure the flush policy to flush data after a period of time or + # every N messages (or both). This can be done globally and overridden on a per-topic basis. + + # The number of messages to accept before forcing a flush of data to disk + #log.flush.interval.messages=10000 + + # The maximum amount of time a message can sit in a log before we force a flush + #log.flush.interval.ms=1000 + + ############################# Log Retention Policy ############################# + + # The following configurations control the disposal of log segments. The policy can + # be set to delete segments after a period of time, or after a given size has accumulated. + # A segment will be deleted whenever *either* of these criteria are met. Deletion always happens + # from the end of the log. + + # The minimum age of a log file to be eligible for deletion due to age + log.retention.hours=168 + + # A size-based retention policy for logs. Segments are pruned from the log as long as the remaining + # segments don't drop below log.retention.bytes. Functions independently of log.retention.hours. + #log.retention.bytes=1073741824 + + # The maximum size of a log segment file. When this size is reached a new log segment will be created. + log.segment.bytes=1073741824 + + # The interval at which log segments are checked to see if they can be deleted according + # to the retention policies + log.retention.check.interval.ms=300000 + + ############################# Zookeeper ############################# + + # Zookeeper connection string (see zookeeper docs for details). + # This is a comma separated host:port pairs, each corresponding to a zk + # server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". + # You can also append an optional chroot string to the urls to specify the + # root directory for all kafka znodes. + zookeeper.connect=localhost:2181 + + # Timeout in ms for connecting to zookeeper + zookeeper.connection.timeout.ms=6000 + + + ############################# Group Coordinator Settings ############################# + + # The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. + # The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. + # The default value for this is 3 seconds. + # We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. + # However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. 
+ group.initial.rebalance.delay.ms=0 + + log4j.properties: |- + # Licensed to the Apache Software Foundation (ASF) under one or more + # contributor license agreements. See the NOTICE file distributed with + # this work for additional information regarding copyright ownership. + # The ASF licenses this file to You under the Apache License, Version 2.0 + # (the "License"); you may not use this file except in compliance with + # the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + # Unspecified loggers and loggers with additivity=true output to server.log and stdout + # Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise + log4j.rootLogger=INFO, stdout, kafkaAppender + + log4j.appender.stdout=org.apache.log4j.ConsoleAppender + log4j.appender.stdout.layout=org.apache.log4j.PatternLayout + log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log + log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log + log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log + log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log + log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log + log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender + log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH + log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log + log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout + log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n + + # Change the two lines below to adjust ZK client logging + 
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO + log4j.logger.org.apache.zookeeper=INFO + + # Change the two lines below to adjust the general broker logging level (output to server.log and stdout) + log4j.logger.kafka=INFO + log4j.logger.org.apache.kafka=INFO + + # Change to DEBUG or TRACE to enable request logging + log4j.logger.kafka.request.logger=WARN, requestAppender + log4j.additivity.kafka.request.logger=false + + # Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output + # related to the handling of requests + #log4j.logger.kafka.network.Processor=TRACE, requestAppender + #log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender + #log4j.additivity.kafka.server.KafkaApis=false + log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender + log4j.additivity.kafka.network.RequestChannel$=false + + log4j.logger.kafka.controller=TRACE, controllerAppender + log4j.additivity.kafka.controller=false + + log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender + log4j.additivity.kafka.log.LogCleaner=false + + log4j.logger.state.change.logger=TRACE, stateChangeAppender + log4j.additivity.state.change.logger=false + + # Change to DEBUG to enable audit log for the authorizer + log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender + log4j.additivity.kafka.authorizer.logger=false From a30b5e75503975bb7f769beaae0a4a5c8f167c58 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 06:33:11 +0200 Subject: [PATCH 54/93] Use config map's config instead of image's --- 50kafka.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/50kafka.yml b/50kafka.yml index a41ad390..c5736b85 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -54,8 +54,14 @@ spec: - -c - 'echo "" | nc -w 1 127.0.0.1 9092' volumeMounts: + - name: config + mountPath: /opt/kafka/config - name: data mountPath: /var/lib/kafka/data + volumes: + - name: config + configMap: + name: broker-config volumeClaimTemplates: - metadata: name: data From b3491ce33dafdd9789287c5c0b267cbf9d30d21d Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 06:35:22 +0200 Subject: [PATCH 55/93] Validates against a gotcha --- update-kafka-image.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/update-kafka-image.sh b/update-kafka-image.sh index 1c322930..fae9c6ff 100755 --- a/update-kafka-image.sh +++ b/update-kafka-image.sh @@ -3,6 +3,8 @@ IMAGE=$1 [ -z "$IMAGE" ] && echo "First argument should be the image to set" && exit 1 +[[ $IMAGE != solsson/kafka:* ]] && echo "Should be the full image identifier" && exit 1 + for F in ./ test/ zookeeper/; do sed -i "s|image: solsson/kafka:.*|image: $IMAGE|" $F*.yml done From d8b2b41d48e51e8309c4e13e78b45401df30e6cc Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 06:41:53 +0200 Subject: [PATCH 56/93] With stock config we have to change zookeeper lookup from the default localhost --- 50kafka.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/50kafka.yml b/50kafka.yml index c5736b85..7d65d495 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -40,6 +40,7 @@ spec: - > ./bin/kafka-server-start.sh config/server.properties + --override zookeeper.connect=zookeeper:2181 --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics --override broker.id=${HOSTNAME##*-} From c86ed9c92ae889a10f4c04d526c7b757effb13ca Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 07:02:11 +0200 Subject: [PATCH 57/93] As recommended by 
https://www.confluent.io/blog/apache-kafka-for-service-architectures/ --- test/12topic-create-test2.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index 06fe4ab9..baf2cc0a 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -22,4 +22,6 @@ spec: - "1" - --replication-factor - "3" + - --config + - min.insync.replicas=2 restartPolicy: Never From 0681cc515fa1c505b905ef60c7d3132e8d7510af Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 20:07:34 +0200 Subject: [PATCH 58/93] I think time saved by auto-creating topics will be lost ... tenfold in confusion caused by typos etc --- 50kafka.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/50kafka.yml b/50kafka.yml index 7d65d495..45eb86fd 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -44,6 +44,7 @@ spec: --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics --override broker.id=${HOSTNAME##*-} + --override auto.create.topics.enable=false resources: requests: cpu: 100m From 480b5fa7e33fb99159c36821b9d089c4b77f4dc8 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Tue, 25 Jul 2017 20:10:55 +0200 Subject: [PATCH 59/93] New build with https://github.com/solsson/dockerfiles/pull/9 --- 50kafka.yml | 2 +- test/11topic-create-test1.yml | 2 +- test/12topic-create-test2.yml | 2 +- test/21consumer-test1.yml | 2 +- test/31producer-test1.yml | 2 +- test/99testclient.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 45eb86fd..c00d920b 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -28,7 +28,7 @@ spec: cpu: 10m memory: 100Mi - name: broker - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b env: - name: JMX_PORT value: "5555" diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index 536f3518..8f734f95 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index baf2cc0a..d1d80483 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index baac880c..a6222ac8 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml index 
701090c4..3e06c098 100644 --- a/test/31producer-test1.yml +++ b/test/31producer-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b command: - /bin/sh - -c diff --git a/test/99testclient.yml b/test/99testclient.yml index 8efa119c..f4e325ca 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b command: - sh - -c diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index ddcef92f..f4fce220 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b env: - name: JMX_PORT value: "5555" diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index da571c4c..ead8cb47 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:92c5092d7c2f10abd11693731f6e112d40bfd42d6428a7cdf0516c9666dd3e58 + image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b env: - name: JMX_PORT value: "5555" From 8340b11e1f1f825313969ed2505a9c93f9ba6549 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 05:43:41 +0200 Subject: [PATCH 60/93] New build at commit 0314080 --- 50kafka.yml | 2 +- test/11topic-create-test1.yml | 2 +- test/12topic-create-test2.yml | 2 +- test/21consumer-test1.yml | 2 +- test/31producer-test1.yml | 2 +- test/99testclient.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index c00d920b..deee06f8 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -28,7 +28,7 @@ spec: cpu: 10m memory: 100Mi - name: broker - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: - name: JMX_PORT value: "5555" diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index 8f734f95..e7e54c34 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index d1d80483..fe97b40e 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc command: - 
./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index a6222ac8..f861070a 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml index 3e06c098..b41a1d2e 100644 --- a/test/31producer-test1.yml +++ b/test/31producer-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc command: - /bin/sh - -c diff --git a/test/99testclient.yml b/test/99testclient.yml index f4e325ca..6525416e 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc command: - sh - -c diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index f4fce220..4cfd3d4b 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: - name: JMX_PORT value: "5555" diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index ead8cb47..cbc0af1f 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:df808192488b280e3bee7a271208032a5669e0e58d4aebe83500492ebaea342b + image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: - name: JMX_PORT value: "5555" From 114b773cd7db1475768ae04547e03fd47361e74d Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 05:49:46 +0200 Subject: [PATCH 61/93] Clarifies a gotcha: to mount config with log4j.properties ... you must use /opt/kafka/config, due to how log4j.properites (sometimes tools- or connect-) are resolved by the ./bin scripts. 
See https://github.com/solsson/dockerfiles/pull/10 --- 50kafka.yml | 4 +++- zookeeper/50pzoo.yml | 2 ++ zookeeper/51zoo.yml | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/50kafka.yml b/50kafka.yml index deee06f8..dcc4e949 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -30,6 +30,8 @@ spec: - name: broker image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties - name: JMX_PORT value: "5555" ports: @@ -39,7 +41,7 @@ spec: - -c - > ./bin/kafka-server-start.sh - config/server.properties + ./config/server.properties --override zookeeper.connect=zookeeper:2181 --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 4cfd3d4b..ff289d8b 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -39,6 +39,8 @@ spec: - name: zookeeper image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties - name: JMX_PORT value: "5555" command: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index cbc0af1f..067096de 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -39,6 +39,8 @@ spec: - name: zookeeper image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc env: + - name: KAFKA_LOG4J_OPTS + value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties - name: JMX_PORT value: "5555" command: From 6f8f6d460b8683c07e5201a82415abbe0e106463 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 11:02:51 +0200 Subject: [PATCH 62/93] Tagged with the policy from https://github.com/solsson/dockerfiles/pull/11 --- 50kafka.yml | 2 +- test/11topic-create-test1.yml | 2 +- test/12topic-create-test2.yml | 2 +- test/21consumer-test1.yml | 2 +- test/31producer-test1.yml | 2 +- test/99testclient.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index dcc4e949..8536b1c4 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -28,7 +28,7 @@ spec: cpu: 10m memory: 100Mi - name: broker - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml index e7e54c34..321dc575 100644 --- a/test/11topic-create-test1.yml +++ b/test/11topic-create-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml index fe97b40e..edbe1dfb 100644 --- a/test/12topic-create-test2.yml +++ b/test/12topic-create-test2.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: 
solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: - ./bin/kafka-topics.sh - --zookeeper diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml index f861070a..43678f8a 100644 --- a/test/21consumer-test1.yml +++ b/test/21consumer-test1.yml @@ -14,7 +14,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: - ./bin/kafka-console-consumer.sh - --bootstrap-server diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml index b41a1d2e..354a16a7 100644 --- a/test/31producer-test1.yml +++ b/test/31producer-test1.yml @@ -10,7 +10,7 @@ spec: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: - /bin/sh - -c diff --git a/test/99testclient.yml b/test/99testclient.yml index 6525416e..a3670044 100644 --- a/test/99testclient.yml +++ b/test/99testclient.yml @@ -8,7 +8,7 @@ metadata: spec: containers: - name: kafka - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: - sh - -c diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index ff289d8b..ef12291b 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 067096de..0f5972cf 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -37,7 +37,7 @@ spec: cpu: 10m memory: 100Mi - name: zookeeper - image: solsson/kafka:0.11.0.0@sha256:e0dec6aa1f376bd374a6ca5863b783d01703acf1f71c0c4441a217a7bd80dfbc + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties From 5bb49e3d40150ff79f57a043a353eb66182c8beb Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 11:24:55 +0200 Subject: [PATCH 63/93] With explicit log4j path we can change config mount ... to not mix with sample config. See https://github.com/solsson/dockerfiles/pull/10. 
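One way to see the separation this enables (a sketch, assuming the kafka namespace and the broker container from 50kafka.yml):

```
# ConfigMap-backed config lives here after this change ...
kubectl -n kafka exec kafka-0 -c broker -- ls /etc/kafka
# ... while the distribution's sample config stays untouched inside the image.
kubectl -n kafka exec kafka-0 -c broker -- ls /opt/kafka/config
```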
--- 50kafka.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 8536b1c4..3ec2bb53 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -31,7 +31,7 @@ spec: image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS - value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - name: JMX_PORT value: "5555" ports: @@ -41,7 +41,7 @@ spec: - -c - > ./bin/kafka-server-start.sh - ./config/server.properties + /etc/kafka/server.properties --override zookeeper.connect=zookeeper:2181 --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics @@ -59,7 +59,7 @@ spec: - 'echo "" | nc -w 1 127.0.0.1 9092' volumeMounts: - name: config - mountPath: /opt/kafka/config + mountPath: /etc/kafka - name: data mountPath: /var/lib/kafka/data volumes: From a2d324d509d7041642123f22b589ad4925ea1199 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 11:39:53 +0200 Subject: [PATCH 64/93] Default shell on Debian shows the same symptom ... of not forwarding signals as Alpine did. Kafka logs say nothing, and after 30s the container is terminated. With /bin/bash instead the log indicates shutdown behavior. This reverts commit c188f43cb8a252cd685a4944d35577ebc17a3668. --- 50kafka.yml | 2 +- zookeeper/50pzoo.yml | 2 +- zookeeper/51zoo.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 3ec2bb53..051349e9 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -37,7 +37,7 @@ spec: ports: - containerPort: 9092 command: - - /bin/sh + - /bin/bash - -c - > ./bin/kafka-server-start.sh diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index ef12291b..ef77c5cb 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -44,7 +44,7 @@ spec: - name: JMX_PORT value: "5555" command: - - /bin/sh + - /bin/bash - -euc - > export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 0f5972cf..6eb81d42 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -44,7 +44,7 @@ spec: - name: JMX_PORT value: "5555" command: - - /bin/sh + - /bin/bash - -euc - > export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 4)); From be5a82037682d9b1ebd907a367e839ecd71aa1fa Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 11:38:47 +0200 Subject: [PATCH 65/93] Demonstrates how an init script can be used to ... modify config prior to kafka start. My aim with this is to get rid of the /bin/sh startup scripts and use bin + args directly. --- 10broker-config.yml | 4 ++++ 50kafka.yml | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/10broker-config.yml b/10broker-config.yml index b86a0577..b6ecf1cc 100644 --- a/10broker-config.yml +++ b/10broker-config.yml @@ -4,6 +4,10 @@ metadata: namespace: kafka apiVersion: v1 data: + init.sh: |- + echo "I guess I'm running in the init container" + sed -i 's/%p %m/%p -INIT-WAS-HERE- %m/' /etc/kafka/log4j.properties + server.properties: |- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. 
See the NOTICE file distributed with diff --git a/50kafka.yml b/50kafka.yml index 051349e9..930dcdad 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -15,6 +15,13 @@ spec: prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 30 + initContainers: + - name: init-config + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce + command: ['/bin/sh', '-ec', '. /etc/kafka/init.sh'] + volumeMounts: + - name: config + mountPath: /etc/kafka containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d From 0d534e8ad6f0766cb29293b148ffc9ee6bdec35f Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 16:48:14 +0200 Subject: [PATCH 66/93] Moves broker.id config into init script --- 10broker-config.yml | 9 ++++++--- 50kafka.yml | 3 +-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/10broker-config.yml b/10broker-config.yml index b6ecf1cc..af0f0374 100644 --- a/10broker-config.yml +++ b/10broker-config.yml @@ -5,8 +5,11 @@ metadata: apiVersion: v1 data: init.sh: |- - echo "I guess I'm running in the init container" - sed -i 's/%p %m/%p -INIT-WAS-HERE- %m/' /etc/kafka/log4j.properties + #!/bin/bash + set -x + + export KAFKA_BROKER_ID=${HOSTNAME##*-} + sed -i "s/\${KAFKA_BROKER_ID}/$KAFKA_BROKER_ID/" /etc/kafka/server.properties server.properties: |- # Licensed to the Apache Software Foundation (ASF) under one or more @@ -29,7 +32,7 @@ data: ############################# Server Basics ############################# # The id of the broker. This must be set to a unique integer for each broker. - broker.id=0 + broker.id=${KAFKA_BROKER_ID} # Switch to enable topic deletion or not, default value is false #delete.topic.enable=true diff --git a/50kafka.yml b/50kafka.yml index 930dcdad..c55edc5b 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -18,7 +18,7 @@ spec: initContainers: - name: init-config image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: ['/bin/sh', '-ec', '. /etc/kafka/init.sh'] + command: ['/bin/bash', '/etc/kafka/init.sh'] volumeMounts: - name: config mountPath: /etc/kafka @@ -52,7 +52,6 @@ spec: --override zookeeper.connect=zookeeper:2181 --override log.retention.hours=-1 --override log.dirs=/var/lib/kafka/data/topics - --override broker.id=${HOSTNAME##*-} --override auto.create.topics.enable=false resources: requests: From bfe7e31511e96d53b558708ffa7bcb1cc2336c42 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 16:51:16 +0200 Subject: [PATCH 67/93] With no bash tricks in command we can use the actual bin ... avoiding a layer of indirection and the associated gotchas with signals. 
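A rough way to observe the difference (a sketch, assuming the kafka namespace; exact log wording may vary between Kafka versions):

```
# Terminal 1: follow the broker log
kubectl -n kafka logs -f -c broker kafka-0
# Terminal 2: delete the pod so kubelet sends SIGTERM
kubectl -n kafka delete pod kafka-0
# With the shell wrapper the log used to end silently after the 30s grace period;
# with the bin as the container command you should see Kafka's controlled shutdown messages.
```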
--- 50kafka.yml | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index c55edc5b..9b8bde28 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -44,15 +44,16 @@ spec: ports: - containerPort: 9092 command: - - /bin/bash - - -c - - > - ./bin/kafka-server-start.sh - /etc/kafka/server.properties - --override zookeeper.connect=zookeeper:2181 - --override log.retention.hours=-1 - --override log.dirs=/var/lib/kafka/data/topics - --override auto.create.topics.enable=false + - ./bin/kafka-server-start.sh + - /etc/kafka/server.properties + - --override + - zookeeper.connect=zookeeper:2181 + - --override + - log.retention.hours=-1 + - --override + - log.dirs=/var/lib/kafka/data/topics + - --override + - auto.create.topics.enable=false resources: requests: cpu: 100m From fda7bdb6b91c5abfb044efc6630447c86d8544ba Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Wed, 26 Jul 2017 21:45:48 +0200 Subject: [PATCH 68/93] Employs the init script concept for zookeeper too, reducing duplcation --- zookeeper/10zookeeper-config.yml | 14 +++++++++++++- zookeeper/50pzoo.yml | 23 +++++++++++++---------- zookeeper/51zoo.yml | 23 +++++++++++++---------- 3 files changed, 39 insertions(+), 21 deletions(-) diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml index 58d8b6aa..58e5c56e 100644 --- a/zookeeper/10zookeeper-config.yml +++ b/zookeeper/10zookeeper-config.yml @@ -4,6 +4,18 @@ metadata: namespace: kafka apiVersion: v1 data: + init.sh: |- + #!/bin/bash + set -x + + OFFSET=1 + case $HOSTNAME in zoo-*) + OFFSET=4 + esac + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $OFFSET)) + echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid + sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties + zookeeper.properties: |- tickTime=2000 dataDir=/var/lib/zookeeper/data @@ -16,7 +28,7 @@ data: server.3=pzoo-2.pzoo:2888:3888:participant server.4=zoo-0.zoo:2888:3888:participant server.5=zoo-1.zoo:2888:3888:participant - + log4j.properties: |- log4j.rootLogger=INFO, stdout log4j.appender.stdout=org.apache.log4j.ConsoleAppender diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index ef77c5cb..9989662f 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -16,6 +16,15 @@ spec: prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce + command: ['/bin/bash', '/etc/kafka/init.sh'] + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper/data containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d @@ -40,18 +49,12 @@ spec: image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS - value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - name: JMX_PORT value: "5555" command: - - /bin/bash - - -euc - - > - export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 1)); - echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; - sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; - cat config/zookeeper.properties; - ./bin/zookeeper-server-start.sh 
config/zookeeper.properties + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties ports: - containerPort: 2181 name: client @@ -77,7 +80,7 @@ spec: - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config - mountPath: /opt/kafka/config + mountPath: /etc/kafka - name: data mountPath: /var/lib/zookeeper/data volumes: diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 6eb81d42..90dae02c 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -16,6 +16,15 @@ spec: prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 + initContainers: + - name: init-config + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce + command: ['/bin/bash', '/etc/kafka/init.sh'] + volumeMounts: + - name: config + mountPath: /etc/kafka + - name: data + mountPath: /var/lib/zookeeper/data containers: - name: metrics image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d @@ -40,18 +49,12 @@ spec: image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS - value: -Dlog4j.configuration=file:/opt/kafka/config/log4j.properties + value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - name: JMX_PORT value: "5555" command: - - /bin/bash - - -euc - - > - export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + 4)); - echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid; - sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" config/zookeeper.properties; - cat config/zookeeper.properties; - ./bin/zookeeper-server-start.sh config/zookeeper.properties + - ./bin/zookeeper-server-start.sh + - /etc/kafka/zookeeper.properties ports: - containerPort: 2181 name: client @@ -77,7 +80,7 @@ spec: - '[ "imok" = "$(echo ruok | nc -w 1 127.0.0.1 2181)" ]' volumeMounts: - name: config - mountPath: /opt/kafka/config + mountPath: /etc/kafka - name: data mountPath: /var/lib/zookeeper/data volumes: From 082f57aa7f32bb17aca73849a910fa1318220098 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Thu, 27 Jul 2017 04:54:06 +0200 Subject: [PATCH 69/93] Places the myid magic number where replicas are --- zookeeper/10zookeeper-config.yml | 7 ++----- zookeeper/51zoo.yml | 3 +++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml index 58e5c56e..20f7823a 100644 --- a/zookeeper/10zookeeper-config.yml +++ b/zookeeper/10zookeeper-config.yml @@ -8,11 +8,8 @@ data: #!/bin/bash set -x - OFFSET=1 - case $HOSTNAME in zoo-*) - OFFSET=4 - esac - export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $OFFSET)) + [ -z "$ID_OFFSET" ] && ID_OFFSET=1 + export ZOOKEEPER_SERVER_ID=$((${HOSTNAME##*-} + $ID_OFFSET)) echo "${ZOOKEEPER_SERVER_ID:-1}" | tee /var/lib/zookeeper/data/myid sed -i "s/server\.$ZOOKEEPER_SERVER_ID\=[a-z0-9.-]*/server.$ZOOKEEPER_SERVER_ID=0.0.0.0/" /etc/kafka/zookeeper.properties diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 90dae02c..7e39cac8 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -20,6 +20,9 @@ spec: - name: init-config image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce command: ['/bin/bash', '/etc/kafka/init.sh'] + env: + - name: ID_OFFSET + value: "4" volumeMounts: - name: config mountPath: /etc/kafka From b848f85f37c50ca232be1ef7e462568b0f680453 Mon Sep 17 00:00:00 2001 From: Staffan 
Olsson Date: Thu, 27 Jul 2017 05:00:58 +0200 Subject: [PATCH 70/93] Stops logs from growing when zookeeper is idle --- zookeeper/10zookeeper-config.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zookeeper/10zookeeper-config.yml b/zookeeper/10zookeeper-config.yml index 20f7823a..e796b4ba 100644 --- a/zookeeper/10zookeeper-config.yml +++ b/zookeeper/10zookeeper-config.yml @@ -31,3 +31,7 @@ data: log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n + + # Suppress connection log messages, three lines per livenessProbe execution + log4j.logger.org.apache.zookeeper.server.NIOServerCnxnFactory=WARN + log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN From 829de73dd5cc90d125d0c614d279523d18070673 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Thu, 27 Jul 2017 05:23:36 +0200 Subject: [PATCH 71/93] Unimportant --- test/monitoring-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/monitoring-test.yml b/test/monitoring-test.yml index 4b5f230a..227ba9fd 100644 --- a/test/monitoring-test.yml +++ b/test/monitoring-test.yml @@ -1,5 +1,5 @@ # Sets up a pod that monitors itself, to test resource usage etc. -# kubectl exec test-metrics-... -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' +# kubectl exec monitoring-test-... -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' apiVersion: extensions/v1beta1 kind: Deployment metadata: From 51c3097d705fae5bef51f16b11fc1c52e7649b0e Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Thu, 27 Jul 2017 05:24:07 +0200 Subject: [PATCH 72/93] Belongs in the github.com/yolean/kubernetes-monitoring project --- test/monitoring-test.yml | 35 ----------------------------------- 1 file changed, 35 deletions(-) delete mode 100644 test/monitoring-test.yml diff --git a/test/monitoring-test.yml b/test/monitoring-test.yml deleted file mode 100644 index 227ba9fd..00000000 --- a/test/monitoring-test.yml +++ /dev/null @@ -1,35 +0,0 @@ -# Sets up a pod that monitors itself, to test resource usage etc. -# kubectl exec monitoring-test-... -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: monitoring-test - namespace: kafka -spec: - replicas: 1 - template: - metadata: - labels: - app: monitoring-test - # Uncomment to test with prometheus - #annotations: - # prometheus.io/scrape: "true" - # prometheus.io/port: "5556" - spec: - containers: - - name: monitor - image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d - command: - - java - - -Dcom.sun.management.jmxremote.ssl=false - - -Dcom.sun.management.jmxremote.authenticate=false - - -Dcom.sun.management.jmxremote.port=5555 - - -jar - - jmx_prometheus_httpserver.jar - - "5556" - - example_configs/httpserver_sample_config.yml - ports: - - name: jmx - containerPort: 5555 - - name: slashmetrics - containerPort: 5556 From c481cba64940778e6e279e7a018ae42ea69336e6 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 07:35:38 +0200 Subject: [PATCH 73/93] This project avoids scripting through addons ... which means we need some scripting to define the production setup. 
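The intended flow is roughly the following (a sketch; it assumes the addon-* branches referenced in the script exist in your clone):

```
# Create a dated production branch with the addons merged in
./prod-yolean.sh
# Review what was merged, then roll it out
git log --oneline -n 5
kubectl apply -f ./zookeeper/ -f ./
```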
--- prod-yolean.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100755 prod-yolean.sh diff --git a/prod-yolean.sh b/prod-yolean.sh new file mode 100755 index 00000000..03ee7662 --- /dev/null +++ b/prod-yolean.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# Combines addons into what we 'kubectl apply -f' to production +set -ex + +git fetch +git checkout origin/kafka-011 +git checkout -b prod-yolean-$(date +"%Y%m%dT%H%M%S") + +for BRANCH in addon-rest addon-kube-events-topic +do + git merge --no-ff $BRANCH -m "prod-yolean merge $BRANCH" +done From 22a314ac161d3d203881eaf4b1a44ea8bf028a27 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 07:55:39 +0200 Subject: [PATCH 74/93] Makes /metrics export opt-in (through addon branch coming up) --- 50kafka.yml | 15 --------------- zookeeper/50pzoo.yml | 23 ----------------------- zookeeper/51zoo.yml | 23 ----------------------- 3 files changed, 61 deletions(-) diff --git a/50kafka.yml b/50kafka.yml index 9b8bde28..4404a6be 100644 --- a/50kafka.yml +++ b/50kafka.yml @@ -11,8 +11,6 @@ spec: labels: app: kafka annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 30 initContainers: @@ -23,24 +21,11 @@ spec: - name: config mountPath: /etc/kafka containers: - - name: metrics - image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d - ports: - - containerPort: 5556 - resources: - requests: - cpu: 0m - memory: 40Mi - limits: - cpu: 10m - memory: 100Mi - name: broker image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - - name: JMX_PORT - value: "5555" ports: - containerPort: 9092 command: diff --git a/zookeeper/50pzoo.yml b/zookeeper/50pzoo.yml index 9989662f..f9d5c587 100644 --- a/zookeeper/50pzoo.yml +++ b/zookeeper/50pzoo.yml @@ -12,8 +12,6 @@ spec: app: zookeeper storage: persistent annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 initContainers: @@ -26,32 +24,11 @@ spec: - name: data mountPath: /var/lib/zookeeper/data containers: - - name: metrics - image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d - command: - - java - - -Xms39M - - -Xmx99M - - -jar - - jmx_prometheus_httpserver.jar - - "5556" - - example_configs/zookeeper.yaml - ports: - - containerPort: 5556 - resources: - requests: - cpu: 0m - memory: 40Mi - limits: - cpu: 10m - memory: 100Mi - name: zookeeper image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - - name: JMX_PORT - value: "5555" command: - ./bin/zookeeper-server-start.sh - /etc/kafka/zookeeper.properties diff --git a/zookeeper/51zoo.yml b/zookeeper/51zoo.yml index 7e39cac8..778567db 100644 --- a/zookeeper/51zoo.yml +++ b/zookeeper/51zoo.yml @@ -12,8 +12,6 @@ spec: app: zookeeper storage: ephemeral annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "5556" spec: terminationGracePeriodSeconds: 10 initContainers: @@ -29,32 +27,11 @@ spec: - name: data mountPath: /var/lib/zookeeper/data containers: - - name: metrics - image: solsson/kafka-prometheus-jmx-exporter@sha256:1f7c96c287a2dbec1d909cd8f96c0656310239b55a9a90d7fd12c81f384f1f7d - command: - - java - - -Xms39M - - -Xmx99M 
- - -jar - - jmx_prometheus_httpserver.jar - - "5556" - - example_configs/zookeeper.yaml - ports: - - containerPort: 5556 - resources: - requests: - cpu: 0m - memory: 40Mi - limits: - cpu: 10m - memory: 100Mi - name: zookeeper image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce env: - name: KAFKA_LOG4J_OPTS value: -Dlog4j.configuration=file:/etc/kafka/log4j.properties - - name: JMX_PORT - value: "5555" command: - ./bin/zookeeper-server-start.sh - /etc/kafka/zookeeper.properties From 98635cc4784b1ed64022343fb1731cf859b1f291 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 08:09:00 +0200 Subject: [PATCH 75/93] Includes metrics in our prod --- prod-yolean.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prod-yolean.sh b/prod-yolean.sh index 03ee7662..b95f9892 100755 --- a/prod-yolean.sh +++ b/prod-yolean.sh @@ -6,7 +6,7 @@ git fetch git checkout origin/kafka-011 git checkout -b prod-yolean-$(date +"%Y%m%dT%H%M%S") -for BRANCH in addon-rest addon-kube-events-topic +for BRANCH in addon-metrics addon-rest addon-kube-events-topic do git merge --no-ff $BRANCH -m "prod-yolean merge $BRANCH" done From 364c9b7316d764bfb1a43b7e6930505d624ced1f Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 08:34:58 +0200 Subject: [PATCH 76/93] Uses storage classes for our prod --- prod-yolean.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/prod-yolean.sh b/prod-yolean.sh index b95f9892..fb48139e 100755 --- a/prod-yolean.sh +++ b/prod-yolean.sh @@ -6,7 +6,11 @@ git fetch git checkout origin/kafka-011 git checkout -b prod-yolean-$(date +"%Y%m%dT%H%M%S") -for BRANCH in addon-metrics addon-rest addon-kube-events-topic +for BRANCH in \ + addon-storage-classes \ + addon-metrics \ + addon-rest \ + addon-kube-events-topic do git merge --no-ff $BRANCH -m "prod-yolean merge $BRANCH" done From ba907647b9f1c2186d45265d595c0b0bdc110980 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:28:18 +0200 Subject: [PATCH 77/93] Could be a pitch --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 25ecff2e..7841df07 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,10 @@ # Kafka as Kubernetes StatefulSet +Transparent Kafka setup that you can grow with, +extendable through [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. + + Example of three Kafka brokers depending on five Zookeeper instances. To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`), run everything in a [namespace](http://kubernetes.io/docs/admin/namespaces/walkthrough/): From 36a36032acac13aa4b340557fcf4c1d1a9c053ee Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:31:39 +0200 Subject: [PATCH 78/93] Could be the motivation --- README.md | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 7841df07..dd16bc7b 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,32 @@ -# Kafka as Kubernetes StatefulSet -Transparent Kafka setup that you can grow with, -extendable through [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. +# Kafka on Kubernetes +Transparent Kafka setup that you can grow with. +Good for both experiments and production. -Example of three Kafka brokers depending on five Zookeeper instances. +How to use: + * Run a Kubernetes cluster, [minikube](https://github.com/kubernetes/minikube) or real. 
+ * To quickly get a small Kafka cluster running, use the `kubectl apply`s below. + * To start using Kafka for real, fork and have a look at [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. + * Join the discussion here in issues and PRs. -To get consistent service DNS names `kafka-N.broker.kafka`(`.svc.cluster.local`), run everything in a [namespace](http://kubernetes.io/docs/admin/namespaces/walkthrough/): +Why? +No single readable readme can properly introduce both Kafka and Kubernets. +We started this project as beginners with both, +and by now our microservices enjoy lock-in with this "Streaming Platform" :smile:. +We read [Designing Data-Intensive Applications](http://dataintensive.net/) +and the [Confluent blog](https://www.confluent.io/blog/). + +## What you get + +[Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): ``` -kubectl create -f 00namespace.yml +kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` ``` +Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. + ## Set up Zookeeper The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. From dfa82bd6e56e9edc3d7cbcfabe86427d68aa2d61 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:38:42 +0200 Subject: [PATCH 79/93] Shorter --- README.md | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index dd16bc7b..534e7d94 100644 --- a/README.md +++ b/README.md @@ -12,18 +12,14 @@ How to use: * Join the discussion here in issues and PRs. Why? -No single readable readme can properly introduce both Kafka and Kubernets. -We started this project as beginners with both, -and by now our microservices enjoy lock-in with this "Streaming Platform" :smile:. -We read [Designing Data-Intensive Applications](http://dataintensive.net/) -and the [Confluent blog](https://www.confluent.io/blog/). +See for yourself. No single readable readme can properly introduce both Kafka and Kubernets. +Back when we read [Newman](http://samnewman.io/books/building_microservices/) we were beginners with both. +Now we read [Kleppmann](http://dataintensive.net/), [Confluent's blog](https://www.confluent.io/blog/) and [SRE](https://landing.google.com/sre/book.html) and enjoy this "Streaming Platform" lock-in :smile:. ## What you get -[Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): -``` -kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` -``` +[Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): `kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` +` Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. From 315317f5e3992a6a56be8e670fa571df41a6b506 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:44:48 +0200 Subject: [PATCH 80/93] Shorter still --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 534e7d94..6191eda4 100644 --- a/README.md +++ b/README.md @@ -25,16 +25,15 @@ Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. ## Set up Zookeeper -The Kafka book (Definitive Guide, O'Reilly 2016) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. 
-We use the zookeeper build that comes with the Kafka distribution, and tweak the startup command to support StatefulSet. +The [Kafka book](https://www.confluent.io/resources/kafka-definitive-guide-preview-edition/) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. ``` kubectl create -f ./zookeeper/ ``` -## Start Kafka +To support automatic migration in the face of availability zone unavailability :wink: we mix persistent and ephemeral storage. -Assuming you have your PVCs `Bound`, or enabled automatic provisioning (see above), go ahead and: +## Start Kafka ``` kubectl create -f ./ From 7e6df7119c5dca05e69c09a6f5dad720c9b9889a Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:45:31 +0200 Subject: [PATCH 81/93] If you got this far you don't need Kubernetes intro --- README.md | 8 -------- 1 file changed, 8 deletions(-) diff --git a/README.md b/README.md index 6191eda4..ae75c21d 100644 --- a/README.md +++ b/README.md @@ -66,14 +66,6 @@ Pods that keep consuming messages (but they won't exit on cluster failures) kubectl create -f test/21consumer-test1.yml ``` -## Teardown & cleanup - -Testing and retesting... delete the namespace. PVs are outside namespaces so delete them too. -``` -kubectl delete namespace kafka -rm -R ./data/ && kubectl -n kafka delete pv datadir-kafka-0 datadir-kafka-1 datadir-kafka-2 -``` - ## Metrics, Prometheus style Is the metrics system up and running? From c4f3c412c8338d562e49a1ac77d4654c07c71ce1 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:47:49 +0200 Subject: [PATCH 82/93] Reworked in https://github.com/Yolean/kubernetes-kafka/pull/51 --- README.md | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/README.md b/README.md index ae75c21d..547ddcd1 100644 --- a/README.md +++ b/README.md @@ -45,27 +45,6 @@ kubectl -n kafka logs kafka-0 | grep "Registered broker" # INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka-0.broker.kafka.svc.cluster.local,9092,PLAINTEXT) ``` -## Testing manually - -There's a Kafka pod that doesn't start the server, so you can invoke the various shell scripts. -``` -kubectl create -f test/99testclient.yml -``` - -See `./test/test.sh` for some sample commands. - -## Automated test, while going chaosmonkey on the cluster - -This is WIP, but topic creation has been automated. Note that as a [Job](http://kubernetes.io/docs/user-guide/jobs/), it will restart if the command fails, including if the topic exists :( -``` -kubectl create -f test/11topic-create-test1.yml -``` - -Pods that keep consuming messages (but they won't exit on cluster failures) -``` -kubectl create -f test/21consumer-test1.yml -``` - ## Metrics, Prometheus style Is the metrics system up and running? From a880307fa1eb7ec785f12d08a12d2e27ff669454 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 09:48:29 +0200 Subject: [PATCH 83/93] Metrics intro belongs in ... https://github.com/Yolean/kubernetes-kafka/pull/49 but maybe with tests instead of talk --- README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/README.md b/README.md index 547ddcd1..56e270da 100644 --- a/README.md +++ b/README.md @@ -44,14 +44,3 @@ You might want to verify in logs that Kafka found its own DNS name(s) correctly. 
kubectl -n kafka logs kafka-0 | grep "Registered broker" # INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka-0.broker.kafka.svc.cluster.local,9092,PLAINTEXT) ``` - -## Metrics, Prometheus style - -Is the metrics system up and running? -``` -kubectl logs -c metrics kafka-0 -kubectl exec -c broker kafka-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' -kubectl logs -c metrics zoo-0 -kubectl exec -c zookeeper zoo-0 -- /bin/sh -c 'apk add --no-cache curl && curl http://localhost:5556/metrics' -``` -Metrics containers can't be used for the curl because they're too short on memory. From 91b4dde07d80ec2fb7e44bd8edfac28e381ba0c1 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 10:37:03 +0200 Subject: [PATCH 84/93] Where to go from here --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 56e270da..97b6d46e 100644 --- a/README.md +++ b/README.md @@ -18,12 +18,14 @@ Now we read [Kleppmann](http://dataintensive.net/), [Confluent's blog](https://w ## What you get +Keep an eye on `kubectl --namespace kafka get pods -w`. + [Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): `kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` ` Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. -## Set up Zookeeper +## Start Zookeeper The [Kafka book](https://www.confluent.io/resources/kafka-definitive-guide-preview-edition/) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. @@ -31,7 +33,7 @@ The [Kafka book](https://www.confluent.io/resources/kafka-definitive-guide-previ kubectl create -f ./zookeeper/ ``` -To support automatic migration in the face of availability zone unavailability :wink: we mix persistent and ephemeral storage. +To support automatic migration in the face of availability zone unavailability we mix persistent and ephemeral storage. ## Start Kafka @@ -44,3 +46,8 @@ You might want to verify in logs that Kafka found its own DNS name(s) correctly. kubectl -n kafka logs kafka-0 | grep "Registered broker" # INFO Registered broker 0 at path /brokers/ids/0 with addresses: PLAINTEXT -> EndPoint(kafka-0.broker.kafka.svc.cluster.local,9092,PLAINTEXT) ``` + +That's it. Just add business value :wink:. +For clients we tend to use [librdkafka](https://github.com/edenhill/librdkafka)-based drivers like [node-rdkafka](https://github.com/Blizzard/node-rdkafka). +To use [Kafka Connect](http://kafka.apache.org/documentation/#connect) and [Kafka Streams](http://kafka.apache.org/documentation/streams/) you may want to take a look at our [sample](https://github.com/solsson/dockerfiles/tree/master/connect-files) [Dockerfile](https://github.com/solsson/dockerfiles/tree/master/streams-logfilter)s. +Don't forget the [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. From b58165916ff156082c8ed8ea08a8c68c1506efdc Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 10:49:09 +0200 Subject: [PATCH 85/93] Review after github render --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 97b6d46e..2c747dc6 100644 --- a/README.md +++ b/README.md @@ -7,20 +7,20 @@ Good for both experiments and production. How to use: * Run a Kubernetes cluster, [minikube](https://github.com/kubernetes/minikube) or real. 
- * To quickly get a small Kafka cluster running, use the `kubectl apply`s below. - * To start using Kafka for real, fork and have a look at [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. - * Join the discussion here in issues and PRs. + * Quickstart: use the `kubectl apply`s below. + * Kafka for real: fork and have a look at [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. + * Join the discussion in issues and PRs. Why? -See for yourself. No single readable readme can properly introduce both Kafka and Kubernets. +See for yourself, but we think this project gives you better adaptability than [helm](https://github.com/kubernetes/helm) [chart](https://github.com/kubernetes/charts/tree/master/incubator/kafka)s. No single readable readme or template can properly introduce both Kafka and Kubernets. Back when we read [Newman](http://samnewman.io/books/building_microservices/) we were beginners with both. -Now we read [Kleppmann](http://dataintensive.net/), [Confluent's blog](https://www.confluent.io/blog/) and [SRE](https://landing.google.com/sre/book.html) and enjoy this "Streaming Platform" lock-in :smile:. +Now we've read [Kleppmann](http://dataintensive.net/), [Confluent](https://www.confluent.io/blog/) and [SRE](https://landing.google.com/sre/book.html) and enjoy this "Streaming Platform" lock-in :smile:. ## What you get Keep an eye on `kubectl --namespace kafka get pods -w`. -[Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): `kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` +The goal is to provide [Bootstrap servers](http://kafka.apache.org/documentation/#producerconfigs): `kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092` ` Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. @@ -30,7 +30,7 @@ Zookeeper at `zookeeper.kafka.svc.cluster.local:2181`. The [Kafka book](https://www.confluent.io/resources/kafka-definitive-guide-preview-edition/) recommends that Kafka has its own Zookeeper cluster with at least 5 instances. ``` -kubectl create -f ./zookeeper/ +kubectl apply -f ./zookeeper/ ``` To support automatic migration in the face of availability zone unavailability we mix persistent and ephemeral storage. @@ -38,7 +38,7 @@ To support automatic migration in the face of availability zone unavailability w ## Start Kafka ``` -kubectl create -f ./ +kubectl apply -f ./ ``` You might want to verify in logs that Kafka found its own DNS name(s) correctly. 
Look for records like: From 8cbd6718fbc4c84f109ee0d94956e16e52be533d Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 08:37:59 +0200 Subject: [PATCH 86/93] Prepares for tests to move to separate namespace --- test/00namespace.yml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 test/00namespace.yml diff --git a/test/00namespace.yml b/test/00namespace.yml new file mode 100644 index 00000000..fbb6e0ef --- /dev/null +++ b/test/00namespace.yml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: test-kafka From c7eae1b9717dd9b031b3b585ce9a4d518b2fef7d Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 16:20:32 +0200 Subject: [PATCH 87/93] Suggests a structure for test cases as single yml --- test/basic-produce-consume.yml | 88 ++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 test/basic-produce-consume.yml diff --git a/test/basic-produce-consume.yml b/test/basic-produce-consume.yml new file mode 100644 index 00000000..37ee807d --- /dev/null +++ b/test/basic-produce-consume.yml @@ -0,0 +1,88 @@ +--- +kind: ConfigMap +metadata: + name: basic-produce-consume + namespace: test-kafka +apiVersion: v1 +data: + + live.sh: |- + exit 0 + + run.sh: |- + echo "Test is up" + exit 0 + +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: basic-produce-consume + namespace: test-kafka +spec: + template: + spec: + containers: + - name: topic-create + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce + command: + - ./bin/kafka-topics.sh + - --zookeeper + - zookeeper.kafka.svc.cluster.local:2181 + - --create + - --topic + - test-basic-produce-consume + - --partitions + - "1" + - --replication-factor + - "1" + restartPolicy: Never +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: basic-produce-consume + namespace: test-kafka +spec: + replicas: 1 + template: + metadata: + labels: + test-target: kafka + test-type: readiness + spec: + containers: + - name: kafka + # common test images + #image: solsson/curl@sha256:8b0927b81d10043e70f3e05e33e36fb9b3b0cbfcbccdb9f04fd53f67a270b874 + image: solsson/kafkacat@sha256:1266d140c52cb39bf314b6f22b6d7a01c4c9084781bc779fdfade51214a713a8 + #image: solsson/kubectl-kafkacat@sha256:3715a7ede3f168f677ee6faf311ff6887aff31f660cfeecad5d87b4f18516321 + env: + - name: BOOTSTRAP + value: kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 + - name: ZOOKEEPER + value: zookeeper.kafka.svc.cluster.local:2181 + # Test set up + command: + - tail + - -f + - /dev/null + # Test run + readinessProbe: + exec: + command: + - /bin/bash + - /test/run.sh + # Test restart on nonzero exit + livenessProbe: + exec: + command: + - /bin/bash + - /test/live.sh + volumeMounts: + - name: config + mountPath: /test + volumes: + - name: config + configMap: + name: basic-produce-consume From 24c43d2119577470790d8e489d76c56e33088e03 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 20:22:59 +0200 Subject: [PATCH 88/93] Working boilerplate, with output to kubectl logs --- test/basic-produce-consume.yml | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/test/basic-produce-consume.yml b/test/basic-produce-consume.yml index 37ee807d..39208e02 100644 --- a/test/basic-produce-consume.yml +++ b/test/basic-produce-consume.yml @@ -6,11 +6,17 @@ metadata: apiVersion: v1 data: - live.sh: |- + setup.sh: |- + touch 
/tmp/testlog + tail -f /tmp/testlog + + continue.sh: |- exit 0 run.sh: |- - echo "Test is up" + exec >> /tmp/testlog + exec 2>&1 + echo "Test completed at $(date -u +%Y-%m-%dT%H:%M:%SZ)." exit 0 --- @@ -30,6 +36,7 @@ spec: - --zookeeper - zookeeper.kafka.svc.cluster.local:2181 - --create + - --if-not-exists - --topic - test-basic-produce-consume - --partitions @@ -64,21 +71,20 @@ spec: value: zookeeper.kafka.svc.cluster.local:2181 # Test set up command: - - tail - - -f - - /dev/null - # Test run + - /bin/bash + - /test/setup.sh + # Test run, again and again readinessProbe: exec: command: - /bin/bash - /test/run.sh - # Test restart on nonzero exit + # Test quit on nonzero exit livenessProbe: exec: command: - /bin/bash - - /test/live.sh + - /test/continue.sh volumeMounts: - name: config mountPath: /test From c91c42e822228170e0b85ebd2dce041a8894857f Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 20:42:34 +0200 Subject: [PATCH 89/93] Implements the actual test --- test/basic-produce-consume.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/test/basic-produce-consume.yml b/test/basic-produce-consume.yml index 39208e02..a81cd666 100644 --- a/test/basic-produce-consume.yml +++ b/test/basic-produce-consume.yml @@ -16,7 +16,12 @@ data: run.sh: |- exec >> /tmp/testlog exec 2>&1 - echo "Test completed at $(date -u +%Y-%m-%dT%H:%M:%SZ)." + + unique=$(date -Ins) + + echo "Test $unique" | kafkacat -P -b $BOOTSTRAP -t test-basic-produce-consume -v + kafkacat -C -b $BOOTSTRAP -t test-basic-produce-consume -o -1 -e | grep $unique + exit 0 --- @@ -66,24 +71,28 @@ spec: #image: solsson/kubectl-kafkacat@sha256:3715a7ede3f168f677ee6faf311ff6887aff31f660cfeecad5d87b4f18516321 env: - name: BOOTSTRAP - value: kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 + #value: kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 + value: kafka-0.broker.kafka.svc.cluster.local:9092 - name: ZOOKEEPER value: zookeeper.kafka.svc.cluster.local:2181 # Test set up command: - /bin/bash + - -e - /test/setup.sh # Test run, again and again readinessProbe: exec: command: - /bin/bash + - -e - /test/run.sh # Test quit on nonzero exit livenessProbe: exec: command: - /bin/bash + - -e - /test/continue.sh volumeMounts: - name: config From 754fd2f83a7c5e125c81d9d0b9f792427ba8a812 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 20:46:19 +0200 Subject: [PATCH 90/93] Introduces the test automation concept as briefly as possible --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 2c747dc6..64649061 100644 --- a/README.md +++ b/README.md @@ -51,3 +51,11 @@ That's it. Just add business value :wink:. For clients we tend to use [librdkafka](https://github.com/edenhill/librdkafka)-based drivers like [node-rdkafka](https://github.com/Blizzard/node-rdkafka). To use [Kafka Connect](http://kafka.apache.org/documentation/#connect) and [Kafka Streams](http://kafka.apache.org/documentation/streams/) you may want to take a look at our [sample](https://github.com/solsson/dockerfiles/tree/master/connect-files) [Dockerfile](https://github.com/solsson/dockerfiles/tree/master/streams-logfilter)s. Don't forget the [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon)s. 
+ +# Tests + +``` +kubectl apply -f test/ +# Anything that isn't READY here is a failed test +kubectl -n test-kafka get pods -l test-target=kafka,test-type=readiness -w +``` From 454bea4ba2c82ca2e150d076f2355c1110c22136 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 21:22:10 +0200 Subject: [PATCH 91/93] Makes room for a more basic basic test --- ...oduce-consume.yml => basic-with-kafkacat.yml} | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) rename test/{basic-produce-consume.yml => basic-with-kafkacat.yml} (88%) diff --git a/test/basic-produce-consume.yml b/test/basic-with-kafkacat.yml similarity index 88% rename from test/basic-produce-consume.yml rename to test/basic-with-kafkacat.yml index a81cd666..a8974e80 100644 --- a/test/basic-produce-consume.yml +++ b/test/basic-with-kafkacat.yml @@ -1,7 +1,7 @@ --- kind: ConfigMap metadata: - name: basic-produce-consume + name: basic-with-kafkacat namespace: test-kafka apiVersion: v1 data: @@ -19,8 +19,8 @@ data: unique=$(date -Ins) - echo "Test $unique" | kafkacat -P -b $BOOTSTRAP -t test-basic-produce-consume -v - kafkacat -C -b $BOOTSTRAP -t test-basic-produce-consume -o -1 -e | grep $unique + echo "Test $unique" | kafkacat -P -b $BOOTSTRAP -t test-basic-with-kafkacat -v + kafkacat -C -b $BOOTSTRAP -t test-basic-with-kafkacat -o -1 -e | grep $unique exit 0 @@ -28,7 +28,7 @@ data: apiVersion: batch/v1 kind: Job metadata: - name: basic-produce-consume + name: basic-with-kafkacat namespace: test-kafka spec: template: @@ -43,7 +43,7 @@ spec: - --create - --if-not-exists - --topic - - test-basic-produce-consume + - test-basic-with-kafkacat - --partitions - "1" - --replication-factor @@ -53,7 +53,7 @@ spec: apiVersion: apps/v1beta1 kind: Deployment metadata: - name: basic-produce-consume + name: basic-with-kafkacat namespace: test-kafka spec: replicas: 1 @@ -64,7 +64,7 @@ spec: test-type: readiness spec: containers: - - name: kafka + - name: testcase # common test images #image: solsson/curl@sha256:8b0927b81d10043e70f3e05e33e36fb9b3b0cbfcbccdb9f04fd53f67a270b874 image: solsson/kafkacat@sha256:1266d140c52cb39bf314b6f22b6d7a01c4c9084781bc779fdfade51214a713a8 @@ -100,4 +100,4 @@ spec: volumes: - name: config configMap: - name: basic-produce-consume + name: basic-with-kafkacat From 48bd7c39af657db1338662cf99b5531aac929d1d Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Fri, 28 Jul 2017 21:23:59 +0200 Subject: [PATCH 92/93] Now we're on par with the old tests, but automated --- test/11topic-create-test1.yml | 25 ---------- test/12topic-create-test2.yml | 27 ----------- test/21consumer-test1.yml | 24 --------- test/31producer-test1.yml | 24 --------- test/99testclient.yml | 15 ------ test/basic-produce-consume.yml | 89 ++++++++++++++++++++++++++++++++++ test/test.sh | 34 ------------- 7 files changed, 89 insertions(+), 149 deletions(-) delete mode 100644 test/11topic-create-test1.yml delete mode 100644 test/12topic-create-test2.yml delete mode 100644 test/21consumer-test1.yml delete mode 100644 test/31producer-test1.yml delete mode 100644 test/99testclient.yml create mode 100644 test/basic-produce-consume.yml delete mode 100644 test/test.sh diff --git a/test/11topic-create-test1.yml b/test/11topic-create-test1.yml deleted file mode 100644 index 321dc575..00000000 --- a/test/11topic-create-test1.yml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: topic-create-test1 - namespace: kafka -spec: - template: - metadata: - name: topic-create-test1 - spec: - containers: - - name: 
kafka - image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: - - ./bin/kafka-topics.sh - - --zookeeper - - zookeeper:2181 - - --create - - --topic - - test1 - - --partitions - - "1" - - --replication-factor - - "1" - restartPolicy: Never diff --git a/test/12topic-create-test2.yml b/test/12topic-create-test2.yml deleted file mode 100644 index edbe1dfb..00000000 --- a/test/12topic-create-test2.yml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: topic-create-test2 - namespace: kafka -spec: - template: - metadata: - name: topic-create-test2 - spec: - containers: - - name: kafka - image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: - - ./bin/kafka-topics.sh - - --zookeeper - - zookeeper:2181 - - --create - - --topic - - test2 - - --partitions - - "1" - - --replication-factor - - "3" - - --config - - min.insync.replicas=2 - restartPolicy: Never diff --git a/test/21consumer-test1.yml b/test/21consumer-test1.yml deleted file mode 100644 index 43678f8a..00000000 --- a/test/21consumer-test1.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: consumer-test1 - namespace: kafka -spec: - replicas: 1 - template: - metadata: - labels: - app: consumer - scope: test - topic: test1 - spec: - containers: - - name: kafka - image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: - - ./bin/kafka-console-consumer.sh - - --bootstrap-server - - kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 - - --topic - - test1 - - --from-beginning diff --git a/test/31producer-test1.yml b/test/31producer-test1.yml deleted file mode 100644 index 354a16a7..00000000 --- a/test/31producer-test1.yml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: producer-test1 - namespace: kafka -spec: - template: - metadata: - name: producer-test1 - spec: - containers: - - name: kafka - image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: - - /bin/sh - - -c - - > - echo "test1 $(date)" - | - ./bin/kafka-console-producer.sh - --topic test1 - --broker-list kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 - ; sleep 1 - restartPolicy: Never diff --git a/test/99testclient.yml b/test/99testclient.yml deleted file mode 100644 index a3670044..00000000 --- a/test/99testclient.yml +++ /dev/null @@ -1,15 +0,0 @@ -# Kafka image without the service, so you can run ./bin/ stuff -# kubectl exec -ti testclient -- /bin/bash -apiVersion: v1 -kind: Pod -metadata: - name: testclient - namespace: kafka -spec: - containers: - - name: kafka - image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce - command: - - sh - - -c - - "exec tail -f /dev/null" diff --git a/test/basic-produce-consume.yml b/test/basic-produce-consume.yml new file mode 100644 index 00000000..f56a01f8 --- /dev/null +++ b/test/basic-produce-consume.yml @@ -0,0 +1,89 @@ +--- +kind: ConfigMap +metadata: + name: basic-produce-consume + namespace: test-kafka +apiVersion: v1 +data: + + setup.sh: |- + touch /tmp/testlog + + ./bin/kafka-topics.sh --zookeeper $ZOOKEEPER \ + --create --if-not-exists --topic test-basic-with-kafkacat \ + --partitions 1 
--replication-factor 1 + + # Despite the deprecation warning --zookeeper nothing is consumed when using --bootstrap-server + ./bin/kafka-console-consumer.sh --zookeeper $ZOOKEEPER --topic test-basic-produce-consume > /tmp/testconsumed & + + tail -f /tmp/testlog + + continue.sh: |- + exit 0 + + run.sh: |- + exec >> /tmp/testlog + exec 2>&1 + + unique=$(date -Ins) + + echo "Test $unique" | ./bin/kafka-console-producer.sh --broker-list $BOOTSTRAP --topic test-basic-produce-consume + echo "" + tail -n 1 /tmp/testconsumed | grep $unique + + # How to make this test fail: + #apt-get update && apt-get install -y --no-install-recommends procps + #pkill java + + exit 0 + +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: basic-produce-consume + namespace: test-kafka +spec: + replicas: 1 + template: + metadata: + labels: + test-target: kafka + test-type: readiness + spec: + containers: + - name: testcase + image: solsson/kafka:0.11.0.0@sha256:b27560de08d30ebf96d12e74f80afcaca503ad4ca3103e63b1fd43a2e4c976ce + env: + - name: BOOTSTRAP + value: kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092,kafka-2.broker.kafka.svc.cluster.local:9092 + - name: ZOOKEEPER + value: zookeeper.kafka.svc.cluster.local:2181 + # Test set up + command: + - /bin/bash + - -e + - /test/setup.sh + # Test run, again and again + readinessProbe: + exec: + command: + - /bin/bash + - -e + - /test/run.sh + # JVM start is slow, can we keep producer started and restore the default preriod 10s? + periodSeconds: 30 + # Test quit on nonzero exit + livenessProbe: + exec: + command: + - /bin/bash + - -e + - /test/continue.sh + volumeMounts: + - name: config + mountPath: /test + volumes: + - name: config + configMap: + name: basic-produce-consume diff --git a/test/test.sh b/test/test.sh deleted file mode 100644 index bfc4a8f0..00000000 --- a/test/test.sh +++ /dev/null @@ -1,34 +0,0 @@ - -# List topics -kubectl exec testclient -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --list - -# Create topic -kubectl exec testclient -- ./bin/kafka-topics.sh --zookeeper zookeeper:2181 --topic test1 --create --partitions 1 --replication-factor 1 - -# Set one of your terminals to listen to messages on the test topic -kubectl exec -ti testclient -- ./bin/kafka-console-consumer.sh --zookeeper zookeeper:2181 --topic test1 --from-beginning - -# Go ahead and produce messages -echo "Write a message followed by enter, exit using Ctrl+C" -kubectl exec -ti testclient -- ./bin/kafka-console-producer.sh --broker-list kafka-0.broker.kafka.svc.cluster.local:9092 --topic test1 - -# Bootstrap even if two nodes are down (shorter name requires same namespace) -kubectl exec -ti testclient -- ./bin/kafka-console-producer.sh --broker-list kafka-0.broker:9092,kafka-1.broker:9092,kafka-2.broker:9092 --topic test1 - -# The following commands run in the pod -kubectl exec -ti testclient -- /bin/bash - -# Topic 2, replicated -./bin/kafka-topics.sh --zookeeper zookeeper:2181 --describe --topic test2 - -./bin/kafka-verifiable-consumer.sh \ - --broker-list=kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092 \ - --topic=test2 --group-id=A --verbose - -# If a topic isn't available this producer will tell you -# WARN Error while fetching metadata with correlation id X : {topicname=LEADER_NOT_AVAILABLE} -# ... 
but with current config Kafka will auto-create the topic -./bin/kafka-verifiable-producer.sh \ - --broker-list=kafka-0.broker.kafka.svc.cluster.local:9092,kafka-1.broker.kafka.svc.cluster.local:9092 \ - --value-prefix=1 --topic=test2 \ - --acks=1 --throughput=1 --max-messages=10 From ab35705506f83581f99b764e9489af24482188a6 Mon Sep 17 00:00:00 2001 From: Staffan Olsson Date: Sat, 29 Jul 2017 04:51:26 +0200 Subject: [PATCH 93/93] The test concept just caught a mistake --- README.md | 2 +- test/basic-produce-consume.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 64649061..9853d12e 100644 --- a/README.md +++ b/README.md @@ -57,5 +57,5 @@ Don't forget the [addon](https://github.com/Yolean/kubernetes-kafka/labels/addon ``` kubectl apply -f test/ # Anything that isn't READY here is a failed test -kubectl -n test-kafka get pods -l test-target=kafka,test-type=readiness -w +kubectl get pods -l test-target=kafka,test-type=readiness -w --all-namespaces ``` diff --git a/test/basic-produce-consume.yml b/test/basic-produce-consume.yml index f56a01f8..fdacea06 100644 --- a/test/basic-produce-consume.yml +++ b/test/basic-produce-consume.yml @@ -10,7 +10,7 @@ data: touch /tmp/testlog ./bin/kafka-topics.sh --zookeeper $ZOOKEEPER \ - --create --if-not-exists --topic test-basic-with-kafkacat \ + --create --if-not-exists --topic test-basic-produce-consume \ --partitions 1 --replication-factor 1 # Despite the deprecation warning --zookeeper nothing is consumed when using --bootstrap-server
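With these last patches applied, the test concept can be exercised end to end (a sketch; the test pod name carries a generated suffix, so look it up first):

```
kubectl apply -f test/
# Anything that isn't READY here is a failed test
kubectl -n test-kafka get pods -l test-target=kafka,test-type=readiness -w
# Each testcase container tails /tmp/testlog, so the pod log shows what the probes ran
kubectl -n test-kafka get pods
kubectl -n test-kafka logs <basic-produce-consume-pod-name>
```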