diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 786e3d5c..e91fadf4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @confluentinc/clients +* @confluentinc/clients @confluentinc/data-governance diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index cb301559..4724334d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -9,11 +9,11 @@ about: Create a report to help us improve - Node Version [e.g. 8.2.1]: - NPM Version [e.g. 5.4.2]: - C++ Toolchain [e.g. Visual Studio, llvm, g++]: - - confluent-kafka-js version [e.g. 2.3.3]: + - confluent-kafka-javascript version [e.g. 2.3.3]: **Steps to Reproduce** -**confluent-kafka-js Configuration Settings** +**confluent-kafka-javascript Configuration Settings** **Additional context** diff --git a/.github/workflows/npm-publish.yml b/.github/workflows/npm-publish.yml index c547ffba..0a21a501 100644 --- a/.github/workflows/npm-publish.yml +++ b/.github/workflows/npm-publish.yml @@ -1,7 +1,7 @@ # This workflow will run tests using node and then publish a package to GitHub Packages when a release is created # For more information see: https://help.github.com/actions/language-and-framework-guides/publishing-nodejs-packages -name: Publish confluent-kafka-js +name: Publish confluent-kafka-javascript on: release: diff --git a/.gitignore b/.gitignore index 1b6d3a6a..603212d2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,19 @@ build/ +dist/ node_modules/ deps/librdkafka npm-debug.log docs +examples/**/package-lock.json + deps/* !deps/*.gyp !deps/windows-install.* .DS_Store +.idea .vscode +coverage diff --git a/.gitmodules b/.gitmodules index 4d4e7fb9..63dc1ac1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,3 @@ [submodule "deps/librdkafka"] path = deps/librdkafka - url = https://github.com/edenhill/librdkafka.git + url = https://github.com/confluentinc/librdkafka.git diff --git a/.jshintignore b/.jshintignore deleted file mode 100644 index b43bf86b..00000000 --- a/.jshintignore +++ /dev/null @@ -1 +0,0 @@ -README.md diff --git a/.jshintrc b/.jshintrc deleted file mode 100644 index 09968b8b..00000000 --- a/.jshintrc +++ /dev/null @@ -1,23 +0,0 @@ -{ - "node": true, - "mocha": true, - "browser": false, - "boss": true, - "curly": true, - "debug": false, - "devel": false, - "eqeqeq": true, - "evil": true, - "forin": false, - "latedef": false, - "noarg": true, - "nonew": true, - "nomen": false, - "onevar": false, - "plusplus": false, - "regexp": false, - "undef": true, - "strict": false, - "white": false, - "eqnull": true -} diff --git a/.npmignore b/.npmignore index c6793a26..418ac7aa 100644 --- a/.npmignore +++ b/.npmignore @@ -5,6 +5,9 @@ deps/* .gitmodules Dockerfile deps/librdkafka/config.h +schemaregistry +schemaregistry-examples build .github .vscode +.semaphore \ No newline at end of file diff --git a/.semaphore/build-docker.sh b/.semaphore/build-docker.sh new file mode 100755 index 00000000..3f148428 --- /dev/null +++ b/.semaphore/build-docker.sh @@ -0,0 +1,10 @@ +#!/bin/sh +# This script is used to build the project within a docker image. +# The docker image is assumed to be an alpine docker image, for glibc based builds, we use +# the semaphhore agent directly. 
+ +apk add -U ca-certificates openssl ncurses coreutils python3 make gcc g++ libgcc linux-headers grep util-linux binutils findutils perl patch musl-dev bash +# /v is the volume mount point for the project root +cd /v +npm install +npx node-pre-gyp package diff --git a/.semaphore/project.yml b/.semaphore/project.yml index e20c3839..7bef71a4 100644 --- a/.semaphore/project.yml +++ b/.semaphore/project.yml @@ -6,12 +6,12 @@ apiVersion: v1alpha kind: Project metadata: - name: confluent-kafka-js + name: confluent-kafka-javascript description: "" spec: visibility: private repository: - url: git@github.com:confluentinc/confluent-kafka-js.git + url: git@github.com:confluentinc/confluent-kafka-javascript.git run_on: - branches - tags diff --git a/.semaphore/project_public.yml b/.semaphore/project_public.yml new file mode 100644 index 00000000..fb83ce58 --- /dev/null +++ b/.semaphore/project_public.yml @@ -0,0 +1,25 @@ +# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common +# template and configurations in service.yml. +# Modifications in this file will be overwritten by generated content in the nightly run. +# For more information, please refer to the page: +# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI +apiVersion: v1alpha +kind: Project +metadata: + name: confluent-kafka-javascript + description: "" +spec: + visibility: public + repository: + url: git@github.com:confluentinc/confluent-kafka-javascript.git + run_on: + - forked_pull_requests + pipeline_file: .semaphore/semaphore.yml + integration_type: github_app + status: + pipeline_files: + - path: .semaphore/semaphore.yml + level: pipeline + forked_pull_requests: + allowed_contributors: + - "ConfluentSemaphore" diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml index 24f504ca..a3d82cf2 100644 --- a/.semaphore/semaphore.yml +++ b/.semaphore/semaphore.yml @@ -15,7 +15,7 @@ auto_cancel: when: "branch != 'master'" execution_time_limit: - hours: 1 + hours: 3 queue: - when: "branch != 'master'" @@ -25,37 +25,492 @@ global_job_config: prologue: commands: - checkout - - make show-args - - . vault-setup - - . vault-sem-get-secret ci-reporting - - . vault-sem-get-secret v1/ci/kv/service-foundations/cc-mk-include - - make init-ci - epilogue: - always: - commands: - - make epilogue-ci + - git submodule update --init --recursive + - cd deps/librdkafka + - git fetch origin + - git checkout v2.5.3 + - cd ../../ + - cache clear blocks: - - name: "Build, Test, Release" - run: - # don't run the build or unit tests on non-functional changes... 
- when: "change_in('/', {exclude: ['/.deployed-versions/', '.github/']})" + - name: "Linux amd64 (musl): Build and test" + dependencies: [ ] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + jobs: + - name: "Build from source and test for musl" + commands: + - docker run -v "$(pwd):/v" node:18-alpine /v/.semaphore/build-docker.sh + + - name: "Linux arm64 (musl): Build and test" + dependencies: [ ] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + jobs: + - name: "Build from source and test for musl" + commands: + - docker run -v "$(pwd):/v" node:18-alpine /v/.semaphore/build-docker.sh + + - name: "Linux arm64: Build and test" + dependencies: [ ] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + jobs: + - name: "Build from source and test" + commands: + - npm install # this will actually not build anything if we have a release, but rather, fetch things using node-pre-gyp - so change this later. + - make test + + - name: 'macOS arm64/m1: Build and test' + dependencies: [] + task: + agent: + machine: + type: s1-prod-macos-13-5-arm64 + jobs: + - name: 'Build from source and test' + commands: + - npm install # this will actually not build anything if we have a release, but rather, fetch things using node-pre-gyp - so change this later. + - make test + + - name: "Linux amd64: Build, test, lint" + dependencies: [ ] task: - # You can customize your CI job here -# env_vars: -# # custom env_vars -# prologue: -# commands: -# # custom vault secrets -# # custom prologue commands + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + prologue: + commands: + - npm install # this will actually not build anything if we have a release, but rather, fetch things using node-pre-gyp - so change this later. 
jobs: - - name: "Build, Test, Release" + - name: "Test" commands: - - make build - make test - - make release-ci - epilogue: - always: + - name: "Promisified Tests" + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + - docker compose up -d && sleep 30 + - export NODE_OPTIONS='--max-old-space-size=1536' + - npx jest --forceExit --no-colors --ci test/promisified/admin/delete_groups.spec.js test/promisified/consumer/pause.spec.js + - name: "ESLint" + commands: + - npx eslint lib/kafkajs + + - name: "Linux amd64: Performance" + dependencies: [ ] + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-4 + env_vars: + - name: TARGET_PRODUCE_PERFORMANCE + value: "35" + - name: TARGET_CONSUME_PERFORMANCE + value: "18" + - name: TARGET_CTP_PERFORMANCE + value: "0.02" + prologue: + commands: + - wget -qO - https://packages.confluent.io/deb/7.7/archive.key | sudo apt-key add - + - sudo add-apt-repository "deb https://packages.confluent.io/clients/deb $(lsb_release -cs) main" + - sudo apt-get update + - sudo apt-get install -y build-essential gcc g++ make python3 + - sudo apt-get install -y librdkafka-dev bc + - export CKJS_LINKING=dynamic + - export BUILD_LIBRDKAFKA=0 + - npm install + - npx node-pre-gyp --build-from-source clean + - npx node-pre-gyp --build-from-source configure + - npx node-pre-gyp --build-from-source build + jobs: + - name: "Performance Test" + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + - docker compose up -d && sleep 30 + - export NODE_OPTIONS='--max-old-space-size=1536' + - cd examples/performance + - npm install + - bash -c '../../ci/tests/run_perf_test.sh' + + - name: "Linux amd64: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + env_vars: + - name: ARCHITECTURE + value: "x64" + - name: PLATFORM + value: "linux" + - name: LIBC + value: "glibc" + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + jobs: + - name: "Release: LTS:18" + commands: + - sem-version node 18.19.0 + - export NODE_ABI=108 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - sem-version node 20.10.0 + - export NODE_ABI=115 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. 
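+            # `node-pre-gyp package` bundles the compiled addon into the prebuilt tarball that the `ls` and `artifact push` steps below expect under build/stage/<tag>/.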
+ - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - sem-version node 21.4.0 + - export NODE_ABI=120 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - sem-version node 22.2.0 + - export NODE_ABI=127 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + + - name: "Linux arm64: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + env_vars: + - name: ARCHITECTURE + value: "arm64" + - name: PLATFORM + value: "linux" + - name: LIBC + value: "glibc" + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + jobs: + - name: "Release: LTS:18" + commands: + - sem-version node 18.19.0 + - export NODE_ABI=108 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - sem-version node 20.10.0 + - export NODE_ABI=115 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - sem-version node 21.4.0 + - export NODE_ABI=120 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. 
+ - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - sem-version node 22.2.0 + - export NODE_ABI=127 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + + - name: "Linux amd64 musl: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-1 + env_vars: + - name: ARCHITECTURE + value: "x64" + - name: PLATFORM + value: "linux" + - name: LIBC + value: "musl" + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER --password $DOCKERHUB_APIKEY' + jobs: + - name: "Release: LTS:18" + commands: + - export NODE_ABI=108 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:18-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - export NODE_ABI=115 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:20-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - export NODE_ABI=120 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:21-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - export NODE_ABI=127 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:22-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + + - name: "Linux arm64 musl: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-arm64-1 + env_vars: + - name: ARCHITECTURE + value: "arm64" + - name: PLATFORM + value: "linux" + - name: LIBC + value: "musl" + prologue: + commands: + - '[[ -z $DOCKERHUB_APIKEY ]] || docker login --username $DOCKERHUB_USER 
--password $DOCKERHUB_APIKEY' + jobs: + - name: "Release: LTS:18" + commands: + - export NODE_ABI=108 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:18-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - export NODE_ABI=115 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:20-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - export NODE_ABI=120 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:21-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - export NODE_ABI=127 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-linux-${LIBC}-${ARCHITECTURE}.tar.gz" + - docker run -v "$(pwd):/v" node:22-alpine /v/.semaphore/build-docker.sh + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + + - name: "macOS arm64/m1: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-macos-13-5-arm64 + env_vars: + - name: ARCHITECTURE + value: "arm64" + - name: PLATFORM + value: "darwin" + - name: LIBC + value: "unknown" + jobs: + - name: "Release: LTS:18" + commands: + - sem-version node 18.19.0 + - export NODE_ABI=108 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - sem-version node 20.10.0 + - export NODE_ABI=115 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. 
+ - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - sem-version node 21.4.0 + - export NODE_ABI=120 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - sem-version node 22.2.0 + - export NODE_ABI=127 + - export ARTIFACT_KEY="confluent-kafka-javascript-${SEMAPHORE_GIT_TAG_NAME}-node-v${NODE_ABI}-${PLATFORM}-${LIBC}-${ARCHITECTURE}.tar.gz" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY} + - artifact push workflow "build/stage/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/${ARTIFACT_KEY}" + + - name: "Windows x64: Release" + dependencies: [ ] + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-windows + env_vars: + # Disable vcpkg telemetry + - name: VCPKG_DISABLE_METRICS + value: 'yes' + - name: ARCHITECTURE + value: "x64" + - name: PLATFORM + value: "win32" + - name: LIBC + value: "unknown" + prologue: + commands: + # The semaphore agent already comes with an installed version of node. We, however, need to use a different + # version of node for the release (as many as we need to cover all the different ABIs). + # The node installer does not allow us to downgrade, so we need to uninstall the current version. + # The method below isn't particularly robust (as it depends on the particular format of the URL), but it + # works and can be easily fixed if it breaks (the node --version in the below jobs can be checked if there are + # any issues in the build). + - $env:InstalledMajor = (Get-Command node).Version.Major + - $env:InstalledMinor = (Get-Command node).Version.Minor + - $env:InstalledBuild = (Get-Command node).Version.Build + - $env:InstalledVersion = "v${env:InstalledMajor}.${env:InstalledMinor}.${env:InstalledBuild}" + - echo "https://nodejs.org/dist/${env:InstalledVersion}/node-${env:InstalledVersion}-x64.msi" + - Invoke-WebRequest "https://nodejs.org/dist/${env:InstalledVersion}/node-${env:InstalledVersion}-x64.msi" -OutFile node_old.msi + - msiexec /qn /l* node-old-log.txt /uninstall node_old.msi + - cat .\node-old-log.txt + jobs: + - name: "Release: LTS:18" + commands: + - Invoke-WebRequest "https://nodejs.org/download/release/v18.19.0/node-v18.19.0-x64.msi" -OutFile node.msi + - msiexec /qn /l* node-log.txt /i node.msi + - cat .\node-log.txt + - node --version + - pip install setuptools + - $env:NODE_ABI = 108 + - $env:ARTIFACT_KEY = "confluent-kafka-javascript-${env:SEMAPHORE_GIT_TAG_NAME}-node-v${env:NODE_ABI}-${env:PLATFORM}-${env:LIBC}-${env:ARCHITECTURE}.tar.gz" + - echo "$env:ARTIFACT_KEY" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. 
+ - npx node-pre-gyp package + - ls "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - artifact push workflow "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" --destination "releases/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - name: "Release: LTS:20" + commands: + - Invoke-WebRequest "https://nodejs.org/dist/v20.11.0/node-v20.11.0-x64.msi" -OutFile node.msi + - msiexec /qn /l* node-log.txt /i node.msi + - node --version + - pip install setuptools + - $env:NODE_ABI = 115 + - $env:ARTIFACT_KEY = "confluent-kafka-javascript-${env:SEMAPHORE_GIT_TAG_NAME}-node-v${env:NODE_ABI}-${env:PLATFORM}-${env:LIBC}-${env:ARCHITECTURE}.tar.gz" + - echo "$env:ARTIFACT_KEY" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - artifact push workflow "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" --destination "releases/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - name: "Release: latest: 21" + commands: + - Invoke-WebRequest "https://nodejs.org/dist/v21.6.1/node-v21.6.1-x64.msi" -OutFile node.msi + - msiexec /qn /l* node-log.txt /i node.msi + - node --version + - pip install setuptools + - $env:NODE_ABI = 120 + - $env:ARTIFACT_KEY = "confluent-kafka-javascript-${env:SEMAPHORE_GIT_TAG_NAME}-node-v${env:NODE_ABI}-${env:PLATFORM}-${env:LIBC}-${env:ARCHITECTURE}.tar.gz" + - echo "$env:ARTIFACT_KEY" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. + - npx node-pre-gyp package + - ls "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - artifact push workflow "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" --destination "releases/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - name: "Release: latest: 22" + commands: + - Invoke-WebRequest "https://nodejs.org/dist/v22.2.0/node-v22.2.0-x64.msi" -OutFile node.msi + - msiexec /qn /l* node-log.txt /i node.msi + - node --version + - pip install setuptools + - $env:NODE_ABI = 127 + - $env:ARTIFACT_KEY = "confluent-kafka-javascript-${env:SEMAPHORE_GIT_TAG_NAME}-node-v${env:NODE_ABI}-${env:PLATFORM}-${env:LIBC}-${env:ARCHITECTURE}.tar.gz" + - echo "$env:ARTIFACT_KEY" + - npm install # node-pre-gyp will fallback to build here, because new tag implies no release yet. 
+ - npx node-pre-gyp package + - ls "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + - artifact push workflow "build/stage/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" --destination "releases/${env:SEMAPHORE_GIT_TAG_NAME}/${env:ARTIFACT_KEY}" + + - name: 'Packaging: tar all release artifacts' + dependencies: + - 'Linux amd64: Release' + - 'Linux arm64: Release' + - 'Linux amd64 musl: Release' + - 'Linux arm64 musl: Release' + - 'macOS arm64/m1: Release' + - 'Windows x64: Release' + run: + when: "tag =~ '^v[0-9]\\.'" + task: + agent: + machine: + type: s1-prod-ubuntu20-04-amd64-2 + jobs: + - name: "Tarball" commands: - - make epilogue-ci - - make testbreak-after + - artifact pull workflow releases + - tar -czvf releases.${SEMAPHORE_GIT_TAG_NAME}.tar.gz releases + - artifact push project "releases.${SEMAPHORE_GIT_TAG_NAME}.tar.gz" --destination "releases/${SEMAPHORE_GIT_TAG_NAME}/releases.${SEMAPHORE_GIT_TAG_NAME}.tar.gz" \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..11802730 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,142 @@ +# confluent-kafka-javascript v0.2.1 + +v0.2.1 is a limited availability release. It is supported for all usage. + +## Features + +1. Update README, docs, and examples for Confluent's Schema Registry client. + + +# confluent-kafka-javascript v0.2.0 + +v0.2.0 is a limited availability release. It is supported for all usage. + +## Features + +1. Switch to using `librdkafka` on the latest released tag `v2.5.3` instead of `master`. + + +# confluent-kafka-javascript v0.1.17-devel + +v0.1.17-devel is a pre-production, early-access release. + +## Features + +1. Add a commitCb method to the callback-based API which allows committing asynchronously. +2. Pass assign/unassign functions to the rebalance callback in the promisified API, allowing + the user to control the assignment of partitions, or pause just after a rebalance. +3. Remove store from promisified API and let the library handle all the stores. +4. Add JavaScript-level debug logging to the client for debugging issues within the binding. +5. Various fixes for performance and robustness of the consumer cache. +6. Remove `consumerGroupId` argument from the `sendOffsets` method of the transactional producer, + and instead, only allow using a `consumer`. + +## Fixes + +1. Do not modify RegExps which don't start with a ^, instead, throw an error so + that there is no unexpected behaviour for the user (Issue [#64](https://github.com/confluentinc/confluent-kafka-javascript/issues/64)). +2. Do not mutate arguments in run, pause and resume (Issue [#61](https://github.com/confluentinc/confluent-kafka-javascript/issues/61)). +3. Fix a segmentation fault in `listGroups` when passing `matchConsumerGroupStates` as undefined. + + +# confluent-kafka-javascript v0.1.16-devel + +v0.1.16-devel is a pre-production, early-access release. + +## Features + +1. Add per-partition concurrency to consumer. +2. Add true `eachBatch` support to consumer. +3. Add a `leaderEpoch` field to the topic partitions where required (listing, committing, etc.). + + +# confluent-kafka-javascript v0.1.15-devel + +v0.1.15-devel is a pre-production, early-access release. + +## Features + +1. Add Node v22 builds and bump librdkafka version on each version bump of this library. + + +# confluent-kafka-javascript v0.1.14-devel + +v0.1.14-devel is a pre-production, early-access release. + +## Features + +1. Add metadata to offset commit and offset store (non-promisified API). +2. 
Add types for logger and loglevel to configuration. +3. Add Producer polling from background thread. This improves performance for cases when send is awaited on. +4. Enable consume optimization from v0.1.13-devel (Features #2) by default for the promisified API. + +## Bug Fixes + +1. Fix issues with the header conversions from promisified API to the non-promisified API to match + the type signature and allow Buffers to be passed as header values in the C++ layer. + + +# confluent-kafka-javascript v0.1.13-devel + +v0.1.13-devel is a pre-production, early-access release. + +## Features + +1. Add support for `storeOffsets` in the consumer API. +2. Add optimization while consuming, in cases where the size of messages pending in our subscription is less than the consumer cache size. + +## Bug Fixes + +1. Fix memory leak in incremental assign (@martijnimhoff, #35). +2. Fix various issues with typings, and reconcile typings, JavaScript code, and MIGRATION.md to be consistent. + + +# confluent-kafka-javascript v0.1.12-devel + +v0.1.12-devel is a pre-production, early-access release. + +## Features + +1. Add support for `listTopics` in the Admin API. +2. Add support for OAUTHBEARER token refresh callback for both promisified and non promisified API. + +## Bug Fixes + +1. Fix aliasing bug between `NodeKafka::Conf` and `RdKafka::ConfImpl`. +2. Fix issue where `assign/unassign` were called instead of `incrementalAssign/incrementalUnassign` while using + the Cooperative Sticky assigner, and setting the `rebalance_cb` as a boolean rather than as a function. +3. Fix memory leaks in Dispatcher and Conf (both leaked memory at client close). +4. Fix type definitions and make `KafkaJS` and `RdKafka` separate namespaces, while maintaining compatibility + with node-rdkafka's type definitions. + + +# confluent-kafka-javascript v0.1.11-devel + +v0.1.11-devel is a pre-production, early-access release. + +## Features + +1. Add support for `eachBatch` in the Consumer API (partial support for API compatibility). +2. Add support for `listGroups`, `describeGroups` and `deleteGroups` in the Admin API. + + +# confluent-kafka-javascript v0.1.10-devel + +v0.1.10-devel is a pre-production, early-access release. + +## Features + +1. Pre-built binaries for Windows (x64) added on an experimental basis. + + +# confluent-kafka-javascript v0.1.9-devel + +v0.1.9-devel is a pre-production, early-access release. + +## Features + +1. Pre-built binaries for Linux (both amd64 and arm64, both musl and glibc), for macOS (m1), for node versions 18, 20 and 21. +2. Promisified API for Consumer, Producer and Admin Client. +3. Allow passing topic configuration properties via the global configuration block. +4. Remove dependencies with security issues. +5. Support for the Cooperative Sticky assignor. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e2cabe1f..e718a27f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,39 +1,17 @@ -# Contributing to `confluent-kafka-js` +# Contributing to `confluent-kafka-javascript` :+1::tada: First off, thanks for taking the time to contribute! :tada::+1: -The following is a set of guidelines for contributing to `confluent-kafka-js` +The following is a set of guidelines for contributing to `confluent-kafka-javascript` which is hosted by [Confluent Inc.](https://github.com/confluentinc) on GitHub. This document lists rules, guidelines, and help getting started, so if you feel something is missing feel free to send a pull request. 
-#### Table Of Contents - -[What should I know before I get started?](#what-should-i-know-before-i-get-started) - * [Contributor Agreement](#contributor-agreement) - -[How Can I Contribute?](#how-can-i-contribute) - * [Reporting Bugs](#reporting-bugs) - * [Suggesting Enhancements](#suggesting-enhancements) - * [Pull Requests](#pull-requests) - -[Styleguides](#styleguides) - * [Git Commit Messages](#git-commit-messages) - * [JavaScript Styleguide](#javascript-styleguide) - * [C++ Styleguide](#c++-styleguide) - * [Specs Styleguide](#specs-styleguide) - * [Documentation Styleguide](#documentation-styleguide) - -[Debugging](#debugging) - * [Debugging C++](#debugging-c) - -[Updating librdkafka version](#updating-librdkafka-version) - ## What should I know before I get started? ### Contributor Agreement -Not currently required. +Required (please follow instructions after making any Pull Requests). ## How can I contribute? @@ -49,6 +27,10 @@ replicas, partitions, and brokers you are connecting to, because some issues might be related to Kafka. A list of `librdkafka` configuration key-value pairs also helps. +Adding the property `debug` in your `librdkafka` configuration will help us. A list of +possible values is available [here](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md), +but you can set it to `all` if verbose logs are okay. + ### Suggesting Enhancements Please use __Github Issues__ to suggest enhancements. We are happy to consider @@ -61,7 +43,7 @@ library's core. * Include new test cases (either end-to-end or unit tests) with your change. * Follow our style guides. -* Make sure all tests are still passing and the `linter` does not report any issues. +* Make sure all tests are still passing and the linter does not report any issues. * End files with a new line. * Document the new code in the comments (if it is JavaScript) so the documentation generator can update the reference documentation. @@ -103,15 +85,8 @@ In short: ### JavaScript Styleguide -* Place `module.exports` at or near the top of the file. - * Defined functions are hoisted, so it is appropriate to define the - function after you export it. - * When exporting an object, define it first, then export it, and then add - methods or properties. -* Do not use ES2015 specific features (for example, do not use `let`, `const`, - or `class`). * All callbacks should follow the standard Node.js callback signature. -* Your JavaScript should properly pass the linter (`make jslint`). +* Your JavaScript should properly pass the linter (`make jslint` and `make eslint`). ### C++ Styleguide @@ -121,7 +96,8 @@ In short: ### Specs Styleguide -* Write all JavaScript tests by using the `mocha` testing framework. +* Write JavaScript tests by using the `mocha` testing framework for the + non-promisified API and `jest` for the promisified API. * All `mocha` tests should use exports syntax. * All `mocha` test files should be suffixed with `.spec.js` instead of `.js`. * Unit tests should mirror the JavaScript files they test (for example, @@ -144,7 +120,7 @@ In short: ## Editor -Using Visual Studio code to develop on `confluent-kafka-js`. If you use it you can configure the C++ plugin to resolve the paths needed to inform your intellisense. This is the config file I am using on a mac to resolve the required paths: +Using Visual Studio code to develop on `confluent-kafka-javascript`. If you use it you can configure the C++ plugin to resolve the paths needed to inform your intellisense. 
This is the config file I am using on a mac to resolve the required paths: `c_cpp_properties.json` ``` @@ -176,6 +152,26 @@ Using Visual Studio code to develop on `confluent-kafka-js`. If you use it you c } ``` +## Tests + +This project includes three types of tests in this project: +* end-to-end integration tests (`mocha`) +* unit tests (`mocha`) +* integration tests for promisified API (`jest`) + +You can run all types of tests by using `Makefile`. Doing so calls `mocha` or `jest` in your locally installed `node_modules` directory. + +* Before you run the tests, be sure to init and update the submodules: + 1. `git submodule init` + 2. `git submodule update` +* To run the unit tests, you can run `make lint` or `make test`. +* To run the promisified integration tests, you can use `make promisified_test`. + You must have a running Kafka installation available. By default, the test tries to connect to `localhost:9092`; + however, you can supply the `KAFKA_HOST` environment variable to override this default behavior. +* To run the integration tests, you can use `make e2e`. + You must have a running Kafka installation available. By default, the test tries to connect to `localhost:9092`; + however, you can supply the `KAFKA_HOST` environment variable to override this default behavior. Run `make e2e`. + ## Debugging ### Debugging C++ @@ -193,12 +189,22 @@ gdb node You can add breakpoints and so on after that. +### Debugging and Profiling JavaScript + +Run the code with the `--inspect` flag, and then open `chrome://inspect` in Chrome and connect to the debugger. + +Example: + +``` +node --inspect path/to/file.js +``` + ## Updating librdkafka version -The librdkafka should be periodically updated to the latest release in https://github.com/edenhill/librdkafka/releases +The librdkafka should be periodically updated to the latest release in https://github.com/confluentinc/librdkafka/releases Steps to update: -1. Update the `librdkafka` property in [`package.json`](https://github.com/confluentinc/confluent-kafka-js/blob/master/package.json) to the desired version. +1. Update the `librdkafka` property in [`package.json`](https://github.com/confluentinc/confluent-kafka-javascript/blob/master/package.json) to the desired version. 1. Update the librdkafka git submodule to that versions release commit (example below) @@ -209,20 +215,27 @@ Steps to update: If you get an error during that checkout command, double check that the submodule was initialized / cloned! You may need to run `git submodule update --init --recursive` -1. Update [`config.d.ts`](https://github.com/confluentinc/confluent-kafka-js/blob/master/config.d.ts) and [`errors.d.ts`](https://github.com/confluentinc/confluent-kafka-js/blob/master/errors.d.ts) TypeScript definitions by running: +1. Update [`config.d.ts`](https://github.com/confluentinc/confluent-kafka-javascript/blob/master/config.d.ts) and [`errors.d.ts`](https://github.com/confluentinc/confluent-kafka-javascript/blob/master/errors.d.ts) TypeScript definitions by running: ```bash node ci/librdkafka-defs-generator.js ``` - Note: This is ran automatically during CI flows but it's good to run it during the version upgrade pull request. 1. Run `npm install --lockfile-version 2` to build with the new version and fix any build errors that occur. 1. Run unit tests: `npm run test` -1. Update the version numbers referenced in the [`README.md`](https://github.com/confluentinc/confluent-kafka-js/blob/master/README.md) file to the new version. +1. 
Update the version numbers referenced in the [`README.md`](https://github.com/confluentinc/confluent-kafka-javascript/blob/master/README.md) file to the new version. + +## Releasing + +1. Increment the `version` in `package.json`. Change the version in `client.js` and `README.md`. Change the librdkafka version in `semaphore.yml` and in `package.json`. + +1. Run `npm install` to update the `package-lock.json` file. -## Publishing new npm version +1. Create a PR and merge the above changes, and tag the merged commit with the new version, e.g. `git tag vx.y.z && git push origin vx.y.z`. + This should be the same string as `version` in `package.json`. -1. Increment the `version` in `package.json` and merge that change in. +1. The CI will run on the tag, which will create the release artifacts in Semaphore CI. -1. Create a new github release. Set the tag & release title to the same string as `version` in `package.json`. +1. Create a new GitHub release with the tag, and upload the release artifacts from Semaphore CI. + The release title should be the same string as `version` in `package.json`. \ No newline at end of file diff --git a/INTRODUCTION.md b/INTRODUCTION.md new file mode 100644 index 00000000..ecbb3c2a --- /dev/null +++ b/INTRODUCTION.md @@ -0,0 +1,546 @@ +# Introduction to Confluent-Kafka-JavaScript + +## Configuration + +You can pass many configuration options to `librdkafka`. A full list can be found in `librdkafka`'s [Configuration.md](https://github.com/confluentinc/librdkafka/blob/v2.3.0/CONFIGURATION.md) + +Configuration keys that have the suffix `_cb` are designated as callbacks. Some +of these keys are informational and you can choose to opt-in (for example, `dr_cb`). Others are callbacks designed to +return a value, such as `partitioner_cb`. + +Not all of these options are supported. +The library will throw an error if the value you send in is invalid. + +The library currently supports the following callbacks: +* `partitioner_cb` +* `dr_cb` or `dr_msg_cb` +* `event_cb` +* `rebalance_cb` (see [Rebalancing](#rebalancing)) +* `offset_commit_cb` (see [Commits](#commits)) + +### Librdkafka Methods + +This library includes two utility functions for detecting the status of your installation. Please try to include these when making issue reports where applicable. + +You can get the features supported by your compile of `librdkafka` by reading the variable "features" on the root of the `confluent-kafka-javascript` object. + +```js +const Kafka = require('@confluentinc/kafka-javascript'); +console.log(Kafka.features); + +// #=> [ 'gzip', 'snappy', 'ssl', 'sasl', 'regex', 'lz4' ] +``` + +You can also get the version of `librdkafka` + +```js +const Kafka = require('@confluentinc/kafka-javascript'); +console.log(Kafka.librdkafkaVersion); + +// #=> 2.3.0 +``` + +## Usage: promisified API + +Still being written. In the meantime, the [QUICKSTART](./QUICKSTART.md) has a good starting point. + +## Usage: non-promisified API + +### Sending Messages + +A `Producer` sends messages to Kafka. The `Producer` constructor takes a configuration object, as shown in the following example: + +```js +const producer = new Kafka.Producer({ + 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' +}); +``` + +A `Producer` requires only `metadata.broker.list` (the Kafka brokers) to be created. The values in this list are separated by commas. For other configuration options, see the [Configuration.md](https://github.com/confluentinc/librdkafka/blob/v2.3.0/CONFIGURATION.md) file described previously. 
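+
+Like the consumer constructor shown later in this document, the producer constructor can also be given a second object with topic-level configuration. A minimal sketch (the broker address and the `request.required.acks` value here are only illustrative, not required settings):
+
+```js
+const producerWithTopicConf = new Kafka.Producer({
+  'metadata.broker.list': 'localhost:9092'
+}, {
+  // Topic-level setting: require acknowledgement from all in-sync replicas.
+  'request.required.acks': -1
+});
+```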
+ +The following example illustrates a list with several `librdkafka` options set. + +```js +const producer = new Kafka.Producer({ + 'client.id': 'kafka', + 'metadata.broker.list': 'localhost:9092', + 'compression.codec': 'gzip', + 'retry.backoff.ms': 200, + 'message.send.max.retries': 10, + 'socket.keepalive.enable': true, + 'queue.buffering.max.messages': 100000, + 'queue.buffering.max.ms': 1000, + 'batch.num.messages': 1000000, + 'dr_cb': true +}); +``` + +#### Stream API + +You can easily use the `Producer` as a writable stream immediately after creation (as shown in the following example): + +```js +// Our producer with its Kafka brokers +// This call returns a new writable stream to our topic 'topic-name' +const stream = Kafka.Producer.createWriteStream({ + 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' +}, {}, { + topic: 'topic-name' +}); + +// Writes a message to the stream +const queuedSuccess = stream.write(Buffer.from('Awesome message')); + +if (queuedSuccess) { + console.log('We queued our message!'); +} else { + // Note that this only tells us if the stream's queue is full, + // it does NOT tell us if the message got to Kafka! See below... + console.log('Too many messages in our queue already'); +} + +// NOTE: MAKE SURE TO LISTEN TO THIS IF YOU WANT THE STREAM TO BE DURABLE +// Otherwise, any error will bubble up as an uncaught exception. +stream.on('error', (err) => { + // Here's where we'll know if something went wrong sending to Kafka + console.error('Error in our kafka stream'); + console.error(err); +}) +``` + +If you do not want your code to crash when an error happens, ensure you have an `error` listener on the stream. Most errors are not necessarily fatal, but the ones that are will immediately destroy the stream. If you use `autoClose`, the stream will close itself at the first sign of a problem. + +#### Standard API + +The Standard API is more performant, particularly when handling high volumes of messages. +However, it requires more manual setup to use. The following example illustrates its use: + +```js +const producer = new Kafka.Producer({ + 'metadata.broker.list': 'localhost:9092', + 'dr_cb': true +}); + +// Connect to the broker manually +producer.connect(); + +// Wait for the ready event before proceeding +producer.on('ready', () => { + try { + producer.produce( + // Topic to send the message to + 'topic', + // optionally we can manually specify a partition for the message + // this defaults to -1 - which will use librdkafka's default partitioner (consistent random for keyed messages, random for unkeyed messages) + null, + // Message to send. Must be a buffer + Buffer.from('Awesome message'), + // for keyed messages, we also specify the key - note that this field is optional + 'Stormwind', + // you can send a timestamp here. If your broker version supports it, + // it will get added. Otherwise, we default to 0 + Date.now(), + // you can send an opaque token here, which gets passed along + // to your delivery reports + ); + } catch (err) { + console.error('A problem occurred when sending our message'); + console.error(err); + } +}); + +// Any errors we encounter, including connection errors +producer.on('event.error', (err) => { + console.error('Error from producer'); + console.error(err); +}) + +// We must either call .poll() manually after sending messages +// or set the producer to poll on an interval (.setPollInterval). +// Without this, we do not get delivery events and the queue +// will eventually fill up. 
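+// As a sketch of the manual alternative, you could call producer.poll()
+// yourself after producing, or on your own timer, e.g.:
+//   setInterval(() => producer.poll(), 100);
+// Here we use the built-in interval-based polling instead: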
+producer.setPollInterval(100); +``` + +To see the configuration options available to you, see the [Configuration](#configuration) section. + +##### Methods + +| Method | Description | +|---------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `producer.connect()` | Connects to the broker.

The `connect()` method emits the `ready` event when it connects successfully. If it does not, the error will be passed through the callback. | +| `producer.disconnect()` | Disconnects from the broker.

The `disconnect()` method emits the `disconnected` event when it has disconnected. If it does not, the error will be passed through the callback. | +| `producer.poll()` | Polls the producer for delivery reports or other events to be transmitted via the emitter.

In order to get the events in `librdkafka`'s queue to emit, you must call this regularly. | +| `producer.setPollInterval(interval)` | Polls the producer on this interval, handling disconnections and reconnection. Set it to 0 to turn it off. | +| `producer.produce(topic, partition, msg, key, timestamp, opaque)` | Sends a message.

The `produce()` method throws when produce would return an error. Ordinarily, this is just if the queue is full. | +| `producer.flush(timeout, callback)` | Flush the librdkafka internal queue, sending all messages. Default timeout is 500ms | +| `producer.initTransactions(timeout, callback)` | Initializes the transactional producer. | +| `producer.beginTransaction(callback)` | Starts a new transaction. | +| `producer.sendOffsetsToTransaction(offsets, consumer, timeout, callback)` | Sends consumed topic-partition-offsets to the broker, which will get committed along with the transaction. | +| `producer.abortTransaction(timeout, callback)` | Aborts the ongoing transaction. | +| `producer.commitTransaction(timeout, callback)` | Commits the ongoing transaction. | + +##### Events + +Some configuration properties that end in `_cb` indicate that an event should be generated for that option. You can either: + +* provide a value of `true` and react to the event +* provide a callback function directly + +The following example illustrates an event: + +```js +const producer = new Kafka.Producer({ + 'client.id': 'my-client', // Specifies an identifier to use to help trace activity in Kafka + 'metadata.broker.list': 'localhost:9092', // Connect to a Kafka instance on localhost + 'dr_cb': true // Specifies that we want a delivery-report event to be generated +}); + +// Poll for events every 100 ms +producer.setPollInterval(100); + +producer.on('delivery-report', (err, report) => { + // Report of delivery statistics here: + // + console.log(report); +}); +``` + +The following table describes types of events. + +| Event | Description | +|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `disconnected` | The `disconnected` event is emitted when the broker has disconnected.

This event is emitted only when `.disconnect` is called. The wrapper will always try to reconnect otherwise. | +| `ready` | The `ready` event is emitted when the `Producer` is ready to send messages. | +| `event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option). | +| `event.log` | The `event.log` event is emitted when logging events come in (if you opted into logging via the `event_cb` option).

You will need to set a value for `debug` if you want to send information. | | `event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | | `event.error` | The `event.error` event is emitted when `librdkafka` reports an error. | | `event.throttle` | The `event.throttle` event is emitted when `librdkafka` reports throttling. | | `delivery-report` | The `delivery-report` event is emitted when a delivery report has been found via polling.

To use this event, you must set `request.required.acks` to `1` or `-1` in topic configuration and `dr_cb` (or `dr_msg_cb` if you want the report to contain the message payload) to `true` in the `Producer` constructor options. | + +#### Higher Level Producer + +The higher level producer is a variant of the producer which can propagate callbacks to you upon message delivery. + +```js +const producer = new Kafka.HighLevelProducer({ + 'metadata.broker.list': 'localhost:9092', +}); +``` + +This will enrich the produce call so it will have a callback to tell you when the message has been delivered. You lose the ability to specify opaque tokens. + +```js +producer.produce(topicName, null, Buffer.from('alliance4ever'), null, Date.now(), (err, offset) => { + // The offset if our acknowledgement level allows us to receive delivery offsets + console.log(offset); +}); +``` + +Additionally you can add serializers to modify the value of a produce for a key or value before it is sent over to Kafka. + +```js +producer.setValueSerializer((value) => { + return Buffer.from(JSON.stringify(value)); +}); +``` + +Otherwise the behavior of the class should be exactly the same. + +### Kafka.KafkaConsumer + +To read messages from Kafka, you use a `KafkaConsumer`. You instantiate a `KafkaConsumer` object as follows: + +```js +const consumer = new Kafka.KafkaConsumer({ + 'group.id': 'kafka', + 'metadata.broker.list': 'localhost:9092', +}, {}); +``` + +The first parameter is the global config, while the second parameter is the topic config that gets applied to all subscribed topics. To view a list of all supported configuration properties, see the [Configuration.md](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md) file described previously. Look for the `C` and `*` keys. + +The `group.id` and `metadata.broker.list` properties are required for a consumer. + +#### Rebalancing + +Rebalancing is managed internally by `librdkafka` by default. If you would like to override this functionality, you may provide your own logic as a rebalance callback. + +```js +const consumer = new Kafka.KafkaConsumer({ + 'group.id': 'kafka', + 'metadata.broker.list': 'localhost:9092', + 'rebalance_cb': (err, assignment) => { + + if (err.code === Kafka.CODES.ERRORS.ERR__ASSIGN_PARTITIONS) { + // Note: this can throw when you are disconnected. Take care and wrap it in + // a try catch if that matters to you + this.assign(assignment); + } else if (err.code == Kafka.CODES.ERRORS.ERR__REVOKE_PARTITIONS){ + // Same as above + this.unassign(); + } else { + // We had a real error + console.error(err); + } + + } +}) +``` + +`this` is bound to the `KafkaConsumer` you have created. By specifying a `rebalance_cb` you can also listen to the `rebalance` event as an emitted event. This event is not emitted when using the internal `librdkafka` rebalancer. + +#### Commits + +When you commit in `confluent-kafka-javascript`, the standard way is to queue the commit request up with the next `librdkafka` request to the broker. When doing this, there isn't a way to know the result of the commit. Luckily there is another callback you can listen to to get this information + +```js +const consumer = new Kafka.KafkaConsumer({ + 'group.id': 'kafka', + 'metadata.broker.list': 'localhost:9092', + 'offset_commit_cb': (err, topicPartitions) => { + + if (err) { + // There was an error committing + console.error(err); + } else { + // Commit went through. 
Let's log the topic partitions + console.log(topicPartitions); + } + + } +}) +``` + +`this` is bound to the `KafkaConsumer` you have created. By specifying an `offset_commit_cb` you can also listen to the `offset.commit` event as an emitted event. It receives an error and the list of topic partitions as argument. This is not emitted unless opted in. + +#### Message Structure + +Messages that are returned by the `KafkaConsumer` have the following structure. + +```js +{ + value: Buffer.from('hi'), // message contents as a Buffer + size: 2, // size of the message, in bytes + topic: 'librdtesting-01', // topic the message comes from + offset: 1337, // offset the message was read from + partition: 1, // partition the message was on + key: 'someKey', // key of the message if present + timestamp: 1510325354780 // timestamp of message creation +} +``` + +#### Stream API + +The stream API is the easiest way to consume messages. The following example illustrates the use of the stream API: + +```js +// Read from the librdtesting-01 topic... note that this creates a new stream on each call! +const stream = KafkaConsumer.createReadStream(globalConfig, topicConfig, { + topics: ['librdtesting-01'] +}); + +stream.on('data', (message) => { + console.log('Got message'); + console.log(message.value.toString()); +}); +``` + +You can also get the `consumer` from the streamConsumer, for using consumer methods. The following example illustrates that: + +```js +stream.consumer.commit(); // Commits all locally stored offsets +``` + +#### Standard API + +You can also use the Standard API and manage callbacks and events yourself. You can choose different modes for consuming messages: + +* *Flowing mode*. This mode flows all of the messages it can read by maintaining an infinite loop in the event loop. It only stops when it detects the consumer has issued the `unsubscribe` or `disconnect` method. +* *Non-flowing mode*. This mode reads a single message from Kafka at a time manually. + +The following example illustrates flowing mode: +```js +// Flowing mode +consumer.connect(); + +consumer + .on('ready', () => { + consumer.subscribe(['librdtesting-01']); + + // Consume from the librdtesting-01 topic. This is what determines + // the mode we are running in. By not specifying a callback (or specifying + // only a callback) we get messages as soon as they are available. + consumer.consume(); + }) + .on('data', (data) => { + // Output the actual message contents + console.log(data.value.toString()); + }); +``` +The following example illustrates non-flowing mode: +```js +// Non-flowing mode +consumer.connect(); + +consumer + .on('ready', () => { + // Subscribe to the librdtesting-01 topic + // This makes subsequent consumes read from that topic. + consumer.subscribe(['librdtesting-01']); + + // Read one message every 1000 milliseconds + setInterval(() => { + consumer.consume(1); + }, 1000); + }) + .on('data', (data) => { + console.log('Message found! Contents below.'); + console.log(data.value.toString()); + }); +``` + +The following table lists important methods for this API. 
+ +| Method | Description | +|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `consumer.connect()` | Connects to the broker.

The `connect()` method emits the `ready` event when it has successfully connected. If it does not, the error will be passed through the callback. | +| `consumer.disconnect()` | Disconnects from the broker.

The `disconnect()` method emits the `disconnected` event when it has disconnected. If it does not, the error will be passed through the callback. | +| `consumer.subscribe(topics)` | Subscribes to an array of topics. | +| `consumer.unsubscribe()` | Unsubscribes from the currently subscribed topics.

You cannot subscribe to different topics without calling the `unsubscribe()` method first. | +| `consumer.consume(cb)` | Gets messages from the existing subscription as quickly as possible. If `cb` is specified, invokes `cb(err, message)`.

This method keeps a background thread running to do the work. Note that the number of threads in nodejs process is limited by `UV_THREADPOOL_SIZE` (default value is 4) and using up all of them blocks other parts of the application that need threads. If you need multiple consumers then consider increasing `UV_THREADPOOL_SIZE` or using `consumer.consume(number, cb)` instead. | +| `consumer.consume(number, cb)` | Gets `number` of messages from the existing subscription. If `cb` is specified, invokes `cb(err, message)`. | +| `consumer.commit()` | Commits all locally stored offsets | +| `consumer.commit(topicPartition)` | Commits offsets specified by the topic partition | +| `consumer.commitMessage(message)` | Commits the offsets specified by the message | + +The following table lists events for this API. + +| Event | Description | +|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `data` | When using the Standard API consumed messages are emitted in this event. | +| `partition.eof` | When using Standard API and the configuration option `enable.partition.eof` is set, `partition.eof` events are emitted in this event. The event contains `topic`, `partition` and `offset` properties. | +| `warning` | The event is emitted in case of `UNKNOWN_TOPIC_OR_PART` or `TOPIC_AUTHORIZATION_FAILED` errors when consuming in *Flowing mode*. Since the consumer will continue working if the error is still happening, the warning event should reappear after the next metadata refresh. To control the metadata refresh rate set `topic.metadata.refresh.interval.ms` property. Once you resolve the error, you can manually call `getMetadata` to speed up consumer recovery. | +| `disconnected` | The `disconnected` event is emitted when the broker disconnects.

This event is only emitted when `.disconnect()` is called. The wrapper will always try to reconnect otherwise. | +| `ready` | The `ready` event is emitted when the `Consumer` is ready to read messages. | +| `event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option). | +| `event.log` | The `event.log` event is emitted when logging events occur (if you opted into logging via the `event_cb` option).

You will need to set a value for `debug` if you want information to send. | +| `event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | +| `event.error` | The `event.error` event is emitted when `librdkafka` reports an error | +| `event.throttle` | The `event.throttle` event is emitted when `librdkafka` reports throttling. | + +### Reading current offsets from the broker for a topic + +Some times you find yourself in the situation where you need to know the latest (and earliest) offset for one of your topics. Connected producers and consumers both allow you to query for these through `queryWaterMarkOffsets` like follows: + +```js +const timeout = 5000, partition = 0; +consumer.queryWatermarkOffsets('my-topic', partition, timeout, (err, offsets) => { + const high = offsets.highOffset; + const low = offsets.lowOffset; +}); + +producer.queryWatermarkOffsets('my-topic', partition, timeout, (err, offsets) => { + const high = offsets.highOffset; + const low = offsets.lowOffset; +}); + +An error will be returned if the client was not connected or the request timed out within the specified interval. + +``` + +### Metadata + +Both `Kafka.Producer` and `Kafka.KafkaConsumer` include a `getMetadata` method to retrieve metadata from Kafka. + +Getting metadata on any connection returns the following data structure: + +```js +{ + orig_broker_id: 1, + orig_broker_name: "broker_name", + brokers: [ + { + id: 1, + host: 'localhost', + port: 40 + } + ], + topics: [ + { + name: 'awesome-topic', + partitions: [ + { + id: 1, + leader: 20, + replicas: [1, 2], + isrs: [1, 2] + } + ] + } + ] +} +``` + +The following example illustrates how to use the `getMetadata` method. + +When fetching metadata for a specific topic, if a topic reference does not exist, one is created using the default config. +Please see the documentation on `Client.getMetadata` if you want to set configuration parameters, e.g. `acks`, on a topic to produce messages to. + +```js +const opts = { + topic: 'librdtesting-01', + timeout: 10000 +}; + +producer.getMetadata(opts, (err, metadata) => { + if (err) { + console.error('Error getting metadata'); + console.error(err); + } else { + console.log('Got metadata'); + console.log(metadata); + } +}); +``` + +### Admin Client + +`confluent-kafka-javascript` now supports the admin client for creating, deleting, and scaling out topics. The `librdkafka` APIs also support altering configuration of topics and broker, but that is not currently implemented. + +To create an Admin client, you can do as follows: + +```js +const Kafka = require('@confluentinc/kafka-javascript'); + +const client = Kafka.AdminClient.create({ + 'client.id': 'kafka-admin', + 'metadata.broker.list': 'broker01' +}); +``` + +This will instantiate the `AdminClient`, which will allow the calling of the admin methods. + +```js +client.createTopic({ + topic: topicName, + num_partitions: 1, + replication_factor: 1 +}, (err) => { + // Done! +}); +``` + +All of the admin api methods can have an optional timeout as their penultimate parameter. + +The following table lists important methods for this API. + +| Method | Description | +|----------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------| +| `client.disconnect()` | Destroy the admin client, making it invalid for further use. 
| +| `client.createTopic(topic, timeout, cb)` | Create a topic on the broker with the given configuration. See JS doc for more on structure of the topic object | +| `client.deleteTopic(topicName, timeout, cb)` | Delete a topic of the given name | +| `client.createPartitions(topicName, desiredPartitions, timeout, cb)` | Create partitions until the topic has the desired number of partitions. | + +Check the tests for an example of how to use this API! diff --git a/LICENSE.kafkajs b/LICENSE.kafkajs new file mode 100644 index 00000000..3a91a6dc --- /dev/null +++ b/LICENSE.kafkajs @@ -0,0 +1,31 @@ +The promisified API (lib/kafkajs) is inspired by kafkajs (github.com/tulios/kafkajs). +The promisified tests (test/promisified) are also adapted from there. +Many error types are also adapted from there. +The license notice is reproduced below. + +---- + +The MIT License + +Copyright (c) 2018 Túlio Ornelas (ornelas.tulio@gmail.com) + +Permission is hereby granted, free of charge, +to any person obtaining a copy of this software and +associated documentation files (the "Software"), to +deal in the Software without restriction, including +without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom +the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSE.node-rdkafka b/LICENSE.node-rdkafka new file mode 100644 index 00000000..e7a18a6f --- /dev/null +++ b/LICENSE.node-rdkafka @@ -0,0 +1,25 @@ +This project is based on node-rdkafka (github.com/Blizzard/node-rdkafka). +The license notice is reproduced below. + +-------- + +The MIT License (MIT) +Copyright (c) 2016 Blizzard Entertainment + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
\ No newline at end of file diff --git a/LICENSE.txt b/LICENSE.txt index d5ad6d41..101cc5a7 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,5 +1,5 @@ The MIT License (MIT) -Copyright (c) 2016-2023 Blizzard Entertainment +Copyright (c) 2023 Confluent, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 00000000..3c28761b --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,406 @@ +# Migration Guide + +## KafkaJS + + +1. Change the import statement, and add a `kafkaJS` block around your configs. + ```javascript + const { Kafka } = require('kafkajs'); + const kafka = new Kafka({ brokers: ['kafka1:9092', 'kafka2:9092'], /* ... */ }); + const producer = kafka.producer({ /* ... */, }); + ``` + to + ```javascript + const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + const kafka = new Kafka({ kafkaJS: { brokers: ['kafka1:9092', 'kafka2:9092'], /* ... */ } }); + const producer = kafka.producer({ kafkaJS: { /* ... */, } }); + ``` + +2. Try running your program. In case a migration is needed, an informative error will be thrown. + If you're using Typescript, some of these changes will be caught at compile time. + +3. The most common expected changes to the code are: + - For the **producer**: `acks`, `compression` and `timeout` are not set on a per-send() basis. + Rather, they must be configured in the top-level configuration while creating the producer. + - For the **consumer**: + - `fromBeginning` is not set on a per-subscribe() basis. + Rather, it must be configured in the top-level configuration while creating the consumer. + - `autoCommit` and `autoCommitInterval` are not set on a per-run() basis. + Rather, they must be configured in the top-level configuration while creating the consumer. + - `autoCommitThreshold` is not supported. + - `eachBatch`'s batch size never exceeds 1. + - For errors: Check the `error.code` rather than the error `name` or `type`. + +4. A more exhaustive list of semantic and configuration differences is [presented below](#common). + +5. An example migration: + +```diff +-const { Kafka } = require('kafkajs'); ++const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +const kafka = new Kafka({ ++ kafkaJS: { + clientId: 'my-app', + brokers: ['kafka1:9092', 'kafka2:9092'] ++ } +}) + +const producerRun = async () => { +- const producer = kafka.producer(); ++ const producer = kafka.producer({ kafkaJS: { acks: 1 } }); + await producer.connect(); + await producer.send({ + topic: 'test-topic', +- acks: 1, + messages: [ + { value: 'Hello confluent-kafka-javascript user!' 
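+        // Other standard message fields (key, partition, timestamp, headers) could be added here as well.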
}, + ], + }); +}; + + +const consumerRun = async () => { + // Consuming +- const consumer = kafka.consumer({ groupId: 'test-group' }); ++ const consumer = kafka.consumer({ kafkaJS: { groupId: 'test-group', fromBeginning: true } }); + await consumer.connect(); +- await consumer.subscribe({ topic: 'test-topic', fromBeginning: true }); ++ await consumer.subscribe({ topic: 'test-topic' }); + + await consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + partition, + offset: message.offset, + value: message.value.toString(), + }) + }, + }); +}; + +producerRun().then(consumerRun).catch(console.error); +``` + +### Common + +#### Configuration changes + ```javascript + const kafka = new Kafka({ kafkaJS: { /* common configuration changes */ } }); + ``` + Each allowed config property is discussed in the table below. + If there is any change in semantics or the default values, the property and the change is **highlighted in bold**. + + | Property | Default Value | Comment | + |-------------------------------|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | **brokers** | - | A list of strings, representing the bootstrap brokers. **A function is no longer allowed as an argument for this.** | + | **ssl** | false | A boolean, set to true if ssl needs to be enabled. **Additional properties like CA, certificate, key, etc. need to be specified outside the kafkaJS block.** | + | **sasl** | - | An optional object of the form `{ mechanism: 'plain' | 'scram-sha-256' | 'scram-sha-512', username: string, password: string }` or `{ mechanism: 'oauthbearer', oauthBearerProvider: function }`. Note that for OAUTHBEARER based authentication, the provider function must return lifetime (in ms), and principal name along with token value. **Additional authentication types are not supported.** | + | clientId | "rdkafka" | An optional string used to identify the client. | + | **connectionTimeout** | 1000 | This timeout is not enforced individually, but a sum of `connectionTimeout` and `authenticationTimeout` is enforced together. | + | **authenticationTimeout** | 10000 | This timeout is not enforced individually, but a sum of `connectionTimeout` and `authenticationTimeout` is enforced together. | + | **reauthenticationThreshold** | **80% of connections.max.reauth.ms** | **No longer checked, the default is always used.** | + | requestTimeout | 30000 | number of milliseconds for a network request to timeout. | + | **enforceRequestTimeout** | true | When set to false, `requestTimeout` is set to 5 minutes. **This cannot be completely disabled.** | + | retry | object | Properties individually discussed below. | + | retry.maxRetryTime | 30000 | maximum time to backoff a retry, in milliseconds. | + | retry.initialRetryTime | 300 | minimum time to backoff a retry, in milliseconds | + | **retry.retries** | 5 | Total cap on the number of retries. **Applicable only to Produce requests.** | + | **retry.factor** | 0.2 | Randomization factor (jitter) for backoff. **Cannot be changed**. | + | **retry.multiplier** | 2 | Multiplier for exponential factor of backoff. 
**Cannot be changed.** | + | **retry.restartOnFailure** | true | Consumer only. **Cannot be changed**. Consumer will always make an attempt to restart. | + | logLevel | `logLevel.INFO` | Decides the severity level of the logger created by the underlying library. A logger created with the `INFO` level will not be able to log `DEBUG` messages later. | + | **socketFactory** | null | **No longer supported.** | + | outer config | {} | The configuration outside the kafkaJS block can contain any of the keys present in the [librdkafka CONFIGURATION table](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md). | + + +### Producer + +#### Producer Configuration Changes + + ```javascript + const producer = kafka.producer({ kafkaJS: { /* producer-specific configuration changes. */ } }); + ``` + + Each allowed config property is discussed in the table below. + If there is any change in semantics or the default values, the property and the change is **highlighted in bold**. + + | Property | Default Value | Comment | + |-------------------------|------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | **createPartitioner** | DefaultPartioner (murmur2_random) - Java client compatible | Custom partitioner support is not yet provided. The default partitioner's behaviour is retained, and a number of partitioners are provided via the `partitioner` property, which is specified outside the `kafkaJS` block. | + | **retry** | object | Identical to `retry` in the common configuration. This takes precedence over the common config retry. | + | metadataMaxAge | 5 minutes | Time in milliseconds after which to refresh metadata for known topics | + | allowAutoTopicCreation | true | Determines if a topic should be created if it doesn't exist while producing. | + | transactionTimeout | 60000 | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. Only applicable when `transactionalId` is set to true. | + | idempotent | false | If set to true, ensures that messages are delivered exactly once and in order. If true, certain constraints must be respected for other properties, `maxInFlightRequests <= 5`, `retry.retries >= 0` | + | **maxInFlightRequests** | null | Maximum number of in-flight requests **per broker connection**. If not set, it is practically unbounded (same as KafkaJS). | + | transactionalId | null | If set, turns this into a transactional producer with this identifier. This also automatically sets `idempotent` to true. | + | **acks** | -1 | The number of required acks before a Produce succeeds. **This is set on a per-producer level, not on a per `send` level**. -1 denotes it will wait for all brokers in the in-sync replica set. | + | **compression** | CompressionTypes.NONE | Compression codec for Produce messages. **This is set on a per-producer level, not on a per `send` level**. It must be a key of CompressionType, namely GZIP, SNAPPY, LZ4, ZSTD or NONE. | + | **timeout** | 30000 | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker. **This is set on a per-producer level, not on a per `send` level**. 
| + | outer config | {} | The configuration outside the kafkaJS block can contain any of the keys present in the [librdkafka CONFIGURATION table](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md). | + + + +#### Semantic and Per-Method Changes + +* `send`: and `sendBatch`:` + - While sending multiple messages, even if one of the messages fails, the method throws an error. + - While `sendBatch` is available, it acts as a wrapper around send, and the actual batching is handled by librdkafka. + - `acks`, `compression` and `timeout` are not set on a per-send basis. Rather, they must be configured in the top-level configuration. See [configuration changes](#producer-configuration-changes). + Additionally, there are several more compression types available by default besides GZIP. + Before: + ```javascript + const kafka = new Kafka({/* ... */}); + const producer = kafka.producer(); + await producer.connect(); + + await producer.send({ + topic: 'test', + messages: [ /* ... */ ], + acks: 1, + compression: CompressionTypes.GZIP, + timeout: 30000, + }); + ``` + + After: + ```javascript + const kafka = new Kafka({ kafkaJS: { /* ... */ }}); + const producer = kafka.producer({ + kafkaJS: { + acks: 1, + compression: CompressionTypes.GZIP|CompressionTypes.SNAPPY|CompressionTypes.LZ4|CompressionTypes.ZSTD|CompressionTypes.NONE, + timeout: 30000, + } + }); + await producer.connect(); + + await producer.send({ + topic: 'test', + messages: [ /* ... */ ], + }); + ``` + - It's recommended to send a number of messages without awaiting them, and then calling `flush` to ensure all messages are sent, rather than awaiting each message. This is more efficient. + Example: + ```javascript + const kafka = new Kafka({ kafkaJS: { /* ... */ }}); + const producer = kafka.producer(); + await producer.connect(); + for (/*...*/) producer.send({ /* ... */}); + await producer.flush({timeout: 5000}); + ``` + + However, in case it is desired to await every message, `linger.ms` should be set to 0, to ensure that the default batching behaviour does not cause a delay in awaiting messages. + Example: + ```javascript + const kafka = new Kafka({ kafkaJS: { /* ... */ }}); + const producer = kafka.producer({ 'linger.ms': 0 }); + ``` + +* A transactional producer (with a `transactionId`) set, **cannot** send messages without initiating a transaction using `producer.transaction()`. +* While using `sendOffsets` from a transactional producer, the `consumerGroupId` argument must be omitted, and rather, the consumer object itself must be passed instead. + +### Consumer + +#### Consumer Configuration Changes + + ```javascript + const consumer = kafka.consumer({ kafkaJS: { /* producer-specific configuration changes. */ } }); + ``` + Each allowed config property is discussed in the table below. + If there is any change in semantics or the default values, the property and the change is **highlighted in bold**. + + | Property | Default Value | Comment | + |--------------------------|-----------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | groupId | - | A mandatory string denoting consumer group name that this consumer is a part of. | + | **partitionAssigners** | `[PartitionAssigners.roundRobin]` | Support for range, roundRobin, and cooperativeSticky assignors is provided. Custom assignors are not supported. 
| + | **partitionAssignors** | `[PartitionAssignors.roundRobin]` | Alias for `partitionAssigners` | + | **rebalanceTimeout** | **300000** | The maximum allowed time for each member to join the group once a rebalance has begun. Note, that setting this value *also* changes the max poll interval. Message processing in `eachMessage/eachBatch` must not take more than this time. | + | heartbeatInterval | 3000 | The expected time in milliseconds between heartbeats to the consumer coordinator. | + | metadataMaxAge | 5 minutes | Time in milliseconds after which to refresh metadata for known topics | + | allowAutoTopicCreation | true | Determines if a topic should be created if it doesn't exist while consuming. | + | **maxBytesPerPartition** | 1048576 (1MB) | determines how many bytes can be fetched in one request from a single partition. There is a change in semantics, this size grows dynamically if a single message larger than this is encountered, and the client does not get stuck. | + | minBytes | 1 | Minimum number of bytes the broker responds with (or wait until `maxWaitTimeInMs`) | + | maxBytes | 10485760 (10MB) | Maximum number of bytes the broker responds with. | + | **retry** | object | Identical to `retry` in the common configuration. This takes precedence over the common config retry. | + | readUncommitted | false | If true, consumer will read transactional messages which have not been committed. | + | **maxInFlightRequests** | null | Maximum number of in-flight requests **per broker connection**. If not set, it is practically unbounded (same as KafkaJS). | + | rackId | null | Can be set to an arbitrary string which will be used for fetch-from-follower if set up on the cluster. | + | **fromBeginning** | false | If there is initial offset in offset store or the desired offset is out of range, and this is true, we consume the earliest possible offset. **This is set on a per-consumer level, not on a per `subscribe` level**. | + | **autoCommit** | true | Whether to periodically auto-commit offsets to the broker while consuming. **This is set on a per-consumer level, not on a per `run` level**. | + | **autoCommitInterval** | 5000 | Offsets are committed periodically at this interval, if autoCommit is true. **This is set on a per-consumer level, not on a per `run` level. The default value is changed to 5 seconds.**. | + | outer config | {} | The configuration outside the kafkaJS block can contain any of the keys present in the [librdkafka CONFIGURATION table](https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md). | + + +#### Semantic and Per-Method Changes + + +* `subscribe`: + - Regex flags are ignored while passing a topic subscription (like 'i' or 'g'). Regexes must start with '^', otherwise, an error is thrown. + - Subscribe must be called only after `connect`. + - An optional parameter, `replace` is provided. + If set to true, the current subscription is replaced with the new one. If set to false, the new subscription is added to the current one, for example, + `consumer.subscribe({ topics: ['topic1'], replace: true});`. + The default value is false to retain existing behaviour. + - While passing a list of topics to `subscribe`, the `fromBeginning` is not set on a per-subscribe basis. Rather, it must be configured in the top-level configuration. 
+ + Before: + ```javascript + const consumer = kafka.consumer({ + groupId: 'test-group', + }); + await consumer.connect(); + await consumer.subscribe({ topics: ["topic"], fromBeginning: true}); + ``` + After: + ```javascript + const consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + } + }); + await consumer.connect(); + await consumer.subscribe({ topics: ["topic"] }); + ``` + +* `run` : + - For auto-committing using a consumer, the properties `autoCommit` and `autoCommitInterval` on `run` are not set on a per-subscribe basis. + Rather, they must be configured in the top-level configuration. + `autoCommitThreshold` is not supported. + If `autoCommit` is set to true, messages are *not* committed per-message, but rather periodically at the interval specified by `autoCommitInterval` (default 5 seconds). + + Before: + ```javascript + const kafka = new Kafka({ /* ... */ }); + const consumer = kafka.consumer({ /* ... */ }); + await consumer.connect(); + await consumer.subscribe({ topics: ["topic"] }); + consumer.run({ + eachMessage: someFunc, + autoCommit: true, + autoCommitInterval: 5000, + }); + ``` + After: + ```javascript + const kafka = new Kafka({ kafkaJS: { /* ... */ } }); + const consumer = kafka.consumer({ + kafkaJS: { + /* ... */, + autoCommit: true, + autoCommitInterval: 5000, + }, + }); + await consumer.connect(); + await consumer.subscribe({ topics: ["topic"] }); + consumer.run({ + eachMessage: someFunc, + }); + ``` + - The `heartbeat()` no longer needs to be called by the user in the `eachMessage/eachBatch` callback. + Heartbeats are automatically managed by librdkafka. + - The `partitionsConsumedConcurrently` is supported by both `eachMessage` and `eachBatch`. + - An API compatible version of `eachBatch` is available, but the batch size calculation is not + as per configured parameters, rather, a constant maximum size is configured internally. This is subject + to change. + The property `eachBatchAutoResolve` is supported. + Within the `eachBatch` callback, use of `uncommittedOffsets` is unsupported, + and within the returned batch, `offsetLag` and `offsetLagLow` are unsupported. +* `commitOffsets`: + - Does not yet support sending metadata for topic partitions being committed. + - If called with no arguments, it commits all offsets passed to the user (or the stored offsets, if manually handling offset storage using `consumer.storeOffsets`). +* `seek`: + - The restriction to call seek only after `run` is removed. It can be called any time. +* `pause` and `resume`: + - These methods MUST be called after the consumer group is joined. + In practice, this means it can be called whenever `consumer.assignment()` has a non-zero size, or within the `eachMessage/eachBatch` callback. +* `stop` is not yet supported, and the user must disconnect the consumer. + +### Admin Client + +The admin-client only has support for a limited subset of methods, with more to be added. + + * The `createTopics` method does not yet support the `validateOnly` or `waitForLeaders` properties, and the per-topic configuration + does not support `replicaAssignment`. + * The `deleteTopics` method is fully supported. + * The `listTopics` method is supported with an additional `timeout` option. + * The `listGroups` method is supported with additional `timeout` and `matchConsumerGroupStates` options. + A number of additional properties have been added to the returned groups, and a list of errors within the returned object. 
+ * The `describeGroups` method is supported with additional `timeout` and `includeAuthorizedOperations` options. + A number of additional properties have been added to the returned groups. + * The `deleteGroups` method is supported with an additional `timeout` option. + +### Using the Schema Registry + +In case you are using the Schema Registry client at `kafkajs/confluent-schema-registry`, you will not need to make any changes to the usage. +An example is made available [here](./examples/kafkajs/sr.js). + +### Error Handling + + Convert any checks based on `instanceof` and `error.name` or to error checks based on `error.code` or `error.type`. + + **Example**: + ```javascript + try { + await producer.send(/* args */); + } catch (error) { + if (!Kafka.isKafkaJSError(error)) { /* unrelated err handling */ } + else if (error.fatal) { /* fatal error, abandon producer */ } + else if (error.code === Kafka.ErrorCode.ERR__QUEUE_FULL) { /*...*/ } + else if (error.type === 'ERR_MSG_SIZE_TOO_LARGE') { /*...*/ } + /* and so on for specific errors */ + } + ``` + + **Error Type Changes**: + + Some possible subtypes of `KafkaJSError` have been removed, + and additional information has been added into `KafkaJSError`. + Fields have been added denoting if the error is fatal, retriable, or abortable (the latter two only relevant for a transactional producer). + Some error-specific fields have also been removed. + + An exhaustive list of changes is at the bottom of this section. + + For compatibility, as many error types as possible have been retained, but it is + better to switch to checking the `error.code`. + + + Exhaustive list of error types and error fields removed: + | Error | Change | + |-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| + | `KafkaJSNonRetriableError` | Removed. Retriable errors are automatically retried by librdkafka, so there's no need for this type. Note that `error.retriable` still exists, but it's applicable only for transactional producer, where users are expected to retry an action themselves. All error types using this as a superclass now use `KafkaJSError` as their superclass. | + | `KafkaJSOffsetOutOfRange` | `topic` and `partition` are removed from this object. | + | `KafkaJSMemberIdRequired` | Removed. Automatically handled by librdkafka. | + | `KafkaJSNumberOfRetriesExceeded` | Removed. Retries are handled by librdkafka. | + | `KafkaJSNumberOfRetriesExceeded` | `broker, correlationId, createdAt, sentAt` and `pendingDuration` are removed from this object. | + | `KafkaJSMetadataNotLoaded` | Removed. Metadata is automatically reloaded by librdkafka. | + | `KafkaJSTopicMetadataNotLoaded` | Removed. Topic metadata is automatically reloaded by librdkafka. | + | `KafkaJSStaleTopicMetadataAssignment` | removed as it's automatically refreshed by librdkafka. | + | `KafkaJSDeleteGroupsError` | Removed, as the Admin Client doesn't have this yet. May be added back again, or changed. | + | `KafkaJSServerDoesNotSupportApiKey` | Removed, as this error isn't generally exposed to user in librdkafka. If raised, it is subsumed into `KafkaJSError` where `error.code === Kafka.ErrorCode.ERR_UNSUPPORTED_VERSION`. | + | `KafkaJSBrokerNotFound` | Removed. 
This error isn't exposed directly to the user in librdkafka. | + | `KafkaJSLockTimeout` | Removed. This error is not applicable while using librdkafka. | + | `KafkaJSUnsupportedMagicByteInMessageSet` | Removed. It is subsumed into `KafkaJSError` where `error.code === Kafka.ErrorCode.ERR_UNSUPPORTED_VERSION`. | + | `KafkaJSDeleteTopicRecordsError` | Removed, as the Admin Client doesn't have this yet. May be added back again, or changed. | + | `KafkaJSInvariantViolation` | Removed, as it's not applicable to librdkafka. Errors in internal state are subsumed into `KafkaJSError` where `error.code === Kafka.ErrorCode.ERR__STATE`. | + | `KafkaJSInvalidVarIntError` | Removed. This error isn't exposed directly to the user in librdkafka. | + | `KafkaJSInvalidLongError` | Removed. This error isn't exposed directly to the user in librdkafka. | + | `KafkaJSCreateTopicError` | Removed, as the Admin Client doesn't have this yet. May be added back again, or changed.. | + | `KafkaJSAlterPartitionReassignmentsError` | removed, as the RPC is not used in librdkafka. | + | `KafkaJSFetcherRebalanceError` | Removed. This error isn't exposed directly to the user in librdkafka. | + | `KafkaJSConnectionError` | `broker` is removed from this object. | + | `KafkaJSConnectionClosedError` | Removed. Subsumed into `KafkaJSConnectionError` as librdkafka treats them equivalently. | + +## node-rdkafka + +Change the import statement, from + ```javascript + const Kafka = require('node-rdkafka'); + ``` + to + ```javascript + const Kafka = require('@confluentinc/kafka-javascript'); + ``` +The rest of the functionality should work as usual. \ No newline at end of file diff --git a/Makefile b/Makefile index c054b210..e8ab3f5e 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,14 @@ NODE-GYP ?= node_modules/.bin/node-gyp -# Sick of changing this. Do a check and try to use python 2 if it doesn't work -PYTHON_VERSION_FULL := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1))) -PYTHON_VERSION_MAJOR := $(word 1,${PYTHON_VERSION_FULL}) - -ifeq ($(PYTHON_VERSION_MAJOR), 2) PYTHON = python -else -PYTHON = python2 +ifeq (, $(shell command -v python)) + PYTHON = python3 endif - NODE ?= node CPPLINT ?= cpplint.py BUILDTYPE ?= Release -TESTS = "test/**/*.js" +TESTS = $(ls test/producer/*.js test/*.js test/tools/*.js) +PROMISIFIED_TESTS = "test/promisified" E2E_TESTS = $(wildcard e2e/*.spec.js) TEST_REPORTER = TEST_OUTPUT = @@ -23,8 +18,7 @@ CONFIG_OUTPUTS = \ build/binding.Makefile build/config.gypi CPPLINT_FILES = $(wildcard src/*.cc src/*.h) -CPPLINT_FILTER = -legal/copyright -JSLINT_FILES = lib/*.js test/*.js e2e/*.js +CPPLINT_FILTER = -legal/copyright,-readability/todo,-whitespace/indent_namespace,-runtime/references PACKAGE = $(shell node -pe 'require("./package.json").name.split("/")[1]') VERSION = $(shell node -pe 'require("./package.json").version') @@ -38,13 +32,13 @@ endif all: lint lib test e2e -lint: cpplint jslint +lint: cpplint eslint cpplint: @$(PYTHON) $(CPPLINT) --filter=$(CPPLINT_FILTER) $(CPPLINT_FILES) -jslint: node_modules/.dirstamp - @./node_modules/.bin/jshint --verbose $(JSLINT_FILES) +eslint: node_modules/.dirstamp + @./node_modules/.bin/eslint . 
lib: node_modules/.dirstamp $(CONFIG_OUTPUTS) @PYTHONHTTPSVERIFY=0 $(NODE-GYP) build $(GYPBUILDARGS) @@ -59,6 +53,9 @@ $(CONFIG_OUTPUTS): node_modules/.dirstamp binding.gyp test: node_modules/.dirstamp @./node_modules/.bin/mocha --ui exports $(TEST_REPORTER) $(TESTS) $(TEST_OUTPUT) +promisified_test: node_modules/.dirstamp + @./node_modules/.bin/jest --ci --runInBand $(PROMISIFIED_TESTS) + check: node_modules/.dirstamp @$(NODE) util/test-compile.js @@ -78,7 +75,7 @@ endef docs: node_modules/.dirstamp @rm -rf docs - @./node_modules/jsdoc/jsdoc.js --destination docs \ + @./node_modules/jsdoc/jsdoc.js --debug --destination docs \ --recurse -R ./README.md \ -c ./jsdoc.conf \ --tutorials examples/ ./lib @@ -91,4 +88,4 @@ release-patch: clean: node_modules/.dirstamp @rm -f deps/librdkafka/config.h - @$(NODE-GYP) clean + @$(NODE-GYP) clean \ No newline at end of file diff --git a/QUICKSTART.md b/QUICKSTART.md new file mode 100644 index 00000000..1dddd66b --- /dev/null +++ b/QUICKSTART.md @@ -0,0 +1,70 @@ +# Basic Producer Example + +```javascript +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function producerStart() { + const producer = new Kafka().producer({ + 'bootstrap.servers': '', + }); + + await producer.connect(); + + const deliveryReports = await producer.send({ + topic: 'topic2', + messages: [ + { value: 'v222', partition: 0 }, + { value: 'v11', partition: 0, key: 'x' }, + ] + }); + + await producer.disconnect(); +} + +producerStart(); +``` + +# Basic Consumer Example + +```javascript +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function consumerStart() { + let consumer; + let stopped = false; + + // Initialization + consumer = new Kafka().consumer({ + 'bootstrap.servers': '', + 'group.id': 'test', + 'auto.offset.reset': 'earliest', + }); + + await consumer.connect(); + await consumer.subscribe({ topics: ["topic"] }); + + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + topic, + partition, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString(), + }); + } + }); + + // Update stopped whenever we're done consuming. + // The update can be in another async function or scheduled with setTimeout etc. + while(!stopped) { + await new Promise(resolve => setTimeout(resolve, 1000)); + } + + await consumer.disconnect(); +} + +consumerStart(); +``` + +See the examples in the [examples](examples) directory for more in-depth examples. \ No newline at end of file diff --git a/README.md b/README.md index cdba3ce0..f07ea273 100644 --- a/README.md +++ b/README.md @@ -1,631 +1,106 @@ -confluent-kafka-js - Node.js wrapper for Kafka C/C++ library -============================================== +Confluent's JavaScript Client for Apache KafkaTM +===================================================== -Copyright (c) 2016-2023 Blizzard Entertainment. +**confluent-kafka-javascript** is Confluent's JavaScript client for [Apache Kafka](http://kafka.apache.org/) and the +[Confluent Platform](https://www.confluent.io/product/compare/). This is an **limited availability** library. The goal is to provide an highly performant, reliable and easy to use JavaScript client that is based on [node-rdkafka](https://github.com/Blizzard/node-rdkafka) yet also API compatible with [KafkaJS](https://github.com/tulios/kafkajs) to provide flexibility to users and streamline migrations from other clients. 
-[https://github.com/confluentinc/confluent-kafka-js](https://github.com/confluentinc/confluent-kafka-js) +Features: -# Looking for Collaborators! +- **High performance** - confluent-kafka-javascript is a lightweight wrapper around +[librdkafka](https://github.com/confluentinc/librdkafka), a finely tuned C +client. -I am looking for *your* help to make this project even better! If you're interested, check [this out](https://github.com/confluentinc/confluent-kafka-js/issues/628) +- **Reliability** - There are a lot of details to get right when writing an Apache Kafka +client. We get them right in one place (librdkafka) and leverage this work +across all of our clients. -# Overview +- **Supported** - Commercial support is offered by [Confluent](https://confluent.io/). -The `confluent-kafka-js` library is a high-performance NodeJS client for [Apache Kafka](http://kafka.apache.org/) that wraps the native [librdkafka](https://github.com/edenhill/librdkafka) library. All the complexity of balancing writes across partitions and managing (possibly ever-changing) brokers should be encapsulated in the library. +- **Future proof** - Confluent, founded by the +creators of Kafka, is building a [streaming platform](https://www.confluent.io/product/) +with Apache Kafka at its core. It's high priority for us that client features keep +pace with core Apache Kafka and components of the [Confluent Platform](https://www.confluent.io/product/). -__This library currently uses `librdkafka` version `2.3.0`.__ +This library leverages the work and concepts from two popular Apache Kafka JavaScript clients: [node-rdkafka](https://github.com/Blizzard/node-rdkafka) and [KafkaJS](https://github.com/tulios/kafkajs). The core is heavily based on the node-rdkafka library, which uses our own [librdkafka](https://github.com/confluentinc/librdkafka) library for core client functionality. However, we leverage a promisified API and a more idiomatic interface, similar to the one in KafkaJS, making it easy for developers to migrate and adopt this client depending on the patterns and interface they prefer. We're very happy to have been able to leverage the excellent work of the many authors of these libraries! -## Reference Docs +### This library is currently in limited-availability -To view the reference docs for the current version, go [here](https://confluentinc.github.io/confluent-kafka-js/current/) +To use **Schema Registry**, use the existing [@confluentinc/schemaregistry](https://www.npmjs.com/package/@confluentinc/schemaregistry) library that is compatible with this library. For a simple schema registry example, see [sr.js](https://github.com/confluentinc/confluent-kafka-javascript/blob/dev_early_access_development_branch/examples/kafkajs/sr.js). -## Contributing - -For guidelines on contributing please see [CONTRIBUTING.md](https://github.com/confluentinc/confluent-kafka-js/blob/master/CONTRIBUTING.md) - -## Code of Conduct - -Play nice; Play fair. ## Requirements -* Apache Kafka >=0.9 -* Node.js >=4 -* Linux/Mac -* Windows?! See below -* OpenSSL - -### Mac OS High Sierra / Mojave - -OpenSSL has been upgraded in High Sierra and homebrew does not overwrite default system libraries. That means when building confluent-kafka-js, because you are using openssl, you need to tell the linker where to find it: - -```sh -export CPPFLAGS=-I/usr/local/opt/openssl/include -export LDFLAGS=-L/usr/local/opt/openssl/lib -``` - -Then you can run `npm install` on your application to get it to build correctly. 
- -__NOTE:__ From the `librdkafka` docs - -> WARNING: Due to a bug in Apache Kafka 0.9.0.x, the ApiVersionRequest (as sent by the client when connecting to the broker) will be silently ignored by the broker causing the request to time out after 10 seconds. This causes client-broker connections to stall for 10 seconds during connection-setup before librdkafka falls back on the `broker.version.fallback` protocol features. The workaround is to explicitly configure `api.version.request` to `false` on clients communicating with <=0.9.0.x brokers. - -### Alpine - -Using Alpine Linux? Check out the [docs](https://github.com/confluentinc/confluent-kafka-js/blob/master/examples/docker-alpine.md). - -### Windows - -Windows build **is not** compiled from `librdkafka` source but it is rather linked against the appropriate version of [NuGet librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) static binary that gets downloaded from `https://globalcdn.nuget.org/packages/librdkafka.redist.2.3.0.nupkg` during installation. This download link can be changed using the environment variable `NODE_RDKAFKA_NUGET_BASE_URL` that defaults to `https://globalcdn.nuget.org/packages/` when it's no set. - -Requirements: - * [node-gyp for Windows](https://github.com/nodejs/node-gyp#on-windows) - -**Note:** I _still_ do not recommend using `confluent-kafka-js` in production on Windows. This feature was in high demand and is provided to help develop, but we do not test against Windows, and windows support may lag behind Linux/Mac support because those platforms are the ones used to develop this library. Contributors are welcome if any Windows issues are found :) - -## Tests - -This project includes two types of unit tests in this project: -* end-to-end integration tests -* unit tests - -You can run both types of tests by using `Makefile`. Doing so calls `mocha` in your locally installed `node_modules` directory. - -* Before you run the tests, be sure to init and update the submodules: - 1. `git submodule init` - 2. `git submodule update` -* To run the unit tests, you can run `make lint` or `make test`. -* To run the integration tests, you must have a running Kafka installation available. By default, the test tries to connect to `localhost:9092`; however, you can supply the `KAFKA_HOST` environment variable to override this default behavior. Run `make e2e`. - -# Usage - -You can install the `confluent-kafka-js` module like any other module: - -``` -npm install confluent-kafka-js -``` - -To use the module, you must `require` it. - -```js -const Kafka = require('confluent-kafka-js'); -``` - -## Configuration - -You can pass many configuration options to `librdkafka`. A full list can be found in `librdkafka`'s [Configuration.md](https://github.com/edenhill/librdkafka/blob/v2.3.0/CONFIGURATION.md) - -Configuration keys that have the suffix `_cb` are designated as callbacks. Some -of these keys are informational and you can choose to opt-in (for example, `dr_cb`). Others are callbacks designed to -return a value, such as `partitioner_cb`. - -Not all of these options are supported. -The library will throw an error if the value you send in is invalid. - -The library currently supports the following callbacks: -* `partitioner_cb` -* `dr_cb` or `dr_msg_cb` -* `event_cb` -* `rebalance_cb` (see [Rebalancing](#rebalancing)) -* `offset_commit_cb` (see [Commits](#commits)) - -### Librdkafka Methods - -This library includes two utility functions for detecting the status of your installation. 
Please try to include these when making issue reports where applicable. - -You can get the features supported by your compile of `librdkafka` by reading the variable "features" on the root of the `confluent-kafka-js` object. - -```js -const Kafka = require('confluent-kafka-js'); -console.log(Kafka.features); - -// #=> [ 'gzip', 'snappy', 'ssl', 'sasl', 'regex', 'lz4' ] -``` - -You can also get the version of `librdkafka` - -```js -const Kafka = require('confluent-kafka-js'); -console.log(Kafka.librdkafkaVersion); - -// #=> 2.3.0 -``` - -## Sending Messages - -A `Producer` sends messages to Kafka. The `Producer` constructor takes a configuration object, as shown in the following example: - -```js -const producer = new Kafka.Producer({ - 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' -}); -``` - -A `Producer` requires only `metadata.broker.list` (the Kafka brokers) to be created. The values in this list are separated by commas. For other configuration options, see the [Configuration.md](https://github.com/edenhill/librdkafka/blob/v2.3.0/CONFIGURATION.md) file described previously. - -The following example illustrates a list with several `librdkafka` options set. - -```js -const producer = new Kafka.Producer({ - 'client.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'compression.codec': 'gzip', - 'retry.backoff.ms': 200, - 'message.send.max.retries': 10, - 'socket.keepalive.enable': true, - 'queue.buffering.max.messages': 100000, - 'queue.buffering.max.ms': 1000, - 'batch.num.messages': 1000000, - 'dr_cb': true -}); -``` - -#### Stream API - -You can easily use the `Producer` as a writable stream immediately after creation (as shown in the following example): - -```js -// Our producer with its Kafka brokers -// This call returns a new writable stream to our topic 'topic-name' -const stream = Kafka.Producer.createWriteStream({ - 'metadata.broker.list': 'kafka-host1:9092,kafka-host2:9092' -}, {}, { - topic: 'topic-name' -}); - -// Writes a message to the stream -const queuedSuccess = stream.write(Buffer.from('Awesome message')); - -if (queuedSuccess) { - console.log('We queued our message!'); -} else { - // Note that this only tells us if the stream's queue is full, - // it does NOT tell us if the message got to Kafka! See below... - console.log('Too many messages in our queue already'); -} - -// NOTE: MAKE SURE TO LISTEN TO THIS IF YOU WANT THE STREAM TO BE DURABLE -// Otherwise, any error will bubble up as an uncaught exception. -stream.on('error', (err) => { - // Here's where we'll know if something went wrong sending to Kafka - console.error('Error in our kafka stream'); - console.error(err); -}) -``` - -If you do not want your code to crash when an error happens, ensure you have an `error` listener on the stream. Most errors are not necessarily fatal, but the ones that are will immediately destroy the stream. If you use `autoClose`, the stream will close itself at the first sign of a problem. - -#### Standard API - -The Standard API is more performant, particularly when handling high volumes of messages. -However, it requires more manual setup to use. 
The following example illustrates its use: - -```js -const producer = new Kafka.Producer({ - 'metadata.broker.list': 'localhost:9092', - 'dr_cb': true -}); - -// Connect to the broker manually -producer.connect(); - -// Wait for the ready event before proceeding -producer.on('ready', () => { - try { - producer.produce( - // Topic to send the message to - 'topic', - // optionally we can manually specify a partition for the message - // this defaults to -1 - which will use librdkafka's default partitioner (consistent random for keyed messages, random for unkeyed messages) - null, - // Message to send. Must be a buffer - Buffer.from('Awesome message'), - // for keyed messages, we also specify the key - note that this field is optional - 'Stormwind', - // you can send a timestamp here. If your broker version supports it, - // it will get added. Otherwise, we default to 0 - Date.now(), - // you can send an opaque token here, which gets passed along - // to your delivery reports - ); - } catch (err) { - console.error('A problem occurred when sending our message'); - console.error(err); - } -}); - -// Any errors we encounter, including connection errors -producer.on('event.error', (err) => { - console.error('Error from producer'); - console.error(err); -}) - -// We must either call .poll() manually after sending messages -// or set the producer to poll on an interval (.setPollInterval). -// Without this, we do not get delivery events and the queue -// will eventually fill up. -producer.setPollInterval(100); -``` - -To see the configuration options available to you, see the [Configuration](#configuration) section. - -##### Methods - -|Method|Description| -|-------|----------| -|`producer.connect()`| Connects to the broker.

The `connect()` method emits the `ready` event when it connects successfully. If it does not, the error will be passed through the callback. | -|`producer.disconnect()`| Disconnects from the broker.

The `disconnect()` method emits the `disconnected` event when it has disconnected. If it does not, the error will be passed through the callback. | -|`producer.poll()` | Polls the producer for delivery reports or other events to be transmitted via the emitter.

In order to get the events in `librdkafka`'s queue to emit, you must call this regularly. | -|`producer.setPollInterval(interval)` | Polls the producer on this interval, handling disconnections and reconnection. Set it to 0 to turn it off. | -|`producer.produce(topic, partition, msg, key, timestamp, opaque)`| Sends a message.

The `produce()` method throws when produce would return an error. Ordinarily, this is just if the queue is full. | -|`producer.flush(timeout, callback)`| Flush the librdkafka internal queue, sending all messages. Default timeout is 500ms | -|`producer.initTransactions(timeout, callback)`| Initializes the transactional producer. | -|`producer.beginTransaction(callback)`| Starts a new transaction. | -|`producer.sendOffsetsToTransaction(offsets, consumer, timeout, callback)`| Sends consumed topic-partition-offsets to the broker, which will get committed along with the transaction. | -|`producer.abortTransaction(timeout, callback)`| Aborts the ongoing transaction. | -|`producer.commitTransaction(timeout, callback)`| Commits the ongoing transaction. | - -##### Events - -Some configuration properties that end in `_cb` indicate that an event should be generated for that option. You can either: - -* provide a value of `true` and react to the event -* provide a callback function directly +The following configurations are supported: -The following example illustrates an event: +* Any supported version of Node.js (The two LTS versions, 18 and 20, and the latest versions, 21 and 22). +* Linux (x64 and arm64) - both glibc and musl/alpine. +* macOS - arm64/m1. +* Windows - x64. -```js -const producer = new Kafka.Producer({ - 'client.id': 'my-client', // Specifies an identifier to use to help trace activity in Kafka - 'metadata.broker.list': 'localhost:9092', // Connect to a Kafka instance on localhost - 'dr_cb': true // Specifies that we want a delivery-report event to be generated -}); +Installation on any of these platforms is meant to be seamless, without any C/C++ compilation required. -// Poll for events every 100 ms -producer.setPollInterval(100); +In case your system configuration is not within the supported ones, [a supported version of Python](https://devguide.python.org/versions/) must be available on the system for the installation process. [This is required for the `node-gyp` build tool.](https://github.com/nodejs/node-gyp?tab=readme-ov-file#configuring-python-dependency). -producer.on('delivery-report', (err, report) => { - // Report of delivery statistics here: - // - console.log(report); -}); +```bash +npm install @confluentinc/kafka-javascript ``` -The following table describes types of events. +Yarn and pnpm support is experimental. -|Event|Description| -|-------|----------| -| `disconnected` | The `disconnected` event is emitted when the broker has disconnected.

This event is emitted only when `.disconnect` is called. The wrapper will always try to reconnect otherwise. | -| `ready` | The `ready` event is emitted when the `Producer` is ready to send messages. | -| `event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option). | -| `event.log` | The `event.log` event is emitted when logging events come in (if you opted into logging via the `event_cb` option).

You will need to set a value for `debug` if you want to send information. | -| `event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | -| `event.error` | The `event.error` event is emitted when `librdkafka` reports an error | -| `event.throttle` | The `event.throttle` event emitted when `librdkafka` reports throttling. | -| `delivery-report` | The `delivery-report` event is emitted when a delivery report has been found via polling.

To use this event, you must set `request.required.acks` to `1` or `-1` in topic configuration and `dr_cb` (or `dr_msg_cb` if you want the report to contain the message payload) to `true` in the `Producer` constructor options. | +# Getting Started -### Higher Level Producer +Below is a simple produce example for users migrating from KafkaJS. -The higher level producer is a variant of the producer which can propagate callbacks to you upon message delivery. +```javascript +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require("@confluentinc/kafka-javascript").KafkaJS; -```js -const producer = new Kafka.HighLevelProducer({ - 'metadata.broker.list': 'localhost:9092', -}); -``` - -This will enrich the produce call so it will have a callback to tell you when the message has been delivered. You lose the ability to specify opaque tokens. - -```js -producer.produce(topicName, null, Buffer.from('alliance4ever'), null, Date.now(), (err, offset) => { - // The offset if our acknowledgement level allows us to receive delivery offsets - console.log(offset); -}); -``` - -Additionally you can add serializers to modify the value of a produce for a key or value before it is sent over to Kafka. - -```js -producer.setValueSerializer((value) => { - return Buffer.from(JSON.stringify(value)); -}); -``` - -Otherwise the behavior of the class should be exactly the same. - -## Kafka.KafkaConsumer - -To read messages from Kafka, you use a `KafkaConsumer`. You instantiate a `KafkaConsumer` object as follows: - -```js -const consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', -}, {}); -``` - -The first parameter is the global config, while the second parameter is the topic config that gets applied to all subscribed topics. To view a list of all supported configuration properties, see the [Configuration.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) file described previously. Look for the `C` and `*` keys. - -The `group.id` and `metadata.broker.list` properties are required for a consumer. - -### Rebalancing - -Rebalancing is managed internally by `librdkafka` by default. If you would like to override this functionality, you may provide your own logic as a rebalance callback. - -```js -const consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'rebalance_cb': (err, assignment) => { - - if (err.code === Kafka.CODES.ERRORS.ERR__ASSIGN_PARTITIONS) { - // Note: this can throw when you are disconnected. Take care and wrap it in - // a try catch if that matters to you - this.assign(assignment); - } else if (err.code == Kafka.CODES.ERRORS.ERR__REVOKE_PARTITIONS){ - // Same as above - this.unassign(); - } else { - // We had a real error - console.error(err); - } - - } -}) -``` - -`this` is bound to the `KafkaConsumer` you have created. By specifying a `rebalance_cb` you can also listen to the `rebalance` event as an emitted event. This event is not emitted when using the internal `librdkafka` rebalancer. +async function producerStart() { + const kafka = new Kafka({ + kafkaJS: { + brokers: [''], + ssl: true, + sasl: { + mechanism: 'plain', + username: '', + password: '', + }, + } + }); -### Commits + const producer = kafka.producer(); -When you commit in `confluent-kafka-js`, the standard way is to queue the commit request up with the next `librdkafka` request to the broker. 
When doing this, there isn't a way to know the result of the commit. Luckily there is another callback you can listen to to get this information + await producer.connect(); -```js -const consumer = new Kafka.KafkaConsumer({ - 'group.id': 'kafka', - 'metadata.broker.list': 'localhost:9092', - 'offset_commit_cb': (err, topicPartitions) => { + console.log("Connected successfully"); - if (err) { - // There was an error committing - console.error(err); - } else { - // Commit went through. Let's log the topic partitions - console.log(topicPartitions); + const res = [] + for (let i = 0; i < 50; i++) { + res.push(producer.send({ + topic: 'test-topic', + messages: [ + { value: 'v222', partition: 0 }, + { value: 'v11', partition: 0, key: 'x' }, + ] + })); } + await Promise.all(res); - } -}) -``` - -`this` is bound to the `KafkaConsumer` you have created. By specifying an `offset_commit_cb` you can also listen to the `offset.commit` event as an emitted event. It receives an error and the list of topic partitions as argument. This is not emitted unless opted in. - -### Message Structure - -Messages that are returned by the `KafkaConsumer` have the following structure. - -```js -{ - value: Buffer.from('hi'), // message contents as a Buffer - size: 2, // size of the message, in bytes - topic: 'librdtesting-01', // topic the message comes from - offset: 1337, // offset the message was read from - partition: 1, // partition the message was on - key: 'someKey', // key of the message if present - timestamp: 1510325354780 // timestamp of message creation -} -``` - -### Stream API - -The stream API is the easiest way to consume messages. The following example illustrates the use of the stream API: - -```js -// Read from the librdtesting-01 topic... note that this creates a new stream on each call! -const stream = KafkaConsumer.createReadStream(globalConfig, topicConfig, { - topics: ['librdtesting-01'] -}); + await producer.disconnect(); -stream.on('data', (message) => { - console.log('Got message'); - console.log(message.value.toString()); -}); -``` - -You can also get the `consumer` from the streamConsumer, for using consumer methods. The following example illustrates that: - -```js -stream.consumer.commit(); // Commits all locally stored offsets -``` - -### Standard API - -You can also use the Standard API and manage callbacks and events yourself. You can choose different modes for consuming messages: - -* *Flowing mode*. This mode flows all of the messages it can read by maintaining an infinite loop in the event loop. It only stops when it detects the consumer has issued the `unsubscribe` or `disconnect` method. -* *Non-flowing mode*. This mode reads a single message from Kafka at a time manually. - -The following example illustrates flowing mode: -```js -// Flowing mode -consumer.connect(); - -consumer - .on('ready', () => { - consumer.subscribe(['librdtesting-01']); - - // Consume from the librdtesting-01 topic. This is what determines - // the mode we are running in. By not specifying a callback (or specifying - // only a callback) we get messages as soon as they are available. - consumer.consume(); - }) - .on('data', (data) => { - // Output the actual message contents - console.log(data.value.toString()); - }); -``` -The following example illustrates non-flowing mode: -```js -// Non-flowing mode -consumer.connect(); - -consumer - .on('ready', () => { - // Subscribe to the librdtesting-01 topic - // This makes subsequent consumes read from that topic. 
- consumer.subscribe(['librdtesting-01']); - - // Read one message every 1000 milliseconds - setInterval(() => { - consumer.consume(1); - }, 1000); - }) - .on('data', (data) => { - console.log('Message found! Contents below.'); - console.log(data.value.toString()); - }); -``` - -The following table lists important methods for this API. - -|Method|Description| -|-------|----------| -|`consumer.connect()` | Connects to the broker.

The `connect()` method emits the `ready` event when it has successfully connected. If it does not, the error will be passed through the callback. | -|`consumer.disconnect()` | Disconnects from the broker.

The `disconnect()` method emits `disconnected` when it has disconnected. If it does not, the error will be passed through the callback. | -|`consumer.subscribe(topics)` | Subscribes to an array of topics. | -|`consumer.unsubscribe()` | Unsubscribes from the currently subscribed topics.

You cannot subscribe to different topics without calling the `unsubscribe()` method first. | -|`consumer.consume(cb)` | Gets messages from the existing subscription as quickly as possible. If `cb` is specified, invokes `cb(err, message)`.

This method keeps a background thread running to do the work. Note that the number of threads in a Node.js process is limited by `UV_THREADPOOL_SIZE` (default value is 4) and using up all of them blocks other parts of the application that need threads. If you need multiple consumers, consider increasing `UV_THREADPOOL_SIZE` or using `consumer.consume(number, cb)` instead. | -|`consumer.consume(number, cb)` | Gets `number` of messages from the existing subscription. If `cb` is specified, invokes `cb(err, message)`. | -|`consumer.commit()` | Commits all locally stored offsets. | -|`consumer.commit(topicPartition)` | Commits the offsets specified by the topic partition. | -|`consumer.commitMessage(message)` | Commits the offsets specified by the message. | - -The following table lists events for this API. - -|Event|Description| -|-------|----------| -|`data` | When using the Standard API, consumed messages are emitted in this event. | -|`partition.eof` | When using the Standard API with the configuration option `enable.partition.eof` set, `partition.eof` events are emitted in this event. The event contains `topic`, `partition` and `offset` properties. | -|`warning` | This event is emitted in case of `UNKNOWN_TOPIC_OR_PART` or `TOPIC_AUTHORIZATION_FAILED` errors when consuming in *Flowing mode*. Since the consumer will continue working if the error is still happening, the warning event should reappear after the next metadata refresh. To control the metadata refresh rate, set the `topic.metadata.refresh.interval.ms` property. Once you resolve the error, you can manually call `getMetadata` to speed up consumer recovery. | -|`disconnected` | The `disconnected` event is emitted when the broker disconnects.

This event is only emitted when `.disconnect` is called. The wrapper will always try to reconnect otherwise. | -|`ready` | The `ready` event is emitted when the `Consumer` is ready to read messages. | -|`event` | The `event` event is emitted when `librdkafka` reports an event (if you opted in via the `event_cb` option). | -|`event.log` | The `event.log` event is emitted when logging events occur (if you opted into logging via the `event_cb` option).

You will need to set a value for `debug` if you want information to send. | -|`event.stats` | The `event.stats` event is emitted when `librdkafka` reports stats (if you opted in by setting the `statistics.interval.ms` to a non-zero value). | -|`event.error` | The `event.error` event is emitted when `librdkafka` reports an error | -|`event.throttle` | The `event.throttle` event is emitted when `librdkafka` reports throttling.| - -## Reading current offsets from the broker for a topic - -Some times you find yourself in the situation where you need to know the latest (and earliest) offset for one of your topics. Connected producers and consumers both allow you to query for these through `queryWaterMarkOffsets` like follows: - -```js -const timeout = 5000, partition = 0; -consumer.queryWatermarkOffsets('my-topic', partition, timeout, (err, offsets) => { - const high = offsets.highOffset; - const low = offsets.lowOffset; -}); - -producer.queryWatermarkOffsets('my-topic', partition, timeout, (err, offsets) => { - const high = offsets.highOffset; - const low = offsets.lowOffset; -}); - -An error will be returned if the client was not connected or the request timed out within the specified interval. - -``` - -## Metadata - -Both `Kafka.Producer` and `Kafka.KafkaConsumer` include a `getMetadata` method to retrieve metadata from Kafka. - -Getting metadata on any connection returns the following data structure: - -```js -{ - orig_broker_id: 1, - orig_broker_name: "broker_name", - brokers: [ - { - id: 1, - host: 'localhost', - port: 40 - } - ], - topics: [ - { - name: 'awesome-topic', - partitions: [ - { - id: 1, - leader: 20, - replicas: [1, 2], - isrs: [1, 2] - } - ] - } - ] + console.log("Disconnected successfully"); } -``` - -The following example illustrates how to use the `getMetadata` method. - -When fetching metadata for a specific topic, if a topic reference does not exist, one is created using the default config. -Please see the documentation on `Client.getMetadata` if you want to set configuration parameters, e.g. `acks`, on a topic to produce messages to. - -```js -const opts = { - topic: 'librdtesting-01', - timeout: 10000 -}; - -producer.getMetadata(opts, (err, metadata) => { - if (err) { - console.error('Error getting metadata'); - console.error(err); - } else { - console.log('Got metadata'); - console.log(metadata); - } -}); -``` -## Admin Client - -`confluent-kafka-js` now supports the admin client for creating, deleting, and scaling out topics. The `librdkafka` APIs also support altering configuration of topics and broker, but that is not currently implemented. - -To create an Admin client, you can do as follows: - -```js -const Kafka = require('confluent-kafka-js'); - -const client = Kafka.AdminClient.create({ - 'client.id': 'kafka-admin', - 'metadata.broker.list': 'broker01' -}); +producerStart(); ``` -This will instantiate the `AdminClient`, which will allow the calling of the admin methods. +1. If you're migrating from `kafkajs`, you can use the [migration guide](MIGRATION.md#kafkajs). +2. If you're migrating from `node-rdkafka`, you can use the [migration guide](MIGRATION.md#node-rdkafka). +3. If you're starting afresh, you can use the [quickstart guide](QUICKSTART.md). -```js -client.createTopic({ - topic: topicName, - num_partitions: 1, - replication_factor: 1 -}, (err) => { - // Done! -}); -``` - -All of the admin api methods can have an optional timeout as their penultimate parameter. +An in-depth reference may be found at [INTRODUCTION.md](INTRODUCTION.md). 
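The getting-started snippet above only shows a producer. A matching consumer written against the same KafkaJS-style promise API would look roughly like the sketch below; this is an illustration only, the broker address, topic name, and group id are placeholders, and the exact consumer configuration keys should be checked against [INTRODUCTION.md](INTRODUCTION.md).

```javascript
// Minimal consumer sketch mirroring the producer example above (illustrative only).
// '<broker:port>', 'test-topic', and 'test-group' are placeholders; add ssl/sasl
// settings inside the kafkaJS block as in the producer example if your cluster needs them.
const { Kafka } = require("@confluentinc/kafka-javascript").KafkaJS;

async function consumerStart() {
  const kafka = new Kafka({
    kafkaJS: {
      brokers: ['<broker:port>'],
    }
  });

  // groupId and fromBeginning follow the KafkaJS-style configuration; adjust as needed.
  const consumer = kafka.consumer({
    kafkaJS: {
      groupId: 'test-group',
      fromBeginning: true,
    }
  });

  await consumer.connect();
  await consumer.subscribe({ topics: ['test-topic'] });

  // eachMessage is invoked for every message received on the subscribed topics.
  await consumer.run({
    eachMessage: async ({ topic, partition, message }) => {
      console.log({
        topic,
        partition,
        offset: message.offset,
        value: message.value.toString(),
      });
    },
  });

  // Call consumer.disconnect() during shutdown to leave the group cleanly.
}

consumerStart();
```

As with the producer, `connect()`, `subscribe()`, and `disconnect()` return promises, so the consumer fits into the same async/await flow as the example above.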
-The following table lists important methods for this API. - -|Method|Description| -|-------|----------| -|`client.disconnect()` | Destroy the admin client, making it invalid for further use. | -|`client.createTopic(topic, timeout, cb)` | Create a topic on the broker with the given configuration. See JS doc for more on structure of the topic object | -|`client.deleteTopic(topicName, timeout, cb)` | Delete a topic of the given name | -|`client.createPartitions(topicName, desiredPartitions, timeout, cb)` | Create partitions until the topic has the desired number of partitions. | +## Contributing -Check the tests for an example of how to use this API! +Bug reports and feedback is appreciated in the form of Github Issues. +For guidelines on contributing please see [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000..64bc0e3d --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,11 @@ +# Security Policy + +## Supported Versions + +Post version 1.0.0, the latest version will be supported. + +## Reporting a Vulnerability + +To report a vulnerability, please notify security@confluent.io + +If an issue is confirmed, a github issue will be created to help track progress with its resolution. diff --git a/bench/consumer-raw-rdkafka.js b/bench/consumer-raw-rdkafka.js index 5b27c1d2..a42461e8 100644 --- a/bench/consumer-raw-rdkafka.js +++ b/bench/consumer-raw-rdkafka.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -16,7 +16,7 @@ var topic = process.argv[3] || 'test'; var consumer = new Kafka.KafkaConsumer({ 'metadata.broker.list': host, - 'group.id': 'confluent-kafka-js-bench-s', + 'group.id': 'confluent-kafka-javascript-bench-s', 'fetch.wait.max.ms': 100, 'fetch.message.max.bytes': 1024 * 1024, 'enable.auto.commit': false diff --git a/bench/consumer-subscribe.js b/bench/consumer-subscribe.js index 6dbd7fc4..f46ee21d 100644 --- a/bench/consumer-subscribe.js +++ b/bench/consumer-subscribe.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -16,7 +16,7 @@ var topic = process.argv[3] || 'test'; var consumer = new Kafka.KafkaConsumer({ 'metadata.broker.list': host, - 'group.id': 'confluent-kafka-js-bench', + 'group.id': 'confluent-kafka-javascript-bench', 'fetch.wait.max.ms': 100, 'fetch.message.max.bytes': 1024 * 1024, 'enable.auto.commit': false diff --git a/bench/kafka-consumer-stream.js b/bench/kafka-consumer-stream.js index ff8888f9..6bf53779 100644 --- a/bench/kafka-consumer-stream.js +++ b/bench/kafka-consumer-stream.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -18,7 +18,7 @@ var topic = process.argv[3] || 'test'; var stream = Kafka.createReadStream({ 'metadata.broker.list': host, - 'group.id': 'confluent-kafka-js-benchs', + 'group.id': 'confluent-kafka-javascript-benchs', 'fetch.wait.max.ms': 100, 'fetch.message.max.bytes': 1024 * 1024, 'enable.auto.commit': false diff --git a/bench/producer-raw-rdkafka.js b/bench/producer-raw-rdkafka.js index e5d7cf56..c0d55e5c 100644 --- a/bench/producer-raw-rdkafka.js +++ b/bench/producer-raw-rdkafka.js @@ -1,5 
+1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -24,7 +24,7 @@ var MAX = process.argv[5] || 10000000; var producer = new Kafka.Producer({ 'metadata.broker.list': host, - 'group.id': 'confluent-kafka-js-bench', + 'group.id': 'confluent-kafka-javascript-bench', 'compression.codec': compression, 'retry.backoff.ms': 200, 'message.send.max.retries': 10, diff --git a/bench/producer-rdkafka.js b/bench/producer-rdkafka.js index d2fa37b2..97eb6bf6 100644 --- a/bench/producer-rdkafka.js +++ b/bench/producer-rdkafka.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -20,7 +20,7 @@ var MAX = process.argv[5] || 1000000; var stream = Kafka.Producer.createWriteStream({ 'metadata.broker.list': host, - 'group.id': 'confluent-kafka-js-bench', + 'group.id': 'confluent-kafka-javascript-bench', 'compression.codec': compression, 'retry.backoff.ms': 200, 'message.send.max.retries': 10, diff --git a/binding.gyp b/binding.gyp index 9ec16668..a70df362 100644 --- a/binding.gyp +++ b/binding.gyp @@ -7,7 +7,7 @@ }, "targets": [ { - "target_name": "confluent-kafka-js", + "target_name": "confluent-kafka-javascript", 'sources': [ 'src/binding.cc', 'src/callbacks.cc', @@ -86,34 +86,36 @@ ], 'conditions': [ [ - ['OS=="linux"', 'CKJS_LINKING="dynamic"'], + 'CKJS_LINKING=="dynamic"', { - "libraries": [ - "../build/deps/librdkafka.so", - "../build/deps/librdkafka++.so", - "-Wl,-rpath='$$ORIGIN/../deps'", - ], - } - ], - [ - ['OS=="linux"', 'CKJS_LINKING!="dynamic"'], + "conditions": [ + [ + 'OS=="mac"', + { + "libraries": [ + "../build/deps/librdkafka.dylib", + "../build/deps/librdkafka++.dylib", + "-Wl,-rpath,'$$ORIGIN/../deps'", + ], + }, + { + "libraries": [ + "../build/deps/librdkafka.so", + "../build/deps/librdkafka++.so", + "-Wl,-rpath,'$$ORIGIN/../deps'", + ], + }, + ] + ] + }, { "libraries": [ "../build/deps/librdkafka-static.a", "../build/deps/librdkafka++.a", - "-Wl,-rpath='$$ORIGIN/../deps'", + "-Wl,-rpath,'$$ORIGIN/../deps'", ], } ], - [ - 'OS=="mac"', - { - "libraries": [ - "../build/deps/librdkafka.dylib", - "../build/deps/librdkafka++.dylib", - ], - } - ] ], }, # Else link against globally installed rdkafka and use diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 00000000..7fe5b593 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,8 @@ +# Learn more: https://buf.build/docs/configuration/v2/buf-gen-yaml +version: v2 +inputs: + - directory: proto +plugins: + - local: protoc-gen-es + opt: target=ts + out: schemaregistry diff --git a/ci/checks/librdkafka-correct-version.js b/ci/checks/librdkafka-correct-version.js index 5b2506e5..011a5481 100644 --- a/ci/checks/librdkafka-correct-version.js +++ b/ci/checks/librdkafka-correct-version.js @@ -61,7 +61,8 @@ function versionAsString(version) { const librdkafkaVersion = parseLibrdkafkaVersion(defines.RD_KAFKA_VERSION); const versionString = versionAsString(librdkafkaVersion); -if (pjs.librdkafka !== versionString) { +// If our version is a devel (early access) version, we might be on master. 
+if (pjs.librdkafka !== versionString && !pjs.version.includes('devel')) { console.error(`Librdkafka version of ${versionString} does not match package json: ${pjs.librdkafka}`); process.exit(1); } diff --git a/ci/librdkafka-defs-generator.js b/ci/librdkafka-defs-generator.js index be4113a1..f81b423e 100644 --- a/ci/librdkafka-defs-generator.js +++ b/ci/librdkafka-defs-generator.js @@ -52,7 +52,7 @@ function extractConfigItems(configStr) { } function processItem(configItem) { - // These items are overwritten by confluent-kafka-js + // These items are overwritten by confluent-kafka-javascript switch (configItem.property) { case 'dr_msg_cb': return { ...configItem, type: 'boolean' }; diff --git a/ci/tests/run_perf_test.sh b/ci/tests/run_perf_test.sh new file mode 100755 index 00000000..c55a67ab --- /dev/null +++ b/ci/tests/run_perf_test.sh @@ -0,0 +1,64 @@ +#!/bin/bash + +testresultConfluentProducerConsumer=$(mktemp) +testresultConfluentCtp=$(mktemp) +testresultKjsProducerConsumer=$(mktemp) +testresultKjsCtp=$(mktemp) + +MODE=confluent MESSAGE_COUNT=500000 node performance-consolidated.js --create-topics --consumer --producer 2>&1 | tee "$testresultConfluentProducerConsumer" +MODE=kafkajs MESSAGE_COUNT=500000 node performance-consolidated.js --create-topics --consumer --producer 2>&1 | tee "$testresultKjsProducerConsumer" +MODE=confluent MESSAGE_COUNT=5000 node performance-consolidated.js --create-topics --ctp 2>&1 | tee "$testresultConfluentCtp" +MODE=kafkajs MESSAGE_COUNT=5000 node performance-consolidated.js --create-topics --ctp 2>&1 | tee "$testresultKjsCtp" + +producerConfluent=$(grep "=== Producer Rate:" "$testresultConfluentProducerConsumer" | cut -d':' -f2 | tr -d ' ') +consumerConfluent=$(grep "=== Consumer Rate:" "$testresultConfluentProducerConsumer" | cut -d':' -f2 | tr -d ' ') +ctpConfluent=$(grep "=== Consume-Transform-Produce Rate:" "$testresultConfluentCtp" | cut -d':' -f2 | tr -d ' ') +producerKjs=$(grep "=== Producer Rate:" "$testresultKjsProducerConsumer" | cut -d':' -f2 | tr -d ' ') +consumerKjs=$(grep "=== Consumer Rate:" "$testresultKjsProducerConsumer" | cut -d':' -f2 | tr -d ' ') +ctpKjs=$(grep "=== Consume-Transform-Produce Rate:" "$testresultKjsCtp" | cut -d':' -f2 | tr -d ' ') + +echo "Producer rates: confluent $producerConfluent, kafkajs $producerKjs" +echo "Consumer rates: confluent $consumerConfluent, kafkajs $consumerKjs" +echo "CTP rates: confluent $ctpConfluent, kafkajs $ctpKjs" + +errcode=0 + +# Compare against KJS +if [[ $(echo "$producerConfluent < $producerKjs * 70 / 100" | bc -l) -eq 1 ]]; then + echo "Producer rates differ by more than 30%: confluent $producerConfluent, kafkajs $producerKjs" + errcode=1 +fi + +if [[ $(echo "$consumerConfluent < $consumerKjs * 70 / 100" | bc -l) -eq 1 ]]; then + echo "Consumer rates differ by more than 30%: confluent $consumerConfluent, kafkajs $consumerKjs" + # FIXME: improve consumer performance at least to KafkaJS level + errcode=0 +fi + +if [[ $(echo "$ctpConfluent < $ctpKjs * 70 / 100" | bc -l) -eq 1 ]]; then + echo "CTP rates differ by more than 30%: confluent $ctpConfluent, kafkajs $ctpKjs" + errcode=1 +fi + +# Compare against numbers set within semaphore config +TARGET_PRODUCE="${TARGET_PRODUCE_PERFORMANCE:-35}" +TARGET_CONSUME="${TARGET_CONSUME_PERFORMANCE:-18}" +TARGET_CTP="${TARGET_CTP_PERFORMANCE:-0.02}" + +if [[ $(echo "$producerConfluent < $TARGET_PRODUCE" | bc -l) -eq 1 ]]; then + echo "Confluent producer rate is below target: $producerConfluent" + errcode=1 +fi + +if [[ $(echo "$consumerConfluent < 
$TARGET_CONSUME" | bc -l) -eq 1 ]]; then + echo "Confluent consumer rate is below target: $consumerConfluent" + errcode=1 +fi + +if [[ $(echo "$ctpConfluent < $TARGET_CTP" | bc -l) -eq 1 ]]; then + echo "Confluent CTP rate is below target: $ctpConfluent" + errcode=1 +fi + +exit $errcode + diff --git a/ci/update-version.js b/ci/update-version.js index 4265ea9b..50957b74 100644 --- a/ci/update-version.js +++ b/ci/update-version.js @@ -84,24 +84,15 @@ function getBranch(cb) { } function getPackageVersion(tag, branch) { - const baseVersion = `v${tag.major}.${tag.minor}.${tag.patch}`; + let baseVersion = `v${tag.major}.${tag.minor}.${tag.patch}`; - console.log(`Package version is "${baseVersion}"`); - - // never publish with an suffix - // fixes https://github.com/confluentinc/confluent-kafka-js/issues/981 - // baseVersion += '-'; - - // if (tag.commit === 0 && branch === 'master') { - // return baseVersion; - // } - // if (branch !== 'master') { - // baseVersion += (tag.commit + 1 + '.' + branch); - // } else { - // baseVersion += (tag.commit + 1); - // } + // publish with a -devel suffix for EA and RC releases. + if (tag.prerelease.length > 0) { + baseVersion += '-' + tag.prerelease.join('-'); + } + console.log(`Package version is "${baseVersion}"`); return baseVersion; } @@ -114,7 +105,6 @@ getVersion((err, tag) => { if (err) { throw err; } - pjs.version = getPackageVersion(tag, branch); fs.writeFileSync(pjsPath, JSON.stringify(pjs, null, 2)); diff --git a/cpplint.py b/cpplint.py index 44726248..0e50a882 100644 --- a/cpplint.py +++ b/cpplint.py @@ -42,84 +42,155 @@ """ import codecs +import collections import copy import getopt +import glob +import itertools import math # for log import os import re -import sre_compile import string import sys +import sysconfig import unicodedata +import xml.etree.ElementTree +# if empty, use defaults +_valid_extensions = set([]) + +__VERSION__ = '1.7' _USAGE = """ -Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] +Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed] + [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--root=subdir] - [--linelength=digits] + [--repository=path] + [--linelength=digits] [--headers=x,y,...] + [--recursive] + [--exclude=path] + [--extensions=hpp,cpp,...] + [--includeorder=default|standardcfirst] + [--config=filename] + [--quiet] + [--version] [file] ... + Style checker for C/C++ source files. + This is a fork of the Google style checker with minor extensions. + The style guidelines this tries to follow are those in - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml + https://google.github.io/styleguide/cppguide.html Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. - To suppress false-positive errors of a certain category, add a - 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) - suppresses errors of all categories on that line. + To suppress false-positive errors of certain categories, add a + 'NOLINT(category[, category...])' comment to the line. NOLINT or NOLINT(*) + suppresses errors of all categories on that line. To suppress categories + on the next line use NOLINTNEXTLINE instead of NOLINT. To suppress errors in + a block of code 'NOLINTBEGIN(category[, category...])' comment to a line at + the start of the block and to end the block add a comment with 'NOLINTEND'. 
+ NOLINT blocks are inclusive so any statements on the same line as a BEGIN + or END will have the error suppression applied. The files passed in will be linted; at least one file must be provided. - Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the - extensions with the --extensions flag. + Default linted extensions are %s. + Other file types will be ignored. + Change the extensions with the --extensions flag. Flags: - output=vs7 + output=emacs|eclipse|vs7|junit|sed|gsed By default, the output is formatted to ease emacs parsing. Visual Studio - compatible output (vs7) may also be used. Other formats are unsupported. + compatible output (vs7) may also be used. Further support exists for + eclipse (eclipse), and JUnit (junit). XML parsers such as those used + in Jenkins and Bamboo may also be used. + The sed format outputs sed commands that should fix some of the errors. + Note that this requires gnu sed. If that is installed as gsed on your + system (common e.g. on macOS with homebrew) you can use the gsed output + format. Sed commands are written to stdout, not stderr, so you should be + able to pipe output straight to a shell to run the fixes. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. + Errors with lower verbosity levels have lower confidence and are more + likely to be false positives. + + quiet + Don't print anything if no errors are found. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. (Category names are printed with the message and look like "[whitespace/indent]".) Filters are evaluated left to right. - "-FOO" and "FOO" means "do not print categories that start with FOO". + "-FOO" means "do not print categories that start with FOO". "+FOO" means "do print categories that start with FOO". Examples: --filter=-whitespace,+whitespace/braces - --filter=whitespace,runtime/printf,+runtime/printf_format + --filter=-whitespace,-runtime/printf,+runtime/printf_format --filter=-,+build/include_what_you_use To see a list of all the categories used in cpplint, pass no arg: --filter= + Filters can directly be limited to files and also line numbers. The + syntax is category:file:line , where line is optional. The filter limitation + works for both + and - and can be combined with ordinary filters: + + Examples: --filter=-whitespace:foo.h,+whitespace/braces:foo.h + --filter=-whitespace,-runtime/printf:foo.h:14,+runtime/printf_format:foo.h + --filter=-,+build/include_what_you_use:foo.h:321 + counting=total|toplevel|detailed The total number of errors found is always printed. If 'toplevel' is provided, then the count of errors in each of the top-level categories like 'build' and 'whitespace' will also be printed. If 'detailed' is provided, then a count - is provided for each category like 'build/class'. + is provided for each category like 'legal/copyright'. + + repository=path + The top level directory of the repository, used to derive the header + guard CPP variable. By default, this is determined by searching for a + path that contains .git, .hg, or .svn. When this flag is specified, the + given path is used instead. This option allows the header guard CPP + variable to remain consistent even if members of a team have different + repository root directories (such as when checking out a subdirectory + with SVN). In addition, users of non-mainstream version control systems + can use this flag to ensure readable header guard CPP variables. 
+ + Examples: + Assuming that Alice checks out ProjectName and Bob checks out + ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then + with no --repository flag, the header guard CPP variable will be: + + Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_ + Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ + + If Alice uses the --repository=trunk flag and Bob omits the flag or + uses --repository=. then the header guard CPP variable will be: + + Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_ + Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_ root=subdir The root directory used for deriving header guard CPP variable. - By default, the header guard CPP variable is calculated as the relative - path to the directory that contains .git, .hg, or .svn. When this flag - is specified, the relative path is calculated from the specified - directory. If the specified directory does not exist, this flag is - ignored. + This directory is relative to the top level directory of the repository + which by default is determined by searching for a directory that contains + .git, .hg, or .svn but can also be controlled with the --repository flag. + If the specified directory does not exist, this flag is ignored. Examples: - Assuming that src/.git exists, the header guard CPP variables for + Assuming that src is the top level directory of the repository (and + cwd=top/src), the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ --root=chrome => BROWSER_UI_BROWSER_H_ --root=chrome/browser => UI_BROWSER_H_ + --root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_ linelength=digits This is the allowed line length for the project. The default value is @@ -128,11 +199,50 @@ Examples: --linelength=120 + recursive + Search for files to lint recursively. Each directory given in the list + of files to be linted is replaced by all files that descend from that + directory. Files with extensions not in the valid extensions list are + excluded. + + exclude=path + Exclude the given path from the list of files to be linted. Relative + paths are evaluated relative to the current directory and shell globbing + is performed. This flag can be provided multiple times to exclude + multiple files. + + Examples: + --exclude=one.cc + --exclude=src/*.cc + --exclude=src/*.cc --exclude=test/*.cc + extensions=extension,extension,... The allowed file extensions that cpplint will check Examples: - --extensions=hpp,cpp + --extensions=%s + + includeorder=default|standardcfirst + For the build/include_order rule, the default is to blindly assume angle + bracket includes with file extension are c-system-headers (default), + even knowing this will have false classifications. + The default is established at google. + standardcfirst means to instead use an allow-list of known c headers and + treat all others as separate group of "other system headers". The C headers + included are those of the C-standard lib and closely related ones. + + config=filename + Search for config files with the specified name instead of CPPLINT.cfg + + headers=x,y,... + The header extensions that cpplint will treat as .h in checks. Values are + automatically added to --extensions list. + (by default, only files with extensions %s will be assumed to be headers) + + Examples: + --headers=%s + --headers=hpp,hxx + --headers=hpp cpplint.py supports per-directory configurations specified in CPPLINT.cfg files. CPPLINT.cfg file can contain a number of key=value pairs. @@ -142,6 +252,8 @@ filter=+filter1,-filter2,... 
exclude_files=regex linelength=80 + root=subdir + headers=x,y,... "set noparent" option prevents cpplint from traversing directory tree upwards looking for more .cfg files in parent directories. This option @@ -153,16 +265,22 @@ "exclude_files" allows to specify a regular expression to be matched against a file name. If the expression matches, the file is skipped and not run - through liner. + through the linter. "linelength" allows to specify the allowed line length for the project. + The "root" option is similar in function to the --root flag (see example + above). Paths are relative to the directory of the CPPLINT.cfg. + + The "headers" option is similar in function to the --headers flag + (see example above). + CPPLINT.cfg has an effect on files in the same directory and all sub-directories, unless overridden by a nested configuration file. Example file: filter=-build/include_order,+build/include_alpha - exclude_files=.*\.cc + exclude_files=.*\\.cc The above example disables build/include_order warning and enables build/include_alpha as well as excludes all .cc from being @@ -175,17 +293,20 @@ # If you add a new error message with a new category, add it to the list # here! cpplint_unittest.py should tell you if you forget to do this. _ERROR_CATEGORIES = [ - 'build/class', 'build/c++11', + 'build/c++17', 'build/deprecated', 'build/endif_comment', 'build/explicit_make_pair', 'build/forward_decl', 'build/header_guard', 'build/include', + 'build/include_subdir', 'build/include_alpha', 'build/include_order', 'build/include_what_you_use', + 'build/namespaces_headers', + 'build/namespaces_literals', 'build/namespaces', 'build/printf_format', 'build/storage_class', @@ -196,7 +317,6 @@ 'readability/check', 'readability/constructors', 'readability/fn_size', - 'readability/function', 'readability/inheritance', 'readability/multiline_comment', 'readability/multiline_string', @@ -214,7 +334,6 @@ 'runtime/invalid_increment', 'runtime/member_string_references', 'runtime/memset', - 'runtime/indentation_namespace', 'runtime/operator', 'runtime/printf', 'runtime/printf_format', @@ -227,11 +346,13 @@ 'whitespace/comma', 'whitespace/comments', 'whitespace/empty_conditional_body', + 'whitespace/empty_if_body', 'whitespace/empty_loop_body', 'whitespace/end_of_line', 'whitespace/ending_newline', 'whitespace/forcolon', 'whitespace/indent', + 'whitespace/indent_namespace', 'whitespace/line_length', 'whitespace/newline', 'whitespace/operators', @@ -241,10 +362,49 @@ 'whitespace/todo', ] +# keywords to use with --outputs which generate stdout for machine processing +_MACHINE_OUTPUTS = [ + 'junit', + 'sed', + 'gsed' +] + # These error categories are no longer enforced by cpplint, but for backwards- # compatibility they may still appear in NOLINT comments. _LEGACY_ERROR_CATEGORIES = [ + 'build/class', 'readability/streams', + 'readability/function', + ] + +# These prefixes for categories should be ignored since they relate to other +# tools which also use the NOLINT syntax, e.g. clang-tidy. +_OTHER_NOLINT_CATEGORY_PREFIXES = [ + 'clang-analyzer-', + 'abseil-', + 'altera-', + 'android-', + 'boost-', + 'bugprone-', + 'cert-', + 'concurrency-', + 'cppcoreguidelines-', + 'darwin-', + 'fuchsia-', + 'google-', + 'hicpp-', + 'linuxkernel-', + 'llvm-', + 'llvmlibc-', + 'misc-', + 'modernize-', + 'mpi-', + 'objc-', + 'openmp-', + 'performance-', + 'portability-', + 'readability-', + 'zircon-', ] # The default state of the category filter. 
This is overridden by the --filter= @@ -253,6 +413,16 @@ # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = ['-build/include_alpha'] +# The default list of categories suppressed for C (not C++) files. +_DEFAULT_C_SUPPRESSED_CATEGORIES = [ + 'readability/casting', + ] + +# The default list of categories suppressed for Linux Kernel files. +_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [ + 'whitespace/tab', + ] + # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. @@ -265,7 +435,7 @@ 'alloc.h', 'builtinbuf.h', 'bvector.h', - 'complex.h', + # 'complex.h', collides with System C header "complex.h" since C11 'defalloc.h', 'deque.h', 'editbuf.h', @@ -311,7 +481,7 @@ 'tree.h', 'type_traits.h', 'vector.h', - # 17.6.1.2 C++ library headers + # C++ library headers 'algorithm', 'array', 'atomic', @@ -346,6 +516,7 @@ 'random', 'ratio', 'regex', + 'scoped_allocator', 'set', 'sstream', 'stack', @@ -364,7 +535,45 @@ 'utility', 'valarray', 'vector', - # 17.6.1.2 C++ headers for C library facilities + # C++14 headers + 'shared_mutex', + # C++17 headers + 'any', + 'charconv', + 'codecvt', + 'execution', + 'filesystem', + 'memory_resource', + 'optional', + 'string_view', + 'variant', + # C++20 headers + 'barrier', + 'bit', + 'compare', + 'concepts', + 'coroutine', + 'format', + 'latch' + 'numbers', + 'ranges', + 'semaphore', + 'source_location', + 'span', + 'stop_token', + 'syncstream', + 'version', + # C++23 headers + 'expected', + 'flat_map', + 'flat_set', + 'generator', + 'mdspan', + 'print', + 'spanstream', + 'stacktrace', + 'stdfloat', + # C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', @@ -393,49 +602,242 @@ 'cwctype', ]) +# C headers +_C_HEADERS = frozenset([ + # System C headers + 'assert.h', + 'complex.h', + 'ctype.h', + 'errno.h', + 'fenv.h', + 'float.h', + 'inttypes.h', + 'iso646.h', + 'limits.h', + 'locale.h', + 'math.h', + 'setjmp.h', + 'signal.h', + 'stdalign.h', + 'stdarg.h', + 'stdatomic.h', + 'stdbool.h', + 'stddef.h', + 'stdint.h', + 'stdio.h', + 'stdlib.h', + 'stdnoreturn.h', + 'string.h', + 'tgmath.h', + 'threads.h', + 'time.h', + 'uchar.h', + 'wchar.h', + 'wctype.h', + # C23 headers + 'stdbit.h', + 'stdckdint.h', + # additional POSIX C headers + 'aio.h', + 'arpa/inet.h', + 'cpio.h', + 'dirent.h', + 'dlfcn.h', + 'fcntl.h', + 'fmtmsg.h', + 'fnmatch.h', + 'ftw.h', + 'glob.h', + 'grp.h', + 'iconv.h', + 'langinfo.h', + 'libgen.h', + 'monetary.h', + 'mqueue.h', + 'ndbm.h', + 'net/if.h', + 'netdb.h', + 'netinet/in.h', + 'netinet/tcp.h', + 'nl_types.h', + 'poll.h', + 'pthread.h', + 'pwd.h', + 'regex.h', + 'sched.h', + 'search.h', + 'semaphore.h', + 'setjmp.h', + 'signal.h', + 'spawn.h', + 'strings.h', + 'stropts.h', + 'syslog.h', + 'tar.h', + 'termios.h', + 'trace.h', + 'ulimit.h', + 'unistd.h', + 'utime.h', + 'utmpx.h', + 'wordexp.h', + # additional GNUlib headers + 'a.out.h', + 'aliases.h', + 'alloca.h', + 'ar.h', + 'argp.h', + 'argz.h', + 'byteswap.h', + 'crypt.h', + 'endian.h', + 'envz.h', + 'err.h', + 'error.h', + 'execinfo.h', + 'fpu_control.h', + 'fstab.h', + 'fts.h', + 'getopt.h', + 'gshadow.h', + 'ieee754.h', + 'ifaddrs.h', + 'libintl.h', + 'mcheck.h', + 'mntent.h', + 'obstack.h', + 'paths.h', + 'printf.h', + 'pty.h', + 'resolv.h', + 'shadow.h', + 'sysexits.h', + 'ttyent.h', + # Additional linux glibc headers + 'dlfcn.h', + 'elf.h', + 'features.h', + 
'gconv.h', + 'gnu-versions.h', + 'lastlog.h', + 'libio.h', + 'link.h', + 'malloc.h', + 'memory.h', + 'netash/ash.h', + 'netatalk/at.h', + 'netax25/ax25.h', + 'neteconet/ec.h', + 'netipx/ipx.h', + 'netiucv/iucv.h', + 'netpacket/packet.h', + 'netrom/netrom.h', + 'netrose/rose.h', + 'nfs/nfs.h', + 'nl_types.h', + 'nss.h', + 're_comp.h', + 'regexp.h', + 'sched.h', + 'sgtty.h', + 'stab.h', + 'stdc-predef.h', + 'stdio_ext.h', + 'syscall.h', + 'termio.h', + 'thread_db.h', + 'ucontext.h', + 'ustat.h', + 'utmp.h', + 'values.h', + 'wait.h', + 'xlocale.h', + # Hardware specific headers + 'arm_neon.h', + 'emmintrin.h', + 'xmmintin.h', + ]) + +# Folders of C libraries so commonly used in C++, +# that they have parity with standard C libraries. +C_STANDARD_HEADER_FOLDERS = frozenset([ + # standard C library + "sys", + # glibc for linux + "arpa", + "asm-generic", + "bits", + "gnu", + "net", + "netinet", + "protocols", + "rpc", + "rpcsvc", + "scsi", + # linux kernel header + "drm", + "linux", + "misc", + "mtd", + "rdma", + "sound", + "video", + "xen", + ]) + +# Type names +_TYPES = re.compile( + r'^(?:' + # [dcl.type.simple] + r'(char(16_t|32_t)?)|wchar_t|' + r'bool|short|int|long|signed|unsigned|float|double|' + # [support.types] + r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|' + # [cstdint.syn] + r'(u?int(_fast|_least)?(8|16|32|64)_t)|' + r'(u?int(max|ptr)_t)|' + r')$') + # These headers are excluded from [build/include] and [build/include_order] # checks: # - Anything not following google file name conventions (containing an # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. -# - rdkafka.cpp header, because it would be located in different directories depending -# on whether it's pulled from librdkafka sources or librdkafka-dev package. _THIRD_PARTY_HEADERS_PATTERN = re.compile( - r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h|rdkafkacpp\.h)$') + r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') + +# Pattern for matching FileInfo.BaseName() against test file name +_test_suffixes = ['_test', '_regtest', '_unittest'] +_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$' +# Pattern that matches only complete whitespace, possibly across multiple lines. +_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL) # Assertion macros. These are defined in base/logging.h and -# testing/base/gunit.h. Note that the _M versions need to come first -# for substring matching to work. +# testing/base/public/gunit.h. 
_CHECK_MACROS = [ 'DCHECK', 'CHECK', - 'EXPECT_TRUE_M', 'EXPECT_TRUE', - 'ASSERT_TRUE_M', 'ASSERT_TRUE', - 'EXPECT_FALSE_M', 'EXPECT_FALSE', - 'ASSERT_FALSE_M', 'ASSERT_FALSE', + 'EXPECT_TRUE', 'ASSERT_TRUE', + 'EXPECT_FALSE', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE -_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) +_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: - _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement - _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement + _CHECK_REPLACEMENT['DCHECK'][op] = f'DCHECK_{replacement}' + _CHECK_REPLACEMENT['CHECK'][op] = f'CHECK_{replacement}' + _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = f'EXPECT_{replacement}' + _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = f'ASSERT_{replacement}' for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: - _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement - _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement + _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = f'EXPECT_{inv_replacement}' + _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = f'ASSERT_{inv_replacement}' # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. @@ -462,16 +864,17 @@ # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( - r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') + r'([ =()])(' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')([ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 -_LIKELY_MY_HEADER = 3 -_POSSIBLE_MY_HEADER = 4 -_OTHER_HEADER = 5 +_OTHER_SYS_HEADER = 3 +_LIKELY_MY_HEADER = 4 +_POSSIBLE_MY_HEADER = 5 +_OTHER_HEADER = 6 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block @@ -484,8 +887,28 @@ r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') - -_regexp_compile_cache = {} +# Match strings that indicate we're working on a C (not C++) file. +_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|' + r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))') + +# Match string that indicates we're working on a Linux Kernel file. 
+_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)') + +# Commands for sed to fix the problem +_SED_FIXUPS = { + 'Remove spaces around =': r's/ = /=/', + 'Remove spaces around !=': r's/ != /!=/', + 'Remove space before ( in if (': r's/if (/if(/', + 'Remove space before ( in for (': r's/for (/for(/', + 'Remove space before ( in while (': r's/while (/while(/', + 'Remove space before ( in switch (': r's/switch (/switch(/', + 'Should have a space between // and comment': r's/\/\//\/\/ /', + 'Missing space before {': r's/\([^ ]\){/\1 {/', + 'Tab found, replace by spaces': r's/\t/ /g', + 'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//', + 'You don\'t need a ; after a }': r's/};/}/', + 'Missing space after ,': r's/,\([^ ]\)/, \1/g', +} # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. @@ -494,17 +917,147 @@ # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None +_root_debug = False + +# The top level repository directory. If set, _root is calculated relative to +# this directory instead of the directory containing version control artifacts. +# This is set by the --repository flag. +_repository = None + +# Files to exclude from linting. This is set by the --exclude flag. +_excludes = None + +# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag +_quiet = False # The allowed line length of files. # This is set by --linelength flag. _line_length = 80 +# This allows to use different include order rule than default +_include_order = "default" + +# This allows different config files to be used +_config_filename = "CPPLINT.cfg" + +# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc. +# This is set by --headers flag. +_hpp_headers = set([]) + +class ErrorSuppressions: + """Class to track all error suppressions for cpplint""" + + class LineRange: + """Class to represent a range of line numbers for which an error is suppressed""" + def __init__(self, begin, end): + self.begin = begin + self.end = end + + def __str__(self): + return f'[{self.begin}-{self.end}]' + + def __contains__(self, obj): + return self.begin <= obj <= self.end + + def ContainsRange(self, other): + return self.begin <= other.begin and self.end >= other.end + + def __init__(self): + self._suppressions = collections.defaultdict(list) + self._open_block_suppression = None + + def _AddSuppression(self, category, line_range): + suppressed = self._suppressions[category] + if not (suppressed and suppressed[-1].ContainsRange(line_range)): + suppressed.append(line_range) + + def GetOpenBlockStart(self): + """:return: The start of the current open block or `-1` if there is not an open block""" + return self._open_block_suppression.begin if self._open_block_suppression else -1 + + def AddGlobalSuppression(self, category): + """Add a suppression for `category` which is suppressed for the whole file""" + self._AddSuppression(category, self.LineRange(0, math.inf)) + + def AddLineSuppression(self, category, linenum): + """Add a suppression for `category` which is suppressed only on `linenum`""" + self._AddSuppression(category, self.LineRange(linenum, linenum)) + + def StartBlockSuppression(self, category, linenum): + """Start a suppression block for `category` on `linenum`. 
inclusive""" + if self._open_block_suppression is None: + self._open_block_suppression = self.LineRange(linenum, math.inf) + self._AddSuppression(category, self._open_block_suppression) + + def EndBlockSuppression(self, linenum): + """End the current block suppression on `linenum`. inclusive""" + if self._open_block_suppression: + self._open_block_suppression.end = linenum + self._open_block_suppression = None + + def IsSuppressed(self, category, linenum): + """:return: `True` if `category` is suppressed for `linenum`""" + suppressed = self._suppressions[category] + self._suppressions[None] + return any(linenum in lr for lr in suppressed) + + def HasOpenBlock(self): + """:return: `True` if a block suppression was started but not ended""" + return self._open_block_suppression is not None + + def Clear(self): + """Clear all current error suppressions""" + self._suppressions.clear() + self._open_block_suppression = None + +_error_suppressions = ErrorSuppressions() + +def ProcessHppHeadersOption(val): + global _hpp_headers + try: + _hpp_headers = {ext.strip() for ext in val.split(',')} + except ValueError: + PrintUsage('Header extensions must be comma separated list.') + +def ProcessIncludeOrderOption(val): + if val is None or val == "default": + pass + elif val == "standardcfirst": + global _include_order + _include_order = val + else: + PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst') + +def IsHeaderExtension(file_extension): + return file_extension in GetHeaderExtensions() + +def GetHeaderExtensions(): + if _hpp_headers: + return _hpp_headers + if _valid_extensions: + return {h for h in _valid_extensions if 'h' in h} + return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh']) + # The allowed extensions for file names -# This is set by --extensions flag. -_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) +# This is set by --extensions flag +def GetAllExtensions(): + return GetHeaderExtensions().union(_valid_extensions or set( + ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu'])) + +def ProcessExtensionsOption(val): + global _valid_extensions + try: + extensions = [ext.strip() for ext in val.split(',')] + _valid_extensions = set(extensions) + except ValueError: + PrintUsage('Extensions should be a comma-separated list of values;' + 'for example: extensions=hpp,cpp\n' + f'This could not be parsed: "{val}"') + +def GetNonHeaderExtensions(): + return GetAllExtensions().difference(GetHeaderExtensions()) def ParseNolintSuppressions(filename, raw_line, linenum, error): - """Updates the global list of error-suppressions. + """Updates the global list of line error-suppressions. Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment @@ -516,79 +1069,91 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error): linenum: int, the number of the current line. error: function, an error handler. 
""" - matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) + matched = re.search(r'\bNOLINT(NEXTLINE|BEGIN|END)?\b(\([^)]+\))?', raw_line) if matched: - if matched.group(1): - suppressed_line = linenum + 1 - else: - suppressed_line = linenum - category = matched.group(2) - if category in (None, '(*)'): # => "suppress all" - _error_suppressions.setdefault(None, set()).add(suppressed_line) + no_lint_type = matched.group(1) + if no_lint_type == 'NEXTLINE': + def ProcessCategory(category): + _error_suppressions.AddLineSuppression(category, linenum + 1) + elif no_lint_type == 'BEGIN': + if _error_suppressions.HasOpenBlock(): + error(filename, linenum, 'readability/nolint', 5, + f'NONLINT block already defined on line {_error_suppressions.GetOpenBlockStart()}') + + def ProcessCategory(category): + _error_suppressions.StartBlockSuppression(category, linenum) + elif no_lint_type == 'END': + if not _error_suppressions.HasOpenBlock(): + error(filename, linenum, 'readability/nolint', 5, 'Not in a NOLINT block') + + def ProcessCategory(category): + if category is not None: + error(filename, linenum, 'readability/nolint', 5, + f'NOLINT categories not supported in block END: {category}') + _error_suppressions.EndBlockSuppression(linenum) else: - if category.startswith('(') and category.endswith(')'): - category = category[1:-1] + def ProcessCategory(category): + _error_suppressions.AddLineSuppression(category, linenum) + categories = matched.group(2) + if categories in (None, '(*)'): # => "suppress all" + ProcessCategory(None) + elif categories.startswith('(') and categories.endswith(')'): + for category in set(map(lambda c: c.strip(), categories[1:-1].split(','))): if category in _ERROR_CATEGORIES: - _error_suppressions.setdefault(category, set()).add(suppressed_line) + ProcessCategory(category) + elif any(c for c in _OTHER_NOLINT_CATEGORY_PREFIXES if category.startswith(c)): + # Ignore any categories from other tools. + pass elif category not in _LEGACY_ERROR_CATEGORIES: error(filename, linenum, 'readability/nolint', 5, - 'Unknown NOLINT error category: %s' % category) + f'Unknown NOLINT error category: {category}') + +def ProcessGlobalSuppresions(lines): + """Deprecated; use ProcessGlobalSuppressions.""" + ProcessGlobalSuppressions(lines) + +def ProcessGlobalSuppressions(lines): + """Updates the list of global error suppressions. + + Parses any lint directives in the file that have global effect. + + Args: + lines: An array of strings, each representing a line of the file, with the + last element being empty if the file is terminated with a newline. + """ + for line in lines: + if _SEARCH_C_FILE.search(line): + for category in _DEFAULT_C_SUPPRESSED_CATEGORIES: + _error_suppressions.AddGlobalSuppression(category) + if _SEARCH_KERNEL_FILE.search(line): + for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES: + _error_suppressions.AddGlobalSuppression(category) def ResetNolintSuppressions(): """Resets the set of NOLINT suppressions to empty.""" - _error_suppressions.clear() + _error_suppressions.Clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by - ParseNolintSuppressions/ResetNolintSuppressions. + ParseNolintSuppressions/ProcessGlobalSuppressions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: - bool, True iff the error should be suppressed due to a NOLINT comment. 
- """ - return (linenum in _error_suppressions.get(category, set()) or - linenum in _error_suppressions.get(None, set())) - - -def Match(pattern, s): - """Matches the string with the pattern, caching the compiled regexp.""" - # The regexp compilation caching is inlined in both Match and Search for - # performance reasons; factoring it out into a separate function turns out - # to be noticeably expensive. - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].match(s) - - -def ReplaceAll(pattern, rep, s): - """Replaces instances of pattern in a string with a replacement. - - The compiled regex is kept in a cache shared by Match and Search. - - Args: - pattern: regex pattern - rep: replacement text - s: search string - - Returns: - string with replacements made (or original string if no replacements) + bool, True iff the error should be suppressed due to a NOLINT comment, + block suppression or global suppression. """ - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].sub(rep, s) + return _error_suppressions.IsSuppressed(category, linenum) -def Search(pattern, s): - """Searches the string for the pattern, caching the compiled regexp.""" - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].search(s) +def _IsSourceExtension(s): + """File extension (excluding dot) matches a source file extension.""" + return s in GetNonHeaderExtensions() class _IncludeState(object): @@ -609,11 +1174,13 @@ class _IncludeState(object): _MY_H_SECTION = 1 _C_SECTION = 2 _CPP_SECTION = 3 - _OTHER_H_SECTION = 4 + _OTHER_SYS_SECTION = 4 + _OTHER_H_SECTION = 5 _TYPE_NAMES = { _C_SYS_HEADER: 'C system header', _CPP_SYS_HEADER: 'C++ system header', + _OTHER_SYS_HEADER: 'other system header', _LIKELY_MY_HEADER: 'header this file implements', _POSSIBLE_MY_HEADER: 'header this file may implement', _OTHER_HEADER: 'other header', @@ -623,11 +1190,14 @@ class _IncludeState(object): _MY_H_SECTION: 'a header this file implements', _C_SECTION: 'C system header', _CPP_SECTION: 'C++ system header', + _OTHER_SYS_SECTION: 'other system header', _OTHER_H_SECTION: 'other header', } def __init__(self): self.include_list = [[]] + self._section = None + self._last_header = None self.ResetSection('') def FindHeader(self, header): @@ -698,7 +1268,7 @@ def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and - Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): + re.match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): return False return True @@ -716,9 +1286,8 @@ def CheckNextIncludeOrder(self, header_type): error message describing what's wrong. 
""" - error_message = ('Found %s after %s' % - (self._TYPE_NAMES[header_type], - self._SECTION_NAMES[self._section])) + error_message = (f'Found {self._TYPE_NAMES[header_type]}' + f' after {self._SECTION_NAMES[self._section]}') last_section = self._section @@ -734,6 +1303,12 @@ def CheckNextIncludeOrder(self, header_type): else: self._last_header = '' return error_message + elif header_type == _OTHER_SYS_HEADER: + if self._section <= self._OTHER_SYS_SECTION: + self._section = self._OTHER_SYS_SECTION + else: + self._last_header = '' + return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION @@ -768,16 +1343,32 @@ def __init__(self): self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? self.errors_by_category = {} # string to int dict storing error counts + self.quiet = False # Suppress non-error messagess? # output format: # "emacs" - format that emacs can parse (default) + # "eclipse" - format that eclipse can parse # "vs7" - format that Microsoft Visual Studio 7 can parse + # "junit" - format that Jenkins, Bamboo, etc can parse + # "sed" - returns a gnu sed command to fix the problem + # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users self.output_format = 'emacs' + # For JUnit output, save errors and failures until the end so that they + # can be written into the XML + self._junit_errors = [] + self._junit_failures = [] + def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format + def SetQuiet(self, quiet): + """Sets the module's quiet settings, and returns the previous setting.""" + last_quiet = self.quiet + self.quiet = quiet + return last_quiet + def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level @@ -815,7 +1406,7 @@ def AddFilters(self, filters): for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' - ' (%s does not)' % filt) + f' ({filt} does not)') def BackupFilters(self): """ Saves the current filter list to backup storage.""" @@ -842,10 +1433,70 @@ def IncrementErrorCount(self, category): def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" - for category, count in self.errors_by_category.iteritems(): - sys.stderr.write('Category \'%s\' errors found: %d\n' % - (category, count)) - sys.stderr.write('Total errors found: %d\n' % self.error_count) + for category, count in sorted(dict.items(self.errors_by_category)): + self.PrintInfo(f'Category \'{category}\' errors found: {count}\n') + if self.error_count > 0: + self.PrintInfo(f'Total errors found: {self.error_count}\n') + + def PrintInfo(self, message): + # _quiet does not represent --quiet flag. 
+ # Hide infos from stdout to keep stdout pure for machine consumption + if not _quiet and self.output_format not in _MACHINE_OUTPUTS: + sys.stdout.write(message) + + def PrintError(self, message): + if self.output_format == 'junit': + self._junit_errors.append(message) + else: + sys.stderr.write(message) + + def AddJUnitFailure(self, filename, linenum, message, category, confidence): + self._junit_failures.append((filename, linenum, message, category, + confidence)) + + def FormatJUnitXML(self): + num_errors = len(self._junit_errors) + num_failures = len(self._junit_failures) + + testsuite = xml.etree.ElementTree.Element('testsuite') + testsuite.attrib['errors'] = str(num_errors) + testsuite.attrib['failures'] = str(num_failures) + testsuite.attrib['name'] = 'cpplint' + + if num_errors == 0 and num_failures == 0: + testsuite.attrib['tests'] = str(1) + xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed') + + else: + testsuite.attrib['tests'] = str(num_errors + num_failures) + if num_errors > 0: + testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') + testcase.attrib['name'] = 'errors' + error = xml.etree.ElementTree.SubElement(testcase, 'error') + error.text = '\n'.join(self._junit_errors) + if num_failures > 0: + # Group failures by file + failed_file_order = [] + failures_by_file = {} + for failure in self._junit_failures: + failed_file = failure[0] + if failed_file not in failed_file_order: + failed_file_order.append(failed_file) + failures_by_file[failed_file] = [] + failures_by_file[failed_file].append(failure) + # Create a testcase for each file + for failed_file in failed_file_order: + failures = failures_by_file[failed_file] + testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase') + testcase.attrib['name'] = failed_file + failure = xml.etree.ElementTree.SubElement(testcase, 'failure') + template = '{0}: {1} [{2}] [{3}]' + texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures] + failure.text = '\n'.join(texts) + + xml_decl = '\n' + return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8') + _cpplint_state = _CppLintState() @@ -859,6 +1510,14 @@ def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) +def _Quiet(): + """Return's the module's quiet setting.""" + return _cpplint_state.quiet + +def _SetQuiet(quiet): + """Set the module's quiet status, and return previous setting.""" + return _cpplint_state.SetQuiet(quiet) + def _VerboseLevel(): """Returns the module's verbosity setting.""" @@ -946,7 +1605,10 @@ def Check(self, error, filename, linenum): filename: The name of the current file. linenum: The number of the line to check. """ - if Match(r'T(EST|est)', self.current_function): + if not self.in_a_function: + return + + if re.match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER @@ -959,9 +1621,8 @@ def Check(self, error, filename, linenum): error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' - ' %s has %d non-comment lines' - ' (error triggered by exceeding %d lines).' 
% ( - self.current_function, self.lines_in_function, trigger)) + f' {self.current_function} has {self.lines_in_function} non-comment lines' + f' (error triggered by exceeding {trigger} lines).') def End(self): """Stop analyzing function body.""" @@ -988,12 +1649,12 @@ def FullName(self): return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): - """FullName after removing the local path to the repository. + r"""FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like - "C:\Documents and Settings\..." or "/home/username/..." in them and thus + "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ @@ -1002,6 +1663,20 @@ def RepositoryName(self): if os.path.exists(fullname): project_dir = os.path.dirname(fullname) + # If the user specified a repository path, it exists, and the file is + # contained in it, use the specified repository path + if _repository: + repo = FileInfo(_repository).FullName() + root_dir = project_dir + while os.path.exists(root_dir): + # allow case insensitive compare on Windows + if os.path.normcase(root_dir) == os.path.normcase(repo): + return os.path.relpath(fullname, root_dir).replace('\\', '/') + one_up_dir = os.path.dirname(root_dir) + if one_up_dir == root_dir: + break + root_dir = one_up_dir + if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout @@ -1016,12 +1691,14 @@ def RepositoryName(self): # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. 
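# A minimal, hypothetical sketch (not part of the patch) of how the
# ErrorSuppressions store introduced earlier in this hunk resolves NOLINT,
# NOLINTNEXTLINE and NOLINTBEGIN/NOLINTEND directives. It assumes that
# AddLineSuppression records a single-line inclusive range, as its use in
# ParseNolintSuppressions suggests.
supp = ErrorSuppressions()
supp.AddLineSuppression('readability/todo', 12)        # // NOLINT(readability/todo) on line 12
supp.StartBlockSuppression('whitespace/braces', 20)    # // NOLINTBEGIN(whitespace/braces)
supp.EndBlockSuppression(30)                           # // NOLINTEND
assert supp.IsSuppressed('readability/todo', 12)       # single-line suppression
assert supp.IsSuppressed('whitespace/braces', 25)      # inside the BEGIN/END block
assert not supp.IsSuppressed('whitespace/braces', 31)  # past NOLINTEND
assert not supp.HasOpenBlock()                         # the block was closed
supp.Clear()                                           # what ResetNolintSuppressions() calls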
- root_dir = os.path.dirname(fullname) - while (root_dir != os.path.dirname(root_dir) and - not os.path.exists(os.path.join(root_dir, ".git")) and - not os.path.exists(os.path.join(root_dir, ".hg")) and - not os.path.exists(os.path.join(root_dir, ".svn"))): - root_dir = os.path.dirname(root_dir) + root_dir = current_dir = os.path.dirname(fullname) + while current_dir != os.path.dirname(current_dir): + if (os.path.exists(os.path.join(current_dir, ".git")) or + os.path.exists(os.path.join(current_dir, ".hg")) or + os.path.exists(os.path.join(current_dir, ".svn"))): + root_dir = current_dir + break + current_dir = os.path.dirname(current_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or @@ -1051,7 +1728,7 @@ def BaseName(self): return self.Split()[1] def Extension(self): - """File extension - text following the final period.""" + """File extension - text following the final period, includes that period.""" return self.Split()[2] def NoExtension(self): @@ -1060,10 +1737,10 @@ def NoExtension(self): def IsSource(self): """File has a source file extension.""" - return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + return _IsSourceExtension(self.Extension()[1:]) -def _ShouldPrintError(category, confidence, linenum): +def _ShouldPrintError(category, confidence, filename, linenum): """If confidence >= verbose, category passes filter and is not suppressed.""" # There are three ways we might decide not to print an error message: @@ -1077,11 +1754,16 @@ def _ShouldPrintError(category, confidence, linenum): is_filtered = False for one_filter in _Filters(): + filter_cat, filter_file, filter_line = _ParseFilterSelector(one_filter[1:]) + category_match = category.startswith(filter_cat) + file_match = filter_file == "" or filter_file == filename + line_match = filter_line == linenum or filter_line == -1 + if one_filter.startswith('-'): - if category.startswith(one_filter[1:]): + if category_match and file_match and line_match: is_filtered = True elif one_filter.startswith('+'): - if category.startswith(one_filter[1:]): + if category_match and file_match and line_match: is_filtered = False else: assert False # should have been checked for in SetFilter. @@ -1098,9 +1780,9 @@ def Error(filename, linenum, category, confidence, message): that is, how certain we are this is a legitimate style regression, and not a misidentification or a use that's sometimes justified. - False positives can be suppressed by the use of - "cpplint(category)" comments on the offending line. These are - parsed into _error_suppressions. + False positives can be suppressed by the use of "NOLINT(category)" + comments, NOLINTNEXTLINE or in blocks started by NOLINTBEGIN. These + are parsed into _error_suppressions. Args: filename: The name of the file containing the error. @@ -1113,17 +1795,28 @@ def Error(filename, linenum, category, confidence, message): and 1 meaning that it could be a legitimate construct. message: The error message. 
""" - if _ShouldPrintError(category, confidence, linenum): + if _ShouldPrintError(category, confidence, filename, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': - sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) + _cpplint_state.PrintError(f'{filename}({linenum}): error cpplint:' + f' [{category}] {message} [{confidence}]\n') elif _cpplint_state.output_format == 'eclipse': - sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) + sys.stderr.write(f'{filename}:{linenum}: warning:' + f' {message} [{category}] [{confidence}]\n') + elif _cpplint_state.output_format == 'junit': + _cpplint_state.AddJUnitFailure(filename, linenum, message, category, confidence) + elif _cpplint_state.output_format in ['sed', 'gsed']: + if message in _SED_FIXUPS: + sys.stdout.write(f"{_cpplint_state.output_format} -i" + f" '{linenum}{_SED_FIXUPS[message]}' {filename}" + f" # {message} [{category}] [{confidence}]\n") + else: + sys.stderr.write(f'# {filename}:{linenum}: ' + f' "{message}" [{category}] [{confidence}]\n') else: - sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) + final_message = (f'{filename}:{linenum}: ' + f' {message} [{category}] [{confidence}]\n') + sys.stderr.write(final_message) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. @@ -1193,7 +1886,7 @@ def CleanseRawStrings(raw_lines): # Found the end of the string, match leading space for this # line and resume copying the original lines, and also insert # a "" on the last line. - leading_space = Match(r'^(\s*)\S', line) + leading_space = re.match(r'^(\s*)\S', line) line = leading_space.group(1) + '""' + line[end + len(delimiter):] delimiter = None else: @@ -1206,8 +1899,18 @@ def CleanseRawStrings(raw_lines): while delimiter is None: # Look for beginning of a raw string. # See 2.14.15 [lex.string] for syntax. - matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) - if matched: + # + # Once we have matched a raw string, we check the prefix of the + # line to make sure that the line is not part of a single line + # comment. It's done this way because we remove raw strings + # before removing comments as opposed to removing comments + # before removing raw strings. This is because there are some + # cpplint checks that requires the comments to be preserved, but + # we don't want to check comments that are inside raw strings. + matched = re.match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) + if (matched and + not re.match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//', + matched.group(1))): delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) @@ -1251,7 +1954,7 @@ def FindNextMultiLineCommentEnd(lines, lineix): def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" - # Having // dummy comments makes the lines non-empty, so we will not get + # Having // comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/' @@ -1289,6 +1992,28 @@ def CleanseComments(line): return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) +def ReplaceAlternateTokens(line): + """Replace any alternate token by its original counterpart. 
+ + In order to comply with the google rule stating that unary operators should + never be followed by a space, an exception is made for the 'not' and 'compl' + alternate tokens. For these, any trailing space is removed during the + conversion. + + Args: + line: The line being processed. + + Returns: + The line with alternate tokens replaced. + """ + for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): + token = _ALT_TOKEN_REPLACEMENT[match.group(2)] + tail = '' if match.group(2) in ['not', 'compl'] and match.group(3) == ' ' \ + else r'\3' + line = re.sub(match.re, rf'\1{token}{tail}', line, count=1) + return line + + class CleansedLines(object): """Holds 4 copies of all lines with different preprocessing applied to them. @@ -1301,15 +2026,17 @@ class CleansedLines(object): """ def __init__(self, lines): + if '-readability/alt_tokens' in _cpplint_state.filters: + for i, line in enumerate(lines): + lines[i] = ReplaceAlternateTokens(line) self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) - for linenum in range(len(self.lines_without_raw_strings)): - self.lines.append(CleanseComments( - self.lines_without_raw_strings[linenum])) - elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) + for line in self.lines_without_raw_strings: + self.lines.append(CleanseComments(line)) + elided = self._CollapseStrings(line) self.elided.append(CleanseComments(elided)) def NumLines(self): @@ -1342,7 +2069,7 @@ def _CollapseStrings(elided): collapsed = '' while True: # Find the first quote character - match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) + match = re.match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break @@ -1367,8 +2094,8 @@ def _CollapseStrings(elided): # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). - if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): - match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) + if re.search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): + match_literal = re.match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: @@ -1397,7 +2124,7 @@ def FindEndOfExpressionInLine(line, startpos, stack): On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ - for i in xrange(startpos, len(line)): + for i in range(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack @@ -1410,7 +2137,7 @@ def FindEndOfExpressionInLine(line, startpos, stack): stack.pop() if not stack: return (-1, None) - elif i > 0 and Search(r'\boperator\s*$', line[0:i]): + elif i > 0 and re.search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: @@ -1439,7 +2166,7 @@ def FindEndOfExpressionInLine(line, startpos, stack): # Ignore "->" and operator functions if (i > 0 and - (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): + (line[i - 1] == '-' or re.search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. 
Otherwise, ignore @@ -1486,7 +2213,7 @@ def CloseExpression(clean_lines, linenum, pos): """ line = clean_lines.elided[linenum] - if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): + if (line[pos] not in '({[<') or re.match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line @@ -1534,8 +2261,8 @@ def FindStartOfExpressionInLine(line, endpos, stack): # Ignore it if it's a "->" or ">=" or "operator>" if (i > 0 and (line[i - 1] == '-' or - Match(r'\s>=\s', line[i - 1:]) or - Search(r'\boperator\s*$', line[0:i]))): + re.match(r'\s>=\s', line[i - 1:]) or + re.search(r'\boperator\s*$', line[0:i]))): i -= 1 else: stack.append('>') @@ -1625,8 +2352,8 @@ def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a - # dummy line at the front. - for line in xrange(1, min(len(lines), 11)): + # placeholder line at the front. + for line in range(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, @@ -1643,12 +2370,36 @@ def GetIndentLevel(line): Returns: An integer count of leading spaces, possibly zero. """ - indent = Match(r'^( *)\S', line) + indent = re.match(r'^( *)\S', line) if indent: return len(indent.group(1)) else: return 0 +def PathSplitToList(path): + """Returns the path split into a list by the separator. + + Args: + path: An absolute or relative path (e.g. '/a/b/c/' or '../a') + + Returns: + A list of path components (e.g. ['a', 'b', 'c]). + """ + lst = [] + while True: + (head, tail) = os.path.split(path) + if head == path: # absolute paths end + lst.append(head) + break + if tail == path: # relative paths end + lst.append(tail) + break + + path = head + lst.append(tail) + + lst.reverse() + return lst def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. @@ -1668,11 +2419,62 @@ def GetHeaderGuardCPPVariable(filename): filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') - + fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() - if _root: - file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) + + def FixupPathFromRoot(): + if _root_debug: + sys.stderr.write(f"\n_root fixup, _root = '{_root}'," + f" repository name = '{fileinfo.RepositoryName()}'\n") + + # Process the file path with the --root flag if it was set. + if not _root: + if _root_debug: + sys.stderr.write("_root unspecified\n") + return file_path_from_root + + def StripListPrefix(lst, prefix): + # f(['x', 'y'], ['w, z']) -> None (not a valid prefix) + if lst[:len(prefix)] != prefix: + return None + # f(['a, 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd'] + return lst[(len(prefix)):] + + # root behavior: + # --root=subdir , lstrips subdir from the header guard + maybe_path = StripListPrefix(PathSplitToList(file_path_from_root), + PathSplitToList(_root)) + + if _root_debug: + sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," + + " _root=%s)\n") % (maybe_path, file_path_from_root, _root)) + + if maybe_path: + return os.path.join(*maybe_path) + + # --root=.. 
, will prepend the outer directory to the header guard + full_path = fileinfo.FullName() + # adapt slashes for windows + root_abspath = os.path.abspath(_root).replace('\\', '/') + + maybe_path = StripListPrefix(PathSplitToList(full_path), + PathSplitToList(root_abspath)) + + if _root_debug: + sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " + + "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath)) + + if maybe_path: + return os.path.join(*maybe_path) + + if _root_debug: + sys.stderr.write(f"_root ignore, returning {file_path_from_root}\n") + + # --root=FAKE_DIR is ignored + return file_path_from_root + + file_path_from_root = FixupPathFromRoot() return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' @@ -1696,7 +2498,12 @@ def CheckForHeaderGuard(filename, clean_lines, error): # and not the general NOLINT or NOLINT(*) syntax. raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: - if Search(r'//\s*NOLINT\(build/header_guard\)', i): + if re.search(r'//\s*NOLINT\(build/header_guard\)', i): + return + + # Allow pragma once instead of header guards + for i in raw_lines: + if re.search(r'^\s*#pragma\s+once', i): return cppvar = GetHeaderGuardCPPVariable(filename) @@ -1723,8 +2530,7 @@ def CheckForHeaderGuard(filename, clean_lines, error): if not ifndef or not define or ifndef != define: error(filename, 0, 'build/header_guard', 5, - 'No #ifndef header guard found, suggested CPP variable is: %s' % - cppvar) + f'No #ifndef header guard found, suggested CPP variable is: {cppvar}') return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ @@ -1737,66 +2543,75 @@ def CheckForHeaderGuard(filename, clean_lines, error): ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, - '#ifndef header guard has wrong style, please use: %s' % cppvar) + f'#ifndef header guard has wrong style, please use: {cppvar}') # Check for "//" comments on endif line. ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) - match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) + match = re.match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) if match: if match.group(1) == '_': # Issue low severity warning for deprecated double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, - '#endif line should be "#endif // %s"' % cppvar) + f'#endif line should be "#endif // {cppvar}"') return # Didn't find the corresponding "//" comment. If this file does not # contain any "//" comments at all, it could be that the compiler # only wants "/**/" comments, look for those instead. 
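# Illustrative behaviour (hypothetical paths, not from the patch) of the
# --root fixup above as applied by GetHeaderGuardCPPVariable, plus the new
# escape hatch in CheckForHeaderGuard:
#
#   repository-relative file: src/include/foo/bar.h
#     no --root           -> expected guard SRC_INCLUDE_FOO_BAR_H_
#     --root=src/include  -> prefix stripped, expected guard FOO_BAR_H_
#     --root=..           -> the outer directory is prepended to the guard,
#                            per the comment above
#
#   A header containing "#pragma once" is now exempt from the
#   build/header_guard #ifndef/#define/#endif requirements entirely.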
no_single_line_comments = True - for i in xrange(1, len(raw_lines) - 1): + for i in range(1, len(raw_lines) - 1): line = raw_lines[i] - if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): + if re.match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): no_single_line_comments = False break if no_single_line_comments: - match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) + match = re.match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) if match: if match.group(1) == '_': # Low severity warning for double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, - '#endif line should be "#endif /* %s */"' % cppvar) + f'#endif line should be "#endif /* {cppvar} */"') return # Didn't find anything error(filename, endif_linenum, 'build/header_guard', 5, - '#endif line should be "#endif // %s"' % cppvar) + f'#endif line should be "#endif // {cppvar}"') def CheckHeaderFileIncluded(filename, include_state, error): - """Logs an error if a .cc file does not include its header.""" + """Logs an error if a source file does not include its header.""" # Do not check test files - if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'): - return - fileinfo = FileInfo(filename) - headerfile = filename[0:len(filename) - 2] + 'h' - if not os.path.exists(headerfile): + if re.search(_TEST_FILE_SUFFIX, fileinfo.BaseName()): return - headername = FileInfo(headerfile).RepositoryName() - first_include = 0 - for section_list in include_state.include_list: - for f in section_list: - if headername in f[0] or f[0] in headername: - return - if not first_include: - first_include = f[1] - error(filename, first_include, 'build/include', 5, - '%s should include its header file %s' % (fileinfo.RepositoryName(), - headername)) + first_include = message = None + basefilename = filename[0:len(filename) - len(fileinfo.Extension())] + for ext in GetHeaderExtensions(): + headerfile = basefilename + '.' + ext + if not os.path.exists(headerfile): + continue + headername = FileInfo(headerfile).RepositoryName() + include_uses_unix_dir_aliases = False + for section_list in include_state.include_list: + for f in section_list: + include_text = f[0] + if "./" in include_text: + include_uses_unix_dir_aliases = True + if headername in include_text or include_text in headername: + return + if not first_include: + first_include = f[1] + + message = f'{fileinfo.RepositoryName()} should include its header file {headername}' + if include_uses_unix_dir_aliases: + message += ". Relative paths like . and .. are not allowed." + + if message: + error(filename, first_include, 'build/include', 5, message) def CheckForBadCharacters(filename, lines, error): @@ -1817,7 +2632,7 @@ def CheckForBadCharacters(filename, lines, error): error: The function to call with any errors found. """ for linenum, line in enumerate(lines): - if u'\ufffd' in line: + if '\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: @@ -1929,7 +2744,7 @@ def CheckPosixThreading(filename, clean_lines, linenum, error): for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: # Additional pattern matching check to confirm that this is the # function we are looking for - if Search(pattern, line): + if re.search(pattern, line): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_func + '...) 
instead of ' + single_thread_func + @@ -1949,7 +2764,7 @@ def CheckVlogArguments(filename, clean_lines, linenum, error): error: The function to call with any errors found. """ line = clean_lines.elided[linenum] - if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): + if re.search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') @@ -1983,23 +2798,24 @@ def CheckInvalidIncrement(filename, clean_lines, linenum, error): def IsMacroDefinition(clean_lines, linenum): - if Search(r'^#define', clean_lines[linenum]): + if re.search(r'^#define', clean_lines[linenum]): return True - if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): + if linenum > 0 and re.search(r'\\$', clean_lines[linenum - 1]): return True return False def IsForwardClassDeclaration(clean_lines, linenum): - return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) + return re.match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) class _BlockInfo(object): """Stores information about a generic block of code.""" - def __init__(self, seen_open_brace): + def __init__(self, linenum, seen_open_brace): + self.starting_linenum = linenum self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM @@ -2048,17 +2864,16 @@ def IsBlockInfo(self): class _ExternCInfo(_BlockInfo): """Stores information about an 'extern "C"' block.""" - def __init__(self): - _BlockInfo.__init__(self, True) + def __init__(self, linenum): + _BlockInfo.__init__(self, linenum, True) class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): - _BlockInfo.__init__(self, False) + _BlockInfo.__init__(self, linenum, False) self.name = name - self.starting_linenum = linenum self.is_derived = False self.check_namespace_indentation = True if class_or_struct == 'struct': @@ -2088,15 +2903,15 @@ def __init__(self, name, class_or_struct, clean_lines, linenum): def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' - if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): + if re.search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # If there is a DISALLOW macro, it should appear near the end of # the class. seen_last_thing_in_class = False - for i in xrange(linenum - 1, self.starting_linenum, -1): - match = Search( + for i in range(linenum - 1, self.starting_linenum, -1): + match = re.search( r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + self.name + r'\)', clean_lines.elided[i]) @@ -2106,29 +2921,28 @@ def CheckEnd(self, filename, clean_lines, linenum, error): match.group(1) + ' should be the last thing in the class') break - if not Match(r'^\s*$', clean_lines.elided[i]): + if not re.match(r'^\s*$', clean_lines.elided[i]): seen_last_thing_in_class = True # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. 
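# Sketch (hypothetical file names) of what the rewritten
# CheckHeaderFileIncluded earlier in this hunk now does for widget.cc:
#   - test files matching _TEST_FILE_SUFFIX are skipped;
#   - it looks for a sibling header using every extension from
#     GetHeaderExtensions() (widget.h, widget.hpp, ...), not just .h;
#   - if such a header exists but none of the file's #include lines
#     mentions it, build/include is reported at the first include;
#   - if any include path contains "./", the message additionally notes
#     that relative paths like . and .. are not allowed.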
- indent = Match(r'^( *)\}', clean_lines.elided[linenum]) + indent = re.match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, - 'Closing brace should be aligned with beginning of %s' % parent) + f'Closing brace should be aligned with beginning of {parent}') class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): - _BlockInfo.__init__(self, False) + _BlockInfo.__init__(self, linenum, False) self.name = name or '' - self.starting_linenum = linenum self.check_namespace_indentation = True def CheckEnd(self, filename, clean_lines, linenum, error): @@ -2147,7 +2961,7 @@ def CheckEnd(self, filename, clean_lines, linenum, error): # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 - and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): + and not re.match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. @@ -2164,18 +2978,17 @@ def CheckEnd(self, filename, clean_lines, linenum, error): # expected namespace. if self.name: # Named namespace - if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + - r'[\*/\.\\\s]*$'), + if not re.match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' + + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, - 'Namespace should be terminated with "// namespace %s"' % - self.name) + f'Namespace should be terminated with "// namespace {self.name}"') else: # Anonymous namespace - if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): + if not re.match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form - if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): + if re.match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') @@ -2278,7 +3091,7 @@ def InTemplateArgumentList(self, clean_lines, linenum, pos): while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] - match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) + match = re.match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 @@ -2338,11 +3151,11 @@ def UpdatePreprocessor(self, line): Args: line: current line to check. """ - if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): + if re.match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) - elif Match(r'^\s*#\s*(else|elif)\b', line): + elif re.match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: @@ -2357,7 +3170,7 @@ def UpdatePreprocessor(self, line): else: # TODO(unknown): unexpected #else, issue warning? pass - elif Match(r'^\s*#\s*endif\b', line): + elif re.match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. 
if self.pp_stack: # If we saw an #else, we will need to restore the nesting @@ -2429,7 +3242,7 @@ def Update(self, filename, clean_lines, linenum, error): # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. - namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) + namespace_decl_match = re.match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break @@ -2446,9 +3259,9 @@ def Update(self, filename, clean_lines, linenum, error): # such as in: # class LOCKABLE API Object { # }; - class_decl_match = Match( - r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' - r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' + class_decl_match = re.match( + r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?' + r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): @@ -2476,7 +3289,7 @@ def Update(self, filename, clean_lines, linenum, error): # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] - access_match = Match( + access_match = re.match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) @@ -2487,7 +3300,7 @@ def Update(self, filename, clean_lines, linenum, error): # check if the keywords are not preceded by whitespaces. indent = access_match.group(1) if (len(indent) != classinfo.class_indent + 1 and - Match(r'^\s*$', indent)): + re.match(r'^\s*$', indent)): if classinfo.is_struct: parent = 'struct ' + classinfo.name else: @@ -2496,13 +3309,13 @@ def Update(self, filename, clean_lines, linenum, error): if access_match.group(3): slots = access_match.group(3) error(filename, linenum, 'whitespace/indent', 3, - '%s%s: should be indented +1 space inside %s' % ( - access_match.group(2), slots, parent)) + f'{access_match.group(2)}{slots}:' + f' should be indented +1 space inside {parent}') # Consume braces or semicolons from what's left of the line while True: # Match first brace, semicolon, or closed parenthesis. - matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) + matched = re.match(r'^[^{;)}]*([{;)}])(.*)$', line) if not matched: break @@ -2513,10 +3326,10 @@ def Update(self, filename, clean_lines, linenum, error): # stack otherwise. if not self.SeenOpenBrace(): self.stack[-1].seen_open_brace = True - elif Match(r'^extern\s*"[^"]*"\s*\{', line): - self.stack.append(_ExternCInfo()) + elif re.match(r'^extern\s*"[^"]*"\s*\{', line): + self.stack.append(_ExternCInfo(linenum)) else: - self.stack.append(_BlockInfo(True)) + self.stack.append(_BlockInfo(linenum, True)) if _MATCH_ASM.match(line): self.stack[-1].inline_asm = _BLOCK_ASM @@ -2550,28 +3363,6 @@ def InnermostClass(self): return classinfo return None - def CheckCompletedBlocks(self, filename, error): - """Checks that all classes and namespaces have been completely parsed. - - Call this when all lines in a file have been processed. - Args: - filename: The name of the current file. - error: The function to call with any errors found. - """ - # Note: This test can result in false positives if #ifdef constructs - # get in the way of brace matching. See the testBuildClass test in - # cpplint_unittest.py for an example of this. 
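# Illustration (not from the patch) of the access-specifier indentation
# check above, for a class declared at the left margin:
#
#   class Foo {
#    public:       // OK: indented exactly one space inside "class Foo"
#     Foo();
#   };
#
#   class Bar {
#   public:        // whitespace/indent: public: should be indented +1 space
#   };             //                    inside class Bar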
- for obj in self.stack: - if isinstance(obj, _ClassInfo): - error(filename, obj.starting_linenum, 'build/class', 5, - 'Failed to find complete declaration of class %s' % - obj.name) - elif isinstance(obj, _NamespaceInfo): - error(filename, obj.starting_linenum, 'build/namespaces', 5, - 'Failed to find complete declaration of namespace %s' % - obj.name) - - def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. @@ -2604,46 +3395,47 @@ def CheckForNonStandardConstructs(filename, clean_lines, linenum, # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] - if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): + if re.search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') - if Search(r'printf\s*\(.*".*%\d+\$', line): + if re.search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') - if Search(r'("|\').*\\(%|\[|\(|{)', line): + if re.search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. line = clean_lines.elided[linenum] - if Search(r'\b(const|volatile|void|char|short|int|long' + if re.search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, - 'Storage class (static, extern, typedef, etc) should be first.') + 'Storage-class specifier (static, extern, typedef, etc) should be ' + 'at the beginning of the declaration.') - if Match(r'\s*#\s*endif\s*[^/\s]+', line): + if re.match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') - if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): + if re.match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') - if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', + if re.search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and = 1 and not noarg_constructor and - len(defaulted_args) >= len(constructor_args) - 1)) + len(defaulted_args) >= len(constructor_args) - 1) or + # variadic arguments with zero or one argument + (len(constructor_args) <= 2 and + len(variadic_args) >= 1)) initializer_list_constructor = bool( onearg_constructor and - Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) + re.search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and - Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' - % re.escape(base_classname), constructor_args[0].strip())) + re.match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?' 
+ rf'{re.escape(base_classname)}(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&', + constructor_args[0].strip()) + ) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): - if defaulted_args: - error(filename, linenum, 'runtime/explicit', 5, + if defaulted_args or variadic_args: + error(filename, linenum, 'runtime/explicit', 4, 'Constructors callable with one argument ' 'should be marked explicit.') else: - error(filename, linenum, 'runtime/explicit', 5, + error(filename, linenum, 'runtime/explicit', 4, 'Single-parameter constructors should be marked explicit.') - elif is_marked_explicit and not onearg_constructor: - if noarg_constructor: - error(filename, linenum, 'runtime/explicit', 5, - 'Zero-parameter constructors should not be marked explicit.') - else: - error(filename, linenum, 'runtime/explicit', 0, - 'Constructors that require multiple arguments ' - 'should not be marked explicit.') def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): @@ -2756,7 +3543,7 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): - match = Search(pattern, line) + match = re.search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break @@ -2775,25 +3562,26 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. - not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', + not re.search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. - not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and + not re.search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. - not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): - if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call + not re.search(r' \([^)]+\)\[[^\]]+\]', fncall)): + if re.search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') - elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): + elif re.search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Extra space after (') - if (Search(r'\w\s+\(', fncall) and - not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and - not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and - not Search(r'\bcase\s+\(', fncall)): + if (re.search(r'\w\s+\(', fncall) and + not re.search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and + not re.search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and + not re.search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and + not re.search(r'\bcase\s+\(', fncall)): # TODO(unknown): Space after an operator function seem to be a common # error, silence those for now by restricting them to highest verbosity. 
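# Illustration (hypothetical C++ snippets) of the runtime/explicit rules in
# the constructor check above, which now report at severity 4 and also cover
# constructors made one-argument-callable via defaulted or variadic
# parameters:
#
#   struct A { A(int x); };                          // flagged: Single-parameter
#                                                    // constructors should be
#                                                    // marked explicit.
#   struct B { explicit B(int x); };                 // OK
#   struct C { C(const C& other); };                 // OK: copy constructor
#   struct D { D(std::initializer_list<int> v); };   // OK: initializer_list
#   struct E { E(int x, int y = 0); };               // flagged: callable with
#                                                    // one argument
#   struct F { template <class... T> F(T&&... a); }; // flagged: variadic,
#                                                    // callable with one argument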
- if Search(r'\boperator_*\b', line): + if re.search(r'\boperator_*\b', line): error(filename, linenum, 'whitespace/parens', 0, 'Extra space before ( in function call') else: @@ -2801,10 +3589,10 @@ def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain - if Search(r'[^)]\s+\)\s*[^{\s]', fncall): + if re.search(r'[^)]\s+\)\s*[^{\s]', fncall): # If the closing parenthesis is preceded by only whitespaces, # try to give a more descriptive error message. - if Search(r'^\s+\)', fncall): + if re.search(r'^\s+\)', fncall): error(filename, linenum, 'whitespace/parens', 2, 'Closing ) should be moved to the previous line') else: @@ -2830,10 +3618,10 @@ def IsBlankLine(line): def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error): is_namespace_indent_item = ( - len(nesting_state.stack) > 1 and - nesting_state.stack[-1].check_namespace_indentation and - isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and - nesting_state.previous_stack_top == nesting_state.stack[-2]) + len(nesting_state.stack) >= 1 and + (isinstance(nesting_state.stack[-1], _NamespaceInfo) or + (isinstance(nesting_state.previous_stack_top, _NamespaceInfo))) + ) if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, clean_lines.elided, line): @@ -2846,7 +3634,7 @@ def CheckForFunctionLengths(filename, clean_lines, linenum, """Reports for long function bodies. For an overview why this is done, see: - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions + https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions Uses a simplistic algorithm assuming other style guidelines (especially spacing) are followed. @@ -2870,28 +3658,28 @@ def CheckForFunctionLengths(filename, clean_lines, linenum, starting_func = False regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... - match_result = Match(regexp, line) + match_result = re.match(regexp, line) if match_result: # If the name is all caps and underscores, figure it's a macro and # ignore it, unless it's TEST or TEST_F. function_name = match_result.group(1).split()[-1] if function_name == 'TEST' or function_name == 'TEST_F' or ( - not Match(r'[A-Z_]+$', function_name)): + not re.match(r'[A-Z_]+$', function_name)): starting_func = True if starting_func: body_found = False - for start_linenum in xrange(linenum, clean_lines.NumLines()): + for start_linenum in range(linenum, clean_lines.NumLines()): start_line = lines[start_linenum] joined_line += ' ' + start_line.lstrip() - if Search(r'(;|})', start_line): # Declarations and trivial functions + if re.search(r'(;|})', start_line): # Declarations and trivial functions body_found = True break # ... ignore - elif Search(r'{', start_line): + if re.search(r'{', start_line): body_found = True - function = Search(r'((\w|:)*)\(', line).group(1) - if Match(r'TEST', function): # Handle TEST... macros - parameter_regexp = Search(r'(\(.*\))', joined_line) + function = re.search(r'((\w|:)*)\(', line).group(1) + if re.match(r'TEST', function): # Handle TEST... 
macros + parameter_regexp = re.search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: @@ -2902,10 +3690,10 @@ def CheckForFunctionLengths(filename, clean_lines, linenum, # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') - elif Match(r'^\}\s*$', line): # function end + elif re.match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() - elif not Match(r'^\s*$', line): + elif not re.match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. @@ -2925,11 +3713,9 @@ def CheckComment(line, filename, linenum, next_line_start, error): commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. If so, ignore it - # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison - if (line.count('"', 0, commentpos) - - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes + if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0: # Allow one space for new scopes, two spaces otherwise: - if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and + if (not (re.match(r'^.*{ *//', line) and next_line_start == commentpos) and ((commentpos >= 1 and line[commentpos-1] not in string.whitespace) or (commentpos >= 2 and @@ -2954,7 +3740,8 @@ def CheckComment(line, filename, linenum, next_line_start, error): '"// TODO(my_username): Stuff."') middle_whitespace = match.group(3) - # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison + # Comparisons made explicit for correctness + # -- pylint: disable=g-explicit-bool-comparison if middle_whitespace != ' ' and middle_whitespace != '': error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') @@ -2962,42 +3749,12 @@ def CheckComment(line, filename, linenum, next_line_start, error): # If the comment contains an alphanumeric character, there # should be a space somewhere between it and the // unless # it's a /// or //! Doxygen comment. - if (Match(r'//[^ ]*\w', comment) and - not Match(r'(///|//\!)(\s+|$)', comment)): + if (re.match(r'//[^ ]*\w', comment) and + not re.match(r'(///|//\!)(\s+|$)', comment)): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment') -def CheckAccess(filename, clean_lines, linenum, nesting_state, error): - """Checks for improper use of DISALLOW* macros. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] # get rid of comments and strings - - matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' - r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) - if not matched: - return - if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): - if nesting_state.stack[-1].access != 'private': - error(filename, linenum, 'readability/constructors', 3, - '%s must be in the private: section' % matched.group(1)) - - else: - # Found DISALLOW* macro outside a class declaration, or perhaps it - # was used inside a function when it should have been part of the - # class declaration. 
We could issue a warning here, but it - # probably resulted in a compiler error already. - pass - - def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for the correctness of various spacing issues in the code. @@ -3056,12 +3813,12 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False - if Match(r' {6}\w', prev_line): # Initializer list? + if re.match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. search_position = linenum-2 while (search_position >= 0 - and Match(r' {6}\w', elided[search_position])): + and re.match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') @@ -3072,9 +3829,9 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. - exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', + exception = (re.match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) - or Match(r' {4}:', prev_line)) + or re.match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, @@ -3091,16 +3848,16 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line - and Match(r'\s*}', next_line) + and re.match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') - matched = Match(r'\s*(public|protected|private):', prev_line) + matched = re.match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, - 'Do not leave a blank line after "%s:"' % matched.group(1)) + f'Do not leave a blank line after "{matched.group(1)}:"') # Next, check comments next_line_start = 0 @@ -3112,16 +3869,17 @@ def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): # get rid of comments and strings line = clean_lines.elided[linenum] - # You shouldn't have spaces before your brackets, except maybe after - # 'delete []' or 'return []() {};' - if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): + # You shouldn't have spaces before your brackets, except for C++11 attributes + # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'. + if (re.search(r'\w\s+\[(?!\[)', line) and + not re.search(r'(?:auto&?|delete|return)\s+\[', line)): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. 
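# Illustration (not from the patch) of the whitespace checks above:
#
#   delete [] ptr;            // OK: explicitly allowed before [
#   auto [key, value] = *it;  // OK: structured bindings ("auto [")
#   int x [[maybe_unused]];   // OK: C++11 attribute, "[[" is skipped
#   int arr [3];              // whitespace/braces: Extra space before [
#
#   class Foo {
#    public:
#
#     Foo();                  // whitespace/blank_line: Do not leave a blank
#   };                        // line after "public:"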
- if (Search(r'for *\(.*[^:]:[^: ]', line) or - Search(r'for *\(.*[^: ]:[^:]', line)): + if (re.search(r'for *\(.*[^:]:[^: ]', line) or + re.search(r'for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') @@ -3144,7 +3902,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: - match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) + match = re.match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: @@ -3154,12 +3912,12 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) - if ((Search(r'[\w.]=', line) or - Search(r'=[\w.]', line)) - and not Search(r'\b(if|while|for) ', line) + if ((re.search(r'[\w.]=', line) or + re.search(r'=[\w.]', line)) + and not re.search(r'\b(if|while|for) ', line) # Operators taken from [lex.operators] in C++11 standard. - and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) - and not Search(r'operator=', line)): + and not re.search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line) + and not re.search(r'operator=', line)): error(filename, linenum, 'whitespace/operators', 4, 'Missing spaces around =') @@ -3176,18 +3934,19 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # macro context and don't do any checks. This avoids false # positives. # - # Note that && is not included here. Those are checked separately - # in CheckRValueReference - match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) + # Note that && is not included here. This is because there are too + # many false positives due to RValue references. + match = re.search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) if match: + # TODO: support alternate operators error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around %s' % match.group(1)) - elif not Match(r'#.*include', line): + f'Missing spaces around {match.group(1)}') + elif not re.match(r'#.*include', line): # Look for < that is not surrounded by spaces. This is only # triggered if both sides are missing spaces, even though # technically should should flag if at least one side is missing a # space. This is done to avoid some false positives with shifts. - match = Match(r'^(.*[^\s<])<[^\s=<,]', line) + match = re.match(r'^(.*[^\s<])<[^\s=<,]', line) if match: (_, _, end_pos) = CloseExpression( clean_lines, linenum, len(match.group(1))) @@ -3198,7 +3957,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # Look for > that is not surrounded by spaces. Similar to the # above, we only trigger if both sides are missing spaces to avoid # false positives with shifts. - match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) + match = re.match(r'^(.*[^-\s>])>[^\s=>,]', line) if match: (_, _, start_pos) = ReverseCloseExpression( clean_lines, linenum, len(match.group(1))) @@ -3211,7 +3970,7 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # # We also allow operators following an opening parenthesis, since # those tend to be macros that deal with operators. 
- match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line) + match = re.search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line) if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and not (match.group(1) == 'operator' and match.group(2) == ';')): error(filename, linenum, 'whitespace/operators', 3, @@ -3229,16 +3988,16 @@ def CheckOperatorSpacing(filename, clean_lines, linenum, error): # follows would be part of an identifier, and there should still be # a space separating the template type and the identifier. # type> alpha - match = Search(r'>>[a-zA-Z_]', line) + match = re.search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators - match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) + match = re.search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, - 'Extra space for operator %s' % match.group(1)) + f'Extra space for operator {match.group(1)}') def CheckParenthesisSpacing(filename, clean_lines, linenum, error): @@ -3253,30 +4012,29 @@ def CheckParenthesisSpacing(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # No spaces after an if, while, switch, or for - match = Search(r' (if\(|for\(|while\(|switch\()', line) + match = re.search(r' (if\(|for\(|while\(|switch\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, - 'Missing space before ( in %s' % match.group(1)) + f'Missing space before ( in {match.group(1)}') # For if/for/while/switch, the left and right parens should be # consistent about how many spaces are inside the parens, and # there should either be zero or one spaces inside the parens. # We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. - match = Search(r'\b(if|for|while|switch)\s*' + match = re.search(r'\b(if|for|while|switch)\s*' r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or - not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): + not match.group(2) and re.search(r'\bfor\s*\(.*; \)', line)): error(filename, linenum, 'whitespace/parens', 5, - 'Mismatching spaces inside () in %s' % match.group(1)) + f'Mismatching spaces inside () in {match.group(1)}') if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, - 'Should have zero or one spaces inside ( and ) in %s' % - match.group(1)) + f'Should have zero or one spaces inside ( and ) in {match.group(1)}') def CheckCommaSpacing(filename, clean_lines, linenum, error): @@ -3301,8 +4059,9 @@ def CheckCommaSpacing(filename, clean_lines, linenum, error): # verify that lines contain missing whitespaces, second pass on raw # lines to confirm that those missing whitespaces are not due to # elided comments. 
- if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and - Search(r',[^,\s]', raw[linenum])): + match = re.search(r',[^,\s]', re.sub(r'\b__VA_OPT__\s*\(,\)', '', + re.sub(r'\boperator\s*,\s*\(', 'F(', line))) + if (match and re.search(r',[^,\s]', raw[linenum])): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') @@ -3310,27 +4069,95 @@ def CheckCommaSpacing(filename, clean_lines, linenum, error): # except for few corner cases # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more # space after ; - if Search(r';[^\s};\\)/]', line): + if re.search(r';[^\s};\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') -def CheckBracesSpacing(filename, clean_lines, linenum, error): +def _IsType(clean_lines, nesting_state, expr): + """Check if expression looks like a type name, returns true if so. + + Args: + clean_lines: A CleansedLines instance containing the file. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + expr: The expression to check. + Returns: + True, if token looks like a type. + """ + # Keep only the last token in the expression + last_word = re.match(r'^.*(\b\S+)$', expr) + if last_word: + token = last_word.group(1) + else: + token = expr + + # Match native types and stdint types + if _TYPES.match(token): + return True + + # Try a bit harder to match templated types. Walk up the nesting + # stack until we find something that resembles a typename + # declaration for what we are looking for. + typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) + + r'\b') + block_index = len(nesting_state.stack) - 1 + while block_index >= 0: + if isinstance(nesting_state.stack[block_index], _NamespaceInfo): + return False + + # Found where the opening brace is. We want to scan from this + # line up to the beginning of the function, minus a few lines. + # template + # class C + # : public ... { // start scanning here + last_line = nesting_state.stack[block_index].starting_linenum + + next_block_start = 0 + if block_index > 0: + next_block_start = nesting_state.stack[block_index - 1].starting_linenum + first_line = last_line + while first_line >= next_block_start: + if clean_lines.elided[first_line].find('template') >= 0: + break + first_line -= 1 + if first_line < next_block_start: + # Didn't find any "template" keyword before reaching the next block, + # there are probably no template things to check for this block + block_index -= 1 + continue + + # Look for typename in the specified range + for i in range(first_line, last_line + 1, 1): + if re.search(typename_pattern, clean_lines.elided[i]): + return True + block_index -= 1 + + return False + + +def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for horizontal spacing near commas. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Except after an opening paren, or after another opening brace (in case of # an initializer list, for instance), you should have spaces before your - # braces. And since you should never have braces at the beginning of a line, - # this is an easy test. 
- match = Match(r'^(.*[^ ({>]){', line) + # braces when they are delimiting blocks, classes, namespaces etc. + # And since you should never have braces at the beginning of a line, + # this is an easy test. Except that braces used for initialization don't + # follow the same rule; we often don't want spaces before those. + match = re.match(r'^(.*[^ ({>]){', line) + if match: # Try a bit harder to check for brace initialization. This # happens in one of the following forms: @@ -3360,35 +4187,40 @@ def CheckBracesSpacing(filename, clean_lines, linenum, error): # There is a false negative with this approach if people inserted # spurious semicolons, e.g. "if (cond){};", but we will catch the # spurious semicolon with a separate check. + leading_text = match.group(1) (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] - for offset in xrange(endlinenum + 1, + for offset in range(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] - if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text): + # We also suppress warnings for `uint64_t{expression}` etc., as the style + # guide recommends brace initialization for integral types to avoid + # overflow/truncation. + if (not re.match(r'^[\s}]*[{.;,)<>\]:]', trailing_text) + and not _IsType(clean_lines, nesting_state, leading_text)): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') # Make sure '} else {' has spaces. - if Search(r'}else', line): + if re.search(r'}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. - if Search(r':\s*;\s*$', line): + if re.search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') - elif Search(r'^\s*;\s*$', line): + elif re.search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') - elif (Search(r'\s+;\s*$', line) and - not Search(r'\bfor\b', line)): + elif (re.search(r'\s+;\s*$', line) and + not re.search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') @@ -3407,410 +4239,10 @@ def IsDecltype(clean_lines, linenum, column): (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False - if Search(r'\bdecltype\s*$', text[0:start_col]): + if re.search(r'\bdecltype\s*$', text[0:start_col]): return True return False - -def IsTemplateParameterList(clean_lines, linenum, column): - """Check if the token ending on (linenum, column) is the end of template<>. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is end of a template parameter list, False otherwise. 
- """ - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, column) - if (startpos > -1 and - Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): - return True - return False - - -def IsRValueType(typenames, clean_lines, nesting_state, linenum, column): - """Check if the token ending on (linenum, column) is a type. - - Assumes that text to the right of the column is "&&" or a function - name. - - Args: - typenames: set of type names from template-argument-list. - clean_lines: A CleansedLines instance containing the file. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is a type, False if we are not sure. - """ - prefix = clean_lines.elided[linenum][0:column] - - # Get one word to the left. If we failed to do so, this is most - # likely not a type, since it's unlikely that the type name and "&&" - # would be split across multiple lines. - match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix) - if not match: - return False - - # Check text following the token. If it's "&&>" or "&&," or "&&...", it's - # most likely a rvalue reference used inside a template. - suffix = clean_lines.elided[linenum][column:] - if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix): - return True - - # Check for known types and end of templates: - # int&& variable - # vector&& variable - # - # Because this function is called recursively, we also need to - # recognize pointer and reference types: - # int* Function() - # int& Function() - if (match.group(2) in typenames or - match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool', - 'short', 'int', 'long', 'signed', 'unsigned', - 'float', 'double', 'void', 'auto', '>', '*', '&']): - return True - - # If we see a close parenthesis, look for decltype on the other side. - # decltype would unambiguously identify a type, anything else is - # probably a parenthesized expression and not a type. - if match.group(2) == ')': - return IsDecltype( - clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1) - - # Check for casts and cv-qualifiers. - # match.group(1) remainder - # -------------- --------- - # const_cast< type&& - # const type&& - # type const&& - if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|' - r'reinterpret_cast\s*<|\w+\s)\s*$', - match.group(1)): - return True - - # Look for a preceding symbol that might help differentiate the context. - # These are the cases that would be ambiguous: - # match.group(1) remainder - # -------------- --------- - # Call ( expression && - # Declaration ( type&& - # sizeof ( type&& - # if ( expression && - # while ( expression && - # for ( type&& - # for( ; expression && - # statement ; type&& - # block { type&& - # constructor { expression && - start = linenum - line = match.group(1) - match_symbol = None - while start >= 0: - # We want to skip over identifiers and commas to get to a symbol. - # Commas are skipped so that we can find the opening parenthesis - # for function parameter lists. 
- match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line) - if match_symbol: - break - start -= 1 - line = clean_lines.elided[start] - - if not match_symbol: - # Probably the first statement in the file is an rvalue reference - return True - - if match_symbol.group(2) == '}': - # Found closing brace, probably an indicate of this: - # block{} type&& - return True - - if match_symbol.group(2) == ';': - # Found semicolon, probably one of these: - # for(; expression && - # statement; type&& - - # Look for the previous 'for(' in the previous lines. - before_text = match_symbol.group(1) - for i in xrange(start - 1, max(start - 6, 0), -1): - before_text = clean_lines.elided[i] + before_text - if Search(r'for\s*\([^{};]*$', before_text): - # This is the condition inside a for-loop - return False - - # Did not find a for-init-statement before this semicolon, so this - # is probably a new statement and not a condition. - return True - - if match_symbol.group(2) == '{': - # Found opening brace, probably one of these: - # block{ type&& = ... ; } - # constructor{ expression && expression } - - # Look for a closing brace or a semicolon. If we see a semicolon - # first, this is probably a rvalue reference. - line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1] - end = start - depth = 1 - while True: - for ch in line: - if ch == ';': - return True - elif ch == '{': - depth += 1 - elif ch == '}': - depth -= 1 - if depth == 0: - return False - end += 1 - if end >= clean_lines.NumLines(): - break - line = clean_lines.elided[end] - # Incomplete program? - return False - - if match_symbol.group(2) == '(': - # Opening parenthesis. Need to check what's to the left of the - # parenthesis. Look back one extra line for additional context. - before_text = match_symbol.group(1) - if linenum > 1: - before_text = clean_lines.elided[linenum - 1] + before_text - before_text = match_symbol.group(1) - - # Patterns that are likely to be types: - # [](type&& - # for (type&& - # sizeof(type&& - # operator=(type&& - # - if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text): - return True - - # Patterns that are likely to be expressions: - # if (expression && - # while (expression && - # : initializer(expression && - # , initializer(expression && - # ( FunctionCall(expression && - # + FunctionCall(expression && - # + (expression && - # - # The last '+' represents operators such as '+' and '-'. - if Search(r'(?:\bif|\bwhile|[-+=%^(]*>)?\s*$', - match_symbol.group(1)) - if match_func: - # Check for constructors, which don't have return types. - if Search(r'\b(?:explicit|inline)$', match_func.group(1)): - return True - implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix) - if (implicit_constructor and - implicit_constructor.group(1) == implicit_constructor.group(2)): - return True - return IsRValueType(typenames, clean_lines, nesting_state, linenum, - len(match_func.group(1))) - - # Nothing before the function name. If this is inside a block scope, - # this is probably a function call. - return not (nesting_state.previous_stack_top and - nesting_state.previous_stack_top.IsBlockInfo()) - - if match_symbol.group(2) == '>': - # Possibly a closing bracket, check that what's on the other side - # looks like the start of a template. - return IsTemplateParameterList( - clean_lines, start, len(match_symbol.group(1))) - - # Some other symbol, usually something like "a=b&&c". This is most - # likely not a type. 
- return False - - -def IsDeletedOrDefault(clean_lines, linenum): - """Check if current constructor or operator is deleted or default. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if this is a deleted or default constructor. - """ - open_paren = clean_lines.elided[linenum].find('(') - if open_paren < 0: - return False - (close_line, _, close_paren) = CloseExpression( - clean_lines, linenum, open_paren) - if close_paren < 0: - return False - return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:]) - - -def IsRValueAllowed(clean_lines, linenum, typenames): - """Check if RValue reference is allowed on a particular line. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - typenames: set of type names from template-argument-list. - Returns: - True if line is within the region where RValue references are allowed. - """ - # Allow region marked by PUSH/POP macros - for i in xrange(linenum, 0, -1): - line = clean_lines.elided[i] - if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): - if not line.endswith('PUSH'): - return False - for j in xrange(linenum, clean_lines.NumLines(), 1): - line = clean_lines.elided[j] - if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): - return line.endswith('POP') - - # Allow operator= - line = clean_lines.elided[linenum] - if Search(r'\boperator\s*=\s*\(', line): - return IsDeletedOrDefault(clean_lines, linenum) - - # Allow constructors - match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) - if match and match.group(1) == match.group(2): - return IsDeletedOrDefault(clean_lines, linenum) - if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): - return IsDeletedOrDefault(clean_lines, linenum) - - if Match(r'\s*[\w<>]+\s*\(', line): - previous_line = 'ReturnType' - if linenum > 0: - previous_line = clean_lines.elided[linenum - 1] - if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): - return IsDeletedOrDefault(clean_lines, linenum) - - # Reject types not mentioned in template-argument-list - while line: - match = Match(r'^.*?(\w+)\s*&&(.*)$', line) - if not match: - break - if match.group(1) not in typenames: - return False - line = match.group(2) - - # All RValue types that were in template-argument-list should have - # been removed by now. Those were allowed, assuming that they will - # be forwarded. - # - # If there are no remaining RValue types left (i.e. types that were - # not found in template-argument-list), flag those as not allowed. - return line.find('&&') < 0 - - -def GetTemplateArgs(clean_lines, linenum): - """Find list of template arguments associated with this function declaration. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: Line number containing the start of the function declaration, - usually one line after the end of the template-argument-list. - Returns: - Set of type names, or empty set if this does not appear to have - any template parameters. 
- """ - # Find start of function - func_line = linenum - while func_line > 0: - line = clean_lines.elided[func_line] - if Match(r'^\s*$', line): - return set() - if line.find('(') >= 0: - break - func_line -= 1 - if func_line == 0: - return set() - - # Collapse template-argument-list into a single string - argument_list = '' - match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line]) - if match: - # template-argument-list on the same line as function name - start_col = len(match.group(1)) - _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col) - if end_col > -1 and end_line == func_line: - start_col += 1 # Skip the opening bracket - argument_list = clean_lines.elided[func_line][start_col:end_col] - - elif func_line > 1: - # template-argument-list one line before function name - match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1]) - if match: - end_col = len(match.group(1)) - _, start_line, start_col = ReverseCloseExpression( - clean_lines, func_line - 1, end_col) - if start_col > -1: - start_col += 1 # Skip the opening bracket - while start_line < func_line - 1: - argument_list += clean_lines.elided[start_line][start_col:] - start_col = 0 - start_line += 1 - argument_list += clean_lines.elided[func_line - 1][start_col:end_col] - - if not argument_list: - return set() - - # Extract type names - typenames = set() - while True: - match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$', - argument_list) - if not match: - break - typenames.add(match.group(1)) - argument_list = match.group(2) - return typenames - - -def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): - """Check for rvalue references. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # Find lines missing spaces around &&. - # TODO(unknown): currently we don't check for rvalue references - # with spaces surrounding the && to avoid false positives with - # boolean expressions. - line = clean_lines.elided[linenum] - match = Match(r'^(.*\S)&&', line) - if not match: - match = Match(r'(.*)&&\S', line) - if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): - return - - # Either poorly formed && or an rvalue reference, check the context - # to get a more accurate error message. Mostly we want to determine - # if what's to the left of "&&" is a type or not. - typenames = GetTemplateArgs(clean_lines, linenum) - and_pos = len(match.group(1)) - if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos): - if not IsRValueAllowed(clean_lines, linenum, typenames): - error(filename, linenum, 'build/c++11', 3, - 'RValue references are an unapproved C++ feature.') - else: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around &&') - - def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. 
@@ -3838,7 +4270,7 @@ def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): linenum <= class_info.starting_linenum): return - matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) + matched = re.match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: # Issue warning if the line before public/protected/private was # not a blank line, but don't do this if the previous line contains @@ -3850,20 +4282,20 @@ def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): # common when defining classes in C macros. prev_line = clean_lines.lines[linenum - 1] if (not IsBlankLine(prev_line) and - not Search(r'\b(class|struct)\b', prev_line) and - not Search(r'\\$', prev_line)): + not re.search(r'\b(class|struct)\b', prev_line) and + not re.search(r'\\$', prev_line)): # Try a bit harder to find the beginning of the class. This is to # account for multi-line base-specifier lists, e.g.: # class Derived # : public Base { end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): - if Search(r'\{\s*$', clean_lines.lines[i]): + if re.search(r'\{\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, - '"%s:" should be preceded by a blank line' % matched.group(1)) + f'"{matched.group(1)}:" should be preceded by a blank line') def GetPreviousNonBlankLine(clean_lines, linenum): @@ -3901,31 +4333,36 @@ def CheckBraces(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # get rid of comments and strings - if Match(r'\s*{\s*$', line): + if re.match(r'\s*{\s*$', line): # We allow an open brace to start a line in the case where someone is using # braces in a block to explicitly create a new scope, which is commonly used # to control the lifetime of stack-allocated variables. Braces are also # used for brace initializers inside function calls. We don't detect this # perfectly: we just don't complain if the last non-whitespace character on # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the - # previous line starts a preprocessor block. + # previous line starts a preprocessor block. We also allow a brace on the + # following line if it is part of an array initialization and would not fit + # within the 80 character limit of the preceding line. prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if (not Search(r'[,;:}{(]\s*$', prevline) and - not Match(r'\s*#', prevline)): + if (not re.search(r'[,;:}{(]\s*$', prevline) and + not re.match(r'\s*#', prevline) and + not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') # An else clause should be on the same line as the preceding closing brace. - if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): + if last_wrong := re.match(r'\s*else\b\s*(?:if\b|\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if Match(r'\s*}\s*$', prevline): + if re.match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') + else: + last_wrong = False # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! 
- if Search(r'else if\s*\(', line): # could be multi-line if - brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) + if re.search(r'else if\s*\(', line): # could be multi-line if + brace_on_left = bool(re.search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) @@ -3935,19 +4372,29 @@ def CheckBraces(filename, clean_lines, linenum, error): if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') - elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): + # Prevent detection if statement has { and we detected an improper newline after } + elif re.search(r'}\s*else[^{]*$', line) or (re.match(r'[^}]*else\s*{', line) and not last_wrong): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') - # Likewise, an else should never have the else clause on the same line - if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): - error(filename, linenum, 'whitespace/newline', 4, - 'Else clause should never be on same line as else (use 2 lines)') - - # In the same way, a do/while should never be on one line - if Match(r'\s*do [^\s{]', line): - error(filename, linenum, 'whitespace/newline', 4, - 'do/while clauses should not be on a single line') + # No control clauses with braces should have its contents on the same line + # Exclude } which will be covered by empty-block detect + # Exclude ; which may be used by while in a do-while + if keyword := re.search( + r'\b(else if|if|while|for|switch)' # These have parens + r'\s*\(.*\)\s*(?:\[\[(?:un)?likely\]\]\s*)?{\s*[^\s\\};]', line): + error(filename, linenum, 'whitespace/newline', 5, + f'Controlled statements inside brackets of {keyword.group(1)} clause' + ' should be on a separate line') + elif keyword := re.search( + r'\b(else|do|try)' # These don't have parens + r'\s*(?:\[\[(?:un)?likely\]\]\s*)?{\s*[^\s\\}]', line): + error(filename, linenum, 'whitespace/newline', 5, + f'Controlled statements inside brackets of {keyword.group(1)} clause' + ' should be on a separate line') + + # TODO: Err on if...else and do...while statements without braces; + # style guide has changed since the below comment was written # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, @@ -3956,21 +4403,21 @@ def CheckBraces(filename, clean_lines, linenum, error): # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. - if_else_match = Search(r'\b(if\s*\(|else\b)', line) - if if_else_match and not Match(r'\s*#', line): + if_else_match = re.search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line) + if if_else_match and not re.match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() - if_match = Search(r'\bif\s*\(', line) + if_match = re.search(r'\bif\s*(|constexpr)\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. 
- if (not Match(r'\s*{', endline[endpos:]) - and not (Match(r'\s*$', endline[endpos:]) + if (not re.match(r'\s*(?:\[\[(?:un)?likely\]\]\s*)?{', endline[endpos:]) + and not (re.match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) - and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): + and re.match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 @@ -3980,11 +4427,11 @@ def CheckBraces(filename, clean_lines, linenum, error): # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') - if not Match(r';[\s}]*(\\?)$', endline[endpos:]): + if not re.match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. # Output a warning if the semicolon is not contained inside # a lambda expression. - if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', + if not re.match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') @@ -3995,7 +4442,7 @@ def CheckBraces(filename, clean_lines, linenum, error): # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. - if (if_match and Match(r'\s*else\b', next_line) + if (if_match and re.match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' @@ -4019,9 +4466,9 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are - # required than not, so we use a whitelist approach to check these - # rather than a blacklist. These are the places where "};" should - # be replaced by just "}": + # required than not, so we explicitly list the allowed rules rather + # than listing the disallowed ones. These are the places where "};" + # should be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; @@ -4061,7 +4508,7 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. - match = Match(r'^(.*\)\s*)\{', line) + match = re.match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a @@ -4077,42 +4524,44 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # - # We implement a whitelist of safe macros instead of a blacklist of + # We implement a list of safe macros instead of a list of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because - # the downside for getting the whitelist wrong means some extra - # semicolons, while the downside for getting the blacklist wrong + # the downside for getting the allowed checks wrong means some extra + # semicolons, while the downside for getting disallowed checks wrong # would result in compile errors. 
# # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas - # - alignas specifier with anonymous structs: + # - alignas specifier with anonymous structs + # - decltype closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] - macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) - func = Match(r'^(.*\])\s*$', line_prefix) + macro = re.search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) + func = re.match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or - (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or - Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or - Search(r'\s+=\s*$', line_prefix)): + (func and not re.search(r'\boperator\s*\[\s*\]', func.group(1))) or + re.search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or + re.search(r'\bdecltype$', line_prefix) or + re.search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and - Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): + re.search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. - match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) + match = re.match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # @@ -4123,14 +4572,14 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if prevline and Search(r'[;{}]\s*$', prevline): - match = Match(r'^(\s*)\{', line) + if prevline and re.search(r'[;{}]\s*$', prevline): + match = re.match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) - if endpos > -1 and Match(r'^\s*;', endline[endpos:]): + if endpos > -1 and re.match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # @@ -4138,6 +4587,14 @@ def CheckTrailingSemicolon(filename, clean_lines, linenum, error): # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. + + # We need to check the line forward for NOLINT + raw_lines = clean_lines.raw_lines + ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1, + error) + ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum, + error) + error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") @@ -4159,16 +4616,16 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error): # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] - matched = Match(r'\s*(for|while|if)\s*\(', line) + matched = re.match(r'\s*(for|while|if)\s*\(', line) if matched: - # Find the end of the conditional expression + # Find the end of the conditional expression. 
(end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. - if end_pos >= 0 and Match(r';', end_line[end_pos:]): + if end_pos >= 0 and re.match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') @@ -4176,6 +4633,75 @@ def CheckEmptyBlockBody(filename, clean_lines, linenum, error): error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') + # Check for if statements that have completely empty bodies (no comments) + # and no else clauses. + if end_pos >= 0 and matched.group(1) == 'if': + # Find the position of the opening { for the if statement. + # Return without logging an error if it has no brackets. + opening_linenum = end_linenum + opening_line_fragment = end_line[end_pos:] + # Loop until EOF or find anything that's not whitespace or opening {. + while not re.search(r'^\s*\{', opening_line_fragment): + if re.search(r'^(?!\s*$)', opening_line_fragment): + # Conditional has no brackets. + return + opening_linenum += 1 + if opening_linenum == len(clean_lines.elided): + # Couldn't find conditional's opening { or any code before EOF. + return + opening_line_fragment = clean_lines.elided[opening_linenum] + # Set opening_line (opening_line_fragment may not be entire opening line). + opening_line = clean_lines.elided[opening_linenum] + + # Find the position of the closing }. + opening_pos = opening_line_fragment.find('{') + if opening_linenum == end_linenum: + # We need to make opening_pos relative to the start of the entire line. + opening_pos += end_pos + (closing_line, closing_linenum, closing_pos) = CloseExpression( + clean_lines, opening_linenum, opening_pos) + if closing_pos < 0: + return + + # Now construct the body of the conditional. This consists of the portion + # of the opening line after the {, all lines until the closing line, + # and the portion of the closing line before the }. + if (clean_lines.raw_lines[opening_linenum] != + CleanseComments(clean_lines.raw_lines[opening_linenum])): + # Opening line ends with a comment, so conditional isn't empty. + return + if closing_linenum > opening_linenum: + # Opening line after the {. Ignore comments here since we checked above. + bodylist = list(opening_line[opening_pos+1:]) + # All lines until closing line, excluding closing line, with comments. + bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum]) + # Closing line before the }. Won't (and can't) have comments. + bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1]) + body = '\n'.join(bodylist) + else: + # If statement has brackets and fits on a single line. + body = opening_line[opening_pos+1:closing_pos-1] + + # Check if the body is empty + if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body): + return + # The body is empty. Now make sure there's not an else clause. + current_linenum = closing_linenum + current_line_fragment = closing_line[closing_pos:] + # Loop until EOF or find anything that's not whitespace or else clause. + while re.search(r'^\s*$|^(?=\s*else)', current_line_fragment): + if re.search(r'^(?=\s*else)', current_line_fragment): + # Found an else clause, so don't log an error. 
+ return + current_linenum += 1 + if current_linenum == len(clean_lines.elided): + break + current_line_fragment = clean_lines.elided[current_linenum] + + # The body is empty and there's no else clause until EOF or other code. + error(filename, end_linenum, 'whitespace/empty_if_body', 4, + ('If statement had no body and no else clause')) + def FindCheckMacro(line): """Find a replaceable CHECK-like macro. @@ -4193,7 +4719,7 @@ def FindCheckMacro(line): # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. - matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) + matched = re.match(r'^(.*\b' + macro + r'\s*)\(', line) if not matched: continue return (macro, len(matched.group(1))) @@ -4225,14 +4751,14 @@ def CheckCheck(filename, clean_lines, linenum, error): # If the check macro is followed by something other than a # semicolon, assume users will log their own custom error messages # and don't suggest any replacements. - if not Match(r'\s*;', last_line[end_pos:]): + if not re.match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] - for i in xrange(linenum + 1, end_line): + for i in range(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] @@ -4243,7 +4769,7 @@ def CheckCheck(filename, clean_lines, linenum, error): rhs = '' operator = None while expression: - matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' + matched = re.match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) @@ -4277,9 +4803,9 @@ def CheckCheck(filename, clean_lines, linenum, error): # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. - matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) + matched = re.match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: - matched = Match(r'^(\s*\S)(.*)$', expression) + matched = re.match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) @@ -4303,7 +4829,7 @@ def CheckCheck(filename, clean_lines, linenum, error): lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' - if Match(match_constant, lhs) or Match(match_constant, rhs): + if re.match(match_constant, lhs) or re.match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) @@ -4313,9 +4839,8 @@ def CheckCheck(filename, clean_lines, linenum, error): # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, - 'Consider using %s instead of %s(a %s b)' % ( - _CHECK_REPLACEMENT[check_macro][operator], - check_macro, operator)) + f'Consider using {_CHECK_REPLACEMENT[check_macro][operator]}' + f' instead of {check_macro}(a {operator} b)') def CheckAltTokens(filename, clean_lines, linenum, error): @@ -4330,7 +4855,7 @@ def CheckAltTokens(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Avoid preprocessor lines - if Match(r'^\s*#', line): + if re.match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. 
This will not help @@ -4346,8 +4871,8 @@ def CheckAltTokens(filename, clean_lines, linenum, error): for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, - 'Use operator %s instead of %s' % ( - _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) + f'Use operator {_ALT_TOKEN_REPLACEMENT[match.group(2)]}' + f' instead of {match.group(2)}') def GetLineWidth(line): @@ -4360,12 +4885,22 @@ def GetLineWidth(line): The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ - if isinstance(line, unicode): + if isinstance(line, str): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): + # Issue 337 + # https://mail.python.org/pipermail/python-list/2012-August/628809.html + if (sys.version_info.major, sys.version_info.minor) <= (3, 2): + # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81 + is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4 + # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564 + is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF + if not is_wide_build and is_low_surrogate: + width -= 1 + width += 1 return width else: @@ -4395,6 +4930,7 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] + prev = raw_lines[linenum - 1] if linenum > 0 else '' if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, @@ -4412,32 +4948,37 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; - scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' + scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 - if line and line[-1].isspace(): - error(filename, linenum, 'whitespace/end_of_line', 4, - 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. - elif ((initial_spaces == 1 or initial_spaces == 3) and - not Match(scope_or_label_pattern, cleansed_line) and - not (clean_lines.raw_lines[linenum] != line and - Match(r'^\s*""', line))): + # We also don't check for lines that look like continuation lines + # (of lines ending in double quotes, commas, equals, or angle brackets) + # because the rules for how to indent those are non-trivial. + if (not re.search(r'[",=><] *$', prev) and + (initial_spaces == 1 or initial_spaces == 3) and + not re.match(scope_or_label_pattern, cleansed_line) and + not (clean_lines.raw_lines[linenum] != line and + re.match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') + if line and line[-1].isspace(): + error(filename, linenum, 'whitespace/end_of_line', 4, + 'Line ends in whitespace. Consider deleting these extra spaces.') + # Check if the line is a header guard. 
is_header_guard = False - if file_extension == 'h': + if IsHeaderExtension(file_extension): cppvar = GetHeaderGuardCPPVariable(filename) - if (line.startswith('#ifndef %s' % cppvar) or - line.startswith('#define %s' % cppvar) or - line.startswith('#endif // %s' % cppvar)): + if (line.startswith(f'#ifndef {cppvar}') or + line.startswith(f'#define {cppvar}') or + line.startswith(f'#endif // {cppvar}')): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. @@ -4447,20 +4988,23 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, # # The "$Id:...$" comment may also get very long without it being the # developers fault. + # + # Doxygen documentation copying can get pretty long when using an overloaded + # function declaration if (not line.startswith('#include') and not is_header_guard and - not Match(r'^\s*//.*http(s?)://\S*$', line) and - not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): + not re.match(r'^\s*//.*http(s?)://\S*$', line) and + not re.match(r'^\s*//\s*[^\s]*$', line) and + not re.match(r'^// \$Id:.*#[0-9]+ \$$', line) and + not re.match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)): line_width = GetLineWidth(line) - extended_length = int((_line_length * 1.25)) - if line_width > extended_length: - error(filename, linenum, 'whitespace/line_length', 4, - 'Lines should very rarely be longer than %i characters' % - extended_length) - elif line_width > _line_length: + if line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, - 'Lines should be <= %i characters long' % _line_length) + f'Lines should be <= {_line_length} characters long') if (cleansed_line.count(';') > 1 and + # allow simple single line lambdas + not re.match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}', + line) and # for loops are allowed two ;'s (and may run over two lines). cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or @@ -4476,14 +5020,12 @@ def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) - CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) - CheckBracesSpacing(filename, clean_lines, linenum, error) + CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) - CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() @@ -4519,38 +5061,25 @@ def _DropCommonSuffixes(filename): Returns: The filename with the common suffix removed. 
""" - for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', - 'inl.h', 'impl.h', 'internal.h'): + for suffix in itertools.chain( + (f"{test_suffix.lstrip('_')}.{ext}" + for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())), + (f'{suffix}.{ext}' + for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] -def _IsTestFilename(filename): - """Determines if the given filename has a suffix that identifies it as a test. - - Args: - filename: The input filename. - - Returns: - True if 'filename' looks like a test, False otherwise. - """ - if (filename.endswith('_test.cc') or - filename.endswith('_unittest.cc') or - filename.endswith('_regtest.cc')): - return True - else: - return False - - -def _ClassifyInclude(fileinfo, include, is_system): +def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. - is_system: True if the #include used <> rather than "". + used_angle_brackets: True if the #include used <> rather than "". + include_order: "default" or other value allowed in program arguments Returns: One of the _XXX_HEADER constants. @@ -4560,6 +5089,8 @@ def _ClassifyInclude(fileinfo, include, is_system): _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst") + _OTHER_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), @@ -4570,13 +5101,24 @@ def _ClassifyInclude(fileinfo, include, is_system): """ # This is a list of all standard c++ header files, except # those already checked for above. - is_cpp_h = include in _CPP_HEADERS + is_cpp_header = include in _CPP_HEADERS + + # Mark include as C header if in list or in a known folder for standard-ish C headers. 
+ is_std_c_header = (include_order == "default") or (include in _C_HEADERS + # additional linux glibc header folders + or re.search(rf'(?:{"|".join(C_STANDARD_HEADER_FOLDERS)})\/.*\.h', include)) + + # Headers with C++ extensions shouldn't be considered C system headers + include_ext = os.path.splitext(include)[1] + is_system = used_angle_brackets and include_ext not in ['.hh', '.hpp', '.hxx', '.h++'] if is_system: - if is_cpp_h: + if is_cpp_header: return _CPP_SYS_HEADER - else: + if is_std_c_header: return _C_SYS_HEADER + else: + return _OTHER_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include @@ -4584,9 +5126,11 @@ def _ClassifyInclude(fileinfo, include, is_system): target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) + target_dir_pub = os.path.normpath(target_dir + '/../public') + target_dir_pub = target_dir_pub.replace('\\', '/') if target_base == include_base and ( include_dir == target_dir or - include_dir == os.path.normpath(target_dir + '/../public')): + include_dir == target_dir_pub): return _LIKELY_MY_HEADER # If the target and include share some initial basename @@ -4628,10 +5172,12 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): # # We also make an exception for Lua headers, which follow google # naming convention but not the include convention. - match = Match(r'#include\s*"([^/]+\.h)"', line) - if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): - error(filename, linenum, 'build/include', 4, - 'Include the directory when naming .h files') + match = re.match(r'#include\s*"([^/]+\.(.*))"', line) + if match: + if (IsHeaderExtension(match.group(2)) and + not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1))): + error(filename, linenum, 'build/include_subdir', 4, + 'Include the directory when naming header files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's @@ -4639,17 +5185,33 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) - is_system = (match.group(1) == '<') + used_angle_brackets = match.group(1) == '<' duplicate_line = include_state.FindHeader(include) if duplicate_line >= 0: error(filename, linenum, 'build/include', 4, - '"%s" already included at %s:%s' % - (include, filename, duplicate_line)) - elif (include.endswith('.cc') and + f'"{include}" already included at {filename}:{duplicate_line}') + return + + for extension in GetNonHeaderExtensions(): + if (include.endswith('.' + extension) and os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): - error(filename, linenum, 'build/include', 4, - 'Do not include .cc files from other packages') - elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): + error(filename, linenum, 'build/include', 4, + 'Do not include .' + extension + ' files from other packages') + return + + # We DO want to include a 3rd party looking header if it matches the + # filename. Otherwise we get an erroneous error "...should include its + # header" error later. + third_src_header = False + for ext in GetHeaderExtensions(): + basefilename = filename[0:len(filename) - len(fileinfo.Extension())] + headerfile = basefilename + '.' 
+ ext + headername = FileInfo(headerfile).RepositoryName() + if headername in include or include in headername: + third_src_header = True + break + + if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include): include_state.include_list[-1].append((include, linenum)) # We want to ensure that headers appear in the right order: @@ -4664,16 +5226,16 @@ def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): # track of the highest type seen, and complains if we see a # lower type after that. error_message = include_state.CheckNextIncludeOrder( - _ClassifyInclude(fileinfo, include, is_system)) + _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order)) if error_message: error(filename, linenum, 'build/include_order', 4, - '%s. Should be: %s.h, c system, c++ system, other.' % - (error_message, fileinfo.BaseName())) + f'{error_message}. Should be: {fileinfo.BaseName()}.h, c system,' + ' c++ system, other.') canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) if not include_state.IsInAlphabeticalOrder( clean_lines, linenum, canonical_include): error(filename, linenum, 'build/include_alpha', 4, - 'Include "%s" not in alphabetical order' % include) + f'Include "{include}" not in alphabetical order') include_state.SetLastHeader(canonical_include) @@ -4703,7 +5265,7 @@ def _GetTextInside(text, start_pattern): # Give opening punctuations to get the matching close-punctuations. matching_punctuation = {'(': ')', '{': '}', '[': ']'} - closing_punctuation = set(matching_punctuation.itervalues()) + closing_punctuation = set(dict.values(matching_punctuation)) # Find the position to start extracting text. match = re.search(start_pattern, text, re.M) @@ -4758,6 +5320,9 @@ def _GetTextInside(text, start_pattern): _RE_PATTERN_CONST_REF_PARAM = ( r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') +# Stream types. +_RE_PATTERN_REF_STREAM_PARAM = ( + r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')') def CheckLanguage(filename, clean_lines, linenum, file_extension, @@ -4790,19 +5355,17 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. - match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) + match = re.match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) if match: include_state.ResetSection(match.group(1)) - # Make Windows paths like Unix. - fullname = os.path.abspath(filename).replace('\\', '/') - + # Perform other checks now that we are sure that this is not an include line CheckCasts(filename, clean_lines, linenum, error) CheckGlobalStatic(filename, clean_lines, linenum, error) CheckPrintf(filename, clean_lines, linenum, error) - if file_extension == 'h': + if IsHeaderExtension(file_extension): # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) @@ -4812,15 +5375,15 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. 
- if Search(r'\bshort port\b', line): - if not Search(r'\bunsigned short port\b', line): + if re.search(r'\bshort port\b', line): + if not re.search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: - match = Search(r'\b(short|long(?! +double)|long long)\b', line) + match = re.search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, - 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) + f'Use int16/int64/etc, rather than the C type {match.group(1)}') # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: @@ -4828,13 +5391,13 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& - if Search(r'\boperator\s*&\s*\(\s*\)', line): + if re.search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { - if Search(r'\}\s*if\s*\(', line): + if re.search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') @@ -4847,28 +5410,32 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') if printf_args: - match = Match(r'([\w.\->()]+)$', printf_args) + match = re.match(r'([\w.\->()]+)$', printf_args) if match and match.group(1) != '__VA_ARGS__': function_name = re.search(r'\b((?:string)?printf)\s*\(', line, re.I).group(1) error(filename, linenum, 'runtime/printf', 4, - 'Potential format string bug. Do %s("%%s", %s) instead.' - % (function_name, match.group(1))) + 'Potential format string bug. Do' + f' {function_name}("%s", {match.group(1)}) instead.') # Check for potential memset bugs like memset(buf, sizeof(buf), 0). - match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) - if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): + match = re.search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) + if match and not re.match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): error(filename, linenum, 'runtime/memset', 4, - 'Did you mean "memset(%s, 0, %s)"?' - % (match.group(1), match.group(2))) + f'Did you mean "memset({match.group(1)}, 0, {match.group(2)})"?') - if Search(r'\busing namespace\b', line): - error(filename, linenum, 'build/namespaces', 5, - 'Do not use namespace using-directives. ' - 'Use using-declarations instead.') + if re.search(r'\busing namespace\b', line): + if re.search(r'\bliterals\b', line): + error(filename, linenum, 'build/namespaces_literals', 5, + 'Do not use namespace using-directives. ' + 'Use using-declarations instead.') + else: + error(filename, linenum, 'build/namespaces', 5, + 'Do not use namespace using-directives. ' + 'Use using-declarations instead.') # Detect variable-length arrays. 
- match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) + match = re.match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) if (match and match.group(2) != 'return' and match.group(2) != 'delete' and match.group(3).find(']') == -1): # Split the size using space and arithmetic operators as delimiters. @@ -4882,17 +5449,17 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, skip_next = False continue - if Search(r'sizeof\(.+\)', tok): continue - if Search(r'arraysize\(\w+\)', tok): continue + if re.search(r'sizeof\(.+\)', tok): continue + if re.search(r'arraysize\(\w+\)', tok): continue tok = tok.lstrip('(') tok = tok.rstrip(')') if not tok: continue - if Match(r'\d+', tok): continue - if Match(r'0[xX][0-9a-fA-F]+', tok): continue - if Match(r'k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue + if re.match(r'\d+', tok): continue + if re.match(r'0[xX][0-9a-fA-F]+', tok): continue + if re.match(r'k[A-Z0-9]\w*', tok): continue + if re.match(r'(.+::)?k[A-Z0-9]\w*', tok): continue + if re.match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue # A catch all for tricky sizeof cases, including 'sizeof expression', # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' # requires skipping the next token because we split on ' ' and '*'. @@ -4909,12 +5476,12 @@ def CheckLanguage(filename, clean_lines, linenum, file_extension, # Check for use of unnamed namespaces in header files. Registration # macros are typically OK, so we allow use of "namespace {" on lines # that end with backslashes. - if (file_extension == 'h' - and Search(r'\bnamespace\s*{', line) + if (IsHeaderExtension(file_extension) + and re.search(r'\bnamespace\s*{', line) and line[-1] != '\\'): - error(filename, linenum, 'build/namespaces', 4, + error(filename, linenum, 'build/namespaces_headers', 4, 'Do not use unnamed namespaces in header files. See ' - 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' + 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.') @@ -4930,14 +5497,18 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations - if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): + if linenum + 1 < clean_lines.NumLines() and not re.search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that - # globals with constructors are initialized before the first access. - match = Match( - r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', + # globals with constructors are initialized before the first access, and + # also because globals can be destroyed when some threads are still running. + # TODO(unknown): Generalize this to also find static unique_ptr instances. + # TODO(unknown): File bugs for clang-tidy to find these. + match = re.match( + r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +' + r'([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: @@ -4957,15 +5528,19 @@ def CheckGlobalStatic(filename, clean_lines, linenum, error): # matching identifiers. 
# string Class::operator*() if (match and - not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and - not Search(r'\boperator\W', line) and - not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): - error(filename, linenum, 'runtime/string', 4, - 'For a static/global string constant, use a C style string instead: ' - '"%schar %s[]".' % - (match.group(1), match.group(2))) - - if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): + not re.search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and + not re.search(r'\boperator\W', line) and + not re.match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))): + if re.search(r'\bconst\b', line): + error(filename, linenum, 'runtime/string', 4, + 'For a static/global string constant, use a C style string instead:' + f' "{match.group(1)}char{match.group(2) or ""} {match.group(3)}[]".') + else: + error(filename, linenum, 'runtime/string', 4, + 'Static/global string variables are not permitted.') + + if (re.search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or + re.search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') @@ -4982,21 +5557,21 @@ def CheckPrintf(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. - match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) + match = re.search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. - error(filename, linenum, 'runtime/printf', 3, - 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' - 'to snprintf.' % (match.group(1), match.group(2))) + error(filename, linenum, 'runtime/printf', 3, 'If you can, use' + f' sizeof({match.group(1)}) instead of {match.group(2)}' + ' as the 2nd arg to snprintf.') # Check if some verboten C functions are being used. - if Search(r'\bsprintf\s*\(', line): + if re.search(r'\bsprintf\s*\(', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') - match = Search(r'\b(strcpy|strcat)\s*\(', line) + match = re.search(r'\b(strcpy|strcat)\s*\(', line) if match: error(filename, linenum, 'runtime/printf', 4, - 'Almost always, snprintf is better than %s' % match.group(1)) + f'Almost always, snprintf is better than {match.group(1)}') def IsDerivedFunction(clean_lines, linenum): @@ -5010,14 +5585,14 @@ def IsDerivedFunction(clean_lines, linenum): virt-specifier. """ # Scan back a few lines for start of current function - for i in xrange(linenum, max(-1, linenum - 10), -1): - match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) + for i in range(linenum, max(-1, linenum - 10), -1): + match = re.match(r'^([^()]*\w+)\(', clean_lines.elided[i]) if match: # Look for "override" after the matching closing parenthesis line, _, closing_paren = CloseExpression( clean_lines, i, len(match.group(1))) return (closing_paren >= 0 and - Search(r'\boverride\b', line[closing_paren:])) + re.search(r'\boverride\b', line[closing_paren:])) return False @@ -5031,9 +5606,9 @@ def IsOutOfLineMethodDefinition(clean_lines, linenum): True if current line contains an out-of-line method definition. 
""" # Scan back a few lines for start of current function - for i in xrange(linenum, max(-1, linenum - 10), -1): - if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): - return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None + for i in range(linenum, max(-1, linenum - 10), -1): + if re.match(r'^([^()]*\w+)\(', clean_lines.elided[i]): + return re.match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None return False @@ -5047,24 +5622,24 @@ def IsInitializerList(clean_lines, linenum): True if current line appears to be inside constructor initializer list, False otherwise. """ - for i in xrange(linenum, 1, -1): + for i in range(linenum, 1, -1): line = clean_lines.elided[i] if i == linenum: - remove_function_body = Match(r'^(.*)\{\s*$', line) + remove_function_body = re.match(r'^(.*)\{\s*$', line) if remove_function_body: line = remove_function_body.group(1) - if Search(r'\s:\s*\w+[({]', line): + if re.search(r'\s:\s*\w+[({]', line): # A lone colon tend to indicate the start of a constructor # initializer list. It could also be a ternary operator, which # also tend to appear in constructor initializer lists as # opposed to parameter lists. return True - if Search(r'\}\s*,\s*$', line): + if re.search(r'\}\s*,\s*$', line): # A closing brace followed by a comma is probably the end of a # brace-initialized member in constructor initializer list. return True - if Search(r'[{};]\s*$', line): + if re.search(r'[{};]\s*$', line): # Found one of the following: # - A closing brace or semicolon, probably the end of the previous # function. @@ -5128,13 +5703,13 @@ def CheckForNonConstReference(filename, clean_lines, linenum, # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None - if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): + if re.match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', + previous = re.search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) - elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): + elif re.match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', + previous = re.search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() @@ -5148,7 +5723,7 @@ def CheckForNonConstReference(filename, clean_lines, linenum, # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' - for i in xrange(startline, linenum + 1): + for i in range(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. A single '&' may @@ -5172,15 +5747,15 @@ def CheckForNonConstReference(filename, clean_lines, linenum, # appear inside the second set of parentheses on the current line as # opposed to the first set. 
if linenum > 0: - for i in xrange(linenum - 1, max(0, linenum - 10), -1): + for i in range(linenum - 1, max(0, linenum - 10), -1): previous_line = clean_lines.elided[i] - if not Search(r'[),]\s*$', previous_line): + if not re.search(r'[),]\s*$', previous_line): break - if Match(r'^\s*:\s+\S', previous_line): + if re.match(r'^\s*:\s+\S', previous_line): return # Avoid preprocessors - if Search(r'\\\s*$', line): + if re.search(r'\\\s*$', line): return # Avoid constructor initializer lists @@ -5193,28 +5768,29 @@ def CheckForNonConstReference(filename, clean_lines, linenum, # # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. - whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' + allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') - if Search(whitelisted_functions, line): + if re.search(allowed_functions, line): return - elif not Search(r'\S+\([^)]*$', line): - # Don't see a whitelisted function on this line. Actually we + elif not re.search(r'\S+\([^)]*$', line): + # Don't see an allowed function on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. - for i in xrange(2): + for i in range(2): if (linenum > i and - Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + re.search(allowed_functions, clean_lines.elided[linenum - i - 1])): return - decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body + decls = re.sub(r'{[^}]*}', ' ', line) # exclude function body for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): - if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): + if (not re.match(_RE_PATTERN_CONST_REF_PARAM, parameter) and + not re.match(_RE_PATTERN_REF_STREAM_PARAM, parameter)): error(filename, linenum, 'runtime/references', 2, 'Is this a non-const reference? ' 'If so, make const or use a pointer: ' + - ReplaceAll(' *<', '<', parameter)) + re.sub(' *<', '<', parameter)) def CheckCasts(filename, clean_lines, linenum, error): @@ -5232,8 +5808,8 @@ def CheckCasts(filename, clean_lines, linenum, error): # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. - match = Search( - r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b' + match = re.search( + r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b' r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) expecting_function = ExpectingFunctionArgs(clean_lines, linenum) @@ -5256,7 +5832,7 @@ def CheckCasts(filename, clean_lines, linenum, error): # Avoid arrays by looking for brackets that come after the closing # parenthesis. 
- if Match(r'\([^()]+\)\s*\[', match.group(3)): + if re.match(r'\([^()]+\)\s*\[', match.group(3)): return # Other things to ignore: @@ -5267,19 +5843,18 @@ def CheckCasts(filename, clean_lines, linenum, error): matched_funcptr = match.group(3) if (matched_new_or_template is None and not (matched_funcptr and - (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', + (re.match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr) or matched_funcptr.startswith('(*)'))) and - not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and - not Search(r'new\(\S+\)\s*' + matched_type, line)): + not re.match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and + not re.search(r'new\(\S+\)\s*' + matched_type, line)): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' - 'Use static_cast<%s>(...) instead' % - matched_type) + f'Use static_cast<{matched_type}>(...) instead') if not expecting_function: CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', - r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) + r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # @@ -5304,7 +5879,7 @@ def CheckCasts(filename, clean_lines, linenum, error): # # This is not a cast: # reference_type&(int* function_param); - match = Search( + match = re.search( r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) if match: @@ -5312,7 +5887,7 @@ def CheckCasts(filename, clean_lines, linenum, error): # dereferenced by the casted pointer, as opposed to the casted # pointer itself. parenthesis_error = False - match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) + match = re.match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) if match: _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) if x1 >= 0 and clean_lines.elided[y1][x1] == '(': @@ -5321,7 +5896,7 @@ def CheckCasts(filename, clean_lines, linenum, error): extended_line = clean_lines.elided[y2][x2:] if y2 < clean_lines.NumLines() - 1: extended_line += clean_lines.elided[y2 + 1] - if Match(r'\s*(?:->|\[)', extended_line): + if re.match(r'\s*(?:->|\[)', extended_line): parenthesis_error = True if parenthesis_error: @@ -5353,89 +5928,38 @@ def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): False otherwise. """ line = clean_lines.elided[linenum] - match = Search(pattern, line) + match = re.search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] - if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): + if re.match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. 
if linenum > 0: - for i in xrange(linenum - 1, max(0, linenum - 5), -1): + for i in range(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context - if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): + if re.match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) - if context.endswith(' operator++') or context.endswith(' operator--'): + if (context.endswith(' operator++') or context.endswith(' operator--') or + context.endswith('::operator++') or context.endswith('::operator--')): return False - # A single unnamed argument for a function tends to look like old - # style cast. If we see those, don't issue warnings for deprecated - # casts, instead issue warnings for unnamed arguments where - # appropriate. - # - # These are things that we want warnings for, since the style guide - # explicitly require all parameters to be named: - # Function(int); - # Function(int) { - # ConstMember(int) const; - # ConstMember(int) const { - # ExceptionMember(int) throw (...); - # ExceptionMember(int) throw (...) { - # PureVirtual(int) = 0; - # [](int) -> bool { - # - # These are functions of some sort, where the compiler would be fine - # if they had named parameters, but people often omit those - # identifiers to reduce clutter: - # (FunctionPointer)(int); - # (FunctionPointer)(int) = value; - # Function((function_pointer_arg)(int)) - # Function((function_pointer_arg)(int), int param) - # ; - # <(FunctionPointerTemplateArgument)(int)>; + # A single unnamed argument for a function tends to look like old style cast. + # If we see those, don't issue warnings for deprecated casts. remainder = line[match.end(0):] - if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', + if re.match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): - # Looks like an unnamed parameter. - - # Don't warn on any kind of template arguments. - if Match(r'^\s*>', remainder): - return False - - # Don't warn on assignments to function pointers, but keep warnings for - # unnamed parameters to pure virtual functions. Note that this pattern - # will also pass on assignments of "0" to function pointers, but the - # preferred values for those would be "nullptr" or "NULL". - matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) - if matched_zero and matched_zero.group(1) != '0': - return False - - # Don't warn on function pointer declarations. For this we need - # to check what came before the "(type)" string. - if Match(r'.*\)\s*$', line[0:match.start(0)]): - return False - - # Don't warn if the parameter is named with block comments, e.g.: - # Function(int /*unused_param*/); - raw_line = clean_lines.raw_lines[linenum] - if '/*' in raw_line: - return False - - # Passed all filters, issue warning here. - error(filename, linenum, 'readability/function', 3, - 'All parameters should be named in a function') - return True + return False # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, - 'Using C-style cast. Use %s<%s>(...) instead' % - (cast_type, match.group(1))) + f'Using C-style cast. Use {cast_type}<{match.group(1)}>(...) instead') return True @@ -5452,13 +5976,13 @@ def ExpectingFunctionArgs(clean_lines, linenum): of function types. 
""" line = clean_lines.elided[linenum] - return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or + return (re.match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and - (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', + (re.match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or - Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', + re.match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or - Search(r'\bstd::m?function\s*\<\s*$', + re.search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1])))) @@ -5483,13 +6007,16 @@ def ExpectingFunctionArgs(clean_lines, linenum): )), ('', ('numeric_limits',)), ('', ('list',)), - ('', ('map', 'multimap',)), - ('', ('allocator',)), + ('', ('multimap',)), + ('', ('allocator', 'make_shared', 'make_unique', 'shared_ptr', + 'unique_ptr', 'weak_ptr')), ('', ('queue', 'priority_queue',)), ('', ('set', 'multiset',)), ('', ('stack',)), ('', ('char_traits', 'basic_string',)), ('', ('tuple',)), + ('', ('unordered_map', 'unordered_multimap')), + ('', ('unordered_set', 'unordered_multiset')), ('', ('pair',)), ('', ('vector',)), @@ -5500,26 +6027,77 @@ def ExpectingFunctionArgs(clean_lines, linenum): ('', ('slist',)), ) -_RE_PATTERN_STRING = re.compile(r'\bstring\b') - -_re_pattern_algorithm_header = [] -for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', - 'transform'): - # Match max(..., ...), max(..., ...), but not foo->max, foo.max or - # type::max(). - _re_pattern_algorithm_header.append( - (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), - _template, - '')) +_HEADERS_MAYBE_TEMPLATES = ( + ('', ('copy', 'max', 'min', 'min_element', 'sort', + 'transform', + )), + ('', ('forward', 'make_pair', 'move', 'swap')), + ) +# Non templated types or global objects +_HEADERS_TYPES_OR_OBJS = ( + # String and others are special -- it is a non-templatized type in STL. + ('', ('string',)), + ('', ('cin', 'cout', 'cerr', 'clog', 'wcin', 'wcout', + 'wcerr', 'wclog')), + ('', ('FILE', 'fpos_t'))) + +# Non templated functions +_HEADERS_FUNCTIONS = ( + ('', ('fopen', 'freopen', + 'fclose', 'fflush', 'setbuf', 'setvbuf', 'fread', + 'fwrite', 'fgetc', 'getc', 'fgets', 'fputc', 'putc', + 'fputs', 'getchar', 'gets', 'putchar', 'puts', 'ungetc', + 'scanf', 'fscanf', 'sscanf', 'vscanf', 'vfscanf', + 'vsscanf', 'printf', 'fprintf', 'sprintf', 'snprintf', + 'vprintf', 'vfprintf', 'vsprintf', 'vsnprintf', + 'ftell', 'fgetpos', 'fseek', 'fsetpos', + 'clearerr', 'feof', 'ferror', 'perror', + 'tmpfile', 'tmpnam'),),) + +_re_pattern_headers_maybe_templates = [] +for _header, _templates in _HEADERS_MAYBE_TEMPLATES: + for _template in _templates: + # Match max(..., ...), max(..., ...), but not foo->max, foo.max or + # 'type::max()'. + _re_pattern_headers_maybe_templates.append( + (re.compile(r'((\bstd::)|[^>.:])\b' + _template + r'(<.*?>)?\([^\)]'), + _template, + _header)) + +# Map is often overloaded. Only check, if it is fully qualified. +# Match 'std::map(...)', but not 'map(...)'' +_re_pattern_headers_maybe_templates.append( + (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'), + 'map<>', + '')) + +# Other scripts may reach in and modify this pattern. 
_re_pattern_templates = [] for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: for _template in _templates: _re_pattern_templates.append( - (re.compile(r'(\<|\b)' + _template + r'\s*\<'), + (re.compile(r'((^|(^|\s|((^|\W)::))std::)|[^>.:]\b)' + _template + r'\s*\<'), _template + '<>', _header)) +_re_pattern_types_or_objs = [] +for _header, _types_or_objs in _HEADERS_TYPES_OR_OBJS: + for _type_or_obj in _types_or_objs: + _re_pattern_types_or_objs.append( + (re.compile(r'\b' + _type_or_obj + r'\b'), + _type_or_obj, + _header)) + +_re_pattern_functions = [] +for _header, _functions in _HEADERS_FUNCTIONS: + for _function in _functions: + # Match printf(..., ...), but not foo->printf, foo.printf or + # 'type::printf()'. + _re_pattern_functions.append( + (re.compile(r'([^>.]|^)\b' + _function + r'\([^\)]'), + _function, + _header)) def FilesBelongToSameModule(filename_cc, filename_h): """Check if these two filenames belong to the same module. @@ -5542,7 +6120,7 @@ def FilesBelongToSameModule(filename_cc, filename_h): some false positives. This should be sufficiently rare in practice. Args: - filename_cc: is the path for the .cc file + filename_cc: is the path for the source (e.g. .cc) file filename_h: is the path for the header path Returns: @@ -5550,20 +6128,23 @@ def FilesBelongToSameModule(filename_cc, filename_h): bool: True if filename_cc and filename_h belong to the same module. string: the additional prefix needed to open the header file. """ + fileinfo_cc = FileInfo(filename_cc) + if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions(): + return (False, '') - if not filename_cc.endswith('.cc'): + fileinfo_h = FileInfo(filename_h) + if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')): return (False, '') - filename_cc = filename_cc[:-len('.cc')] - if filename_cc.endswith('_unittest'): - filename_cc = filename_cc[:-len('_unittest')] - elif filename_cc.endswith('_test'): - filename_cc = filename_cc[:-len('_test')] + + filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))] + matched_test_suffix = re.search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName()) + if matched_test_suffix: + filename_cc = filename_cc[:-len(matched_test_suffix.group(1))] + filename_cc = filename_cc.replace('/public/', '/') filename_cc = filename_cc.replace('/internal/', '/') - if not filename_h.endswith('.h'): - return (False, '') - filename_h = filename_h[:-len('.h')] + filename_h = filename_h[:-(len(fileinfo_h.Extension()))] if filename_h.endswith('-inl'): filename_h = filename_h[:-len('-inl')] filename_h = filename_h.replace('/public/', '/') @@ -5576,33 +6157,6 @@ def FilesBelongToSameModule(filename_cc, filename_h): return files_belong_to_same_module, common_path -def UpdateIncludeState(filename, include_dict, io=codecs): - """Fill up the include_dict with new includes found from the file. - - Args: - filename: the name of the header to read. - include_dict: a dictionary in which the headers are inserted. - io: The io factory to use to read the file. Provided for testability. - - Returns: - True if a header was successfully added. False otherwise. 
- """ - headerfile = None - try: - headerfile = io.open(filename, 'r', 'utf8', 'replace') - except IOError: - return False - linenum = 0 - for line in headerfile: - linenum += 1 - clean_line = CleanseComments(line) - match = _RE_PATTERN_INCLUDE.search(clean_line) - if match: - include = match.group(2) - include_dict.setdefault(include, linenum) - return True - - def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): """Reports for missing stl includes. @@ -5624,72 +6178,46 @@ def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, required = {} # A map of header name to linenumber and the template entity. # Example of required: { '': (1219, 'less<>') } - for linenum in xrange(clean_lines.NumLines()): + for linenum in range(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue - # String is special -- it is a non-templatized type in STL. - matched = _RE_PATTERN_STRING.search(line) - if matched: - # Don't warn about strings in non-STL namespaces: - # (We check only the first match per line; good enough.) - prefix = line[:matched.start()] - if prefix.endswith('std::') or not prefix.endswith('::'): - required[''] = (linenum, 'string') + _re_patterns = [] + _re_patterns.extend(_re_pattern_types_or_objs) + _re_patterns.extend(_re_pattern_functions) + for pattern, item, header in _re_patterns: + matched = pattern.search(line) + if matched: + # Don't warn about strings in non-STL namespaces: + # (We check only the first match per line; good enough.) + prefix = line[:matched.start()] + if prefix.endswith('std::') or not prefix.endswith('::'): + required[header] = (linenum, item) - for pattern, template, header in _re_pattern_algorithm_header: + for pattern, template, header in _re_pattern_headers_maybe_templates: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. - if not '<' in line: # Reduces the cpu time usage by skipping lines. + if '<' not in line: # Reduces the cpu time usage by skipping lines. continue for pattern, template, header in _re_pattern_templates: - if pattern.search(line): - required[header] = (linenum, template) + matched = pattern.search(line) + if matched: + # Don't warn about IWYU in non-STL namespaces: + # (We check only the first match per line; good enough.) + prefix = line[:matched.start()] + if prefix.endswith('std::') or not prefix.endswith('::'): + required[header] = (linenum, template) - # The policy is that if you #include something in foo.h you don't need to - # include it again in foo.cc. Here, we will look at possible includes. # Let's flatten the include_state include_list and copy it into a dictionary. include_dict = dict([item for sublist in include_state.include_list for item in sublist]) - # Did we find the header for this file (if any) and successfully load it? - header_found = False - - # Use the absolute path so that matching works properly. - abs_filename = FileInfo(filename).FullName() - - # For Emacs's flymake. - # If cpplint is invoked from Emacs's flymake, a temporary file is generated - # by flymake and that file name might end with '_flymake.cc'. In that case, - # restore original file name here so that the corresponding header file can be - # found. - # e.g. 
If the file name is 'foo_flymake.cc', we should search for 'foo.h' - # instead of 'foo_flymake.h' - abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) - - # include_dict is modified during iteration, so we iterate over a copy of - # the keys. - header_keys = include_dict.keys() - for header in header_keys: - (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) - fullpath = common_path + header - if same_module and UpdateIncludeState(fullpath, include_dict, io): - header_found = True - - # If we can't find the header file for a .cc, assume it's because we don't - # know where to look. In that case we'll give up as we're not sure they - # didn't include it in the .h file. - # TODO(unknown): Do a better job of finding .h files so we are confident that - # not having the .h file means there isn't one. - if filename.endswith('.cc') and not header_found: - return - # All the lines have been processed, report the errors found. - for required_header_unstripped in required: + for required_header_unstripped in sorted(required, key=required.__getitem__): template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_dict: error(filename, required[required_header_unstripped][0], @@ -5721,31 +6249,6 @@ def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): ' OR use pair directly OR if appropriate, construct a pair directly') -def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): - """Check that default lambda captures are not used. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # A lambda introducer specifies a default capture if it starts with "[=" - # or if it starts with "[&" _not_ followed by an identifier. - match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) - if match: - # Found a potential error, check what comes after the lambda-introducer. - # If it's not open parenthesis (for lambda-declarator) or open brace - # (for compound-statement), it's not a lambda. - line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) - if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): - error(filename, linenum, 'build/c++11', - 4, # 4 = high confidence - 'Default lambda captures are an unapproved C++ feature.') - - def CheckRedundantVirtual(filename, clean_lines, linenum, error): """Check if line contains a redundant "virtual" function-specifier. @@ -5757,20 +6260,20 @@ def CheckRedundantVirtual(filename, clean_lines, linenum, error): """ # Look for "virtual" on current line. line = clean_lines.elided[linenum] - virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) + virtual = re.match(r'^(.*)(\bvirtual\b)(.*)$', line) if not virtual: return # Ignore "virtual" keywords that are near access-specifiers. These # are only used in class base-specifier and do not apply to member # functions. - if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or - Match(r'^\s+(public|protected|private)\b', virtual.group(3))): + if (re.search(r'\b(public|protected|private)\s+$', virtual.group(1)) or + re.match(r'^\s+(public|protected|private)\b', virtual.group(3))): return # Ignore the "virtual" keyword from virtual base classes. Usually # there is a column on the same line in these cases (virtual base # classes are rare in google3 because multiple inheritance is rare). 
- if Match(r'^.*[^:]:[^:].*$', line): return + if re.match(r'^.*[^:]:[^:].*$', line): return # Look for the next opening parenthesis. This is the start of the # parameter list (possibly on the next line shortly after virtual). @@ -5780,9 +6283,9 @@ def CheckRedundantVirtual(filename, clean_lines, linenum, error): end_col = -1 end_line = -1 start_col = len(virtual.group(2)) - for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): + for start_line in range(linenum, min(linenum + 3, clean_lines.NumLines())): line = clean_lines.elided[start_line][start_col:] - parameter_list = Match(r'^([^(]*)\(', line) + parameter_list = re.match(r'^([^(]*)\(', line) if parameter_list: # Match parentheses to find the end of the parameter list (_, end_line, end_col) = CloseExpression( @@ -5795,18 +6298,18 @@ def CheckRedundantVirtual(filename, clean_lines, linenum, error): # Look for "override" or "final" after the parameter list # (possibly on the next few lines). - for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): + for i in range(end_line, min(end_line + 3, clean_lines.NumLines())): line = clean_lines.elided[i][end_col:] - match = Search(r'\b(override|final)\b', line) + match = re.search(r'\b(override|final)\b', line) if match: error(filename, linenum, 'readability/inheritance', 4, ('"virtual" is redundant since function is ' - 'already declared as "%s"' % match.group(1))) + f'already declared as "{match.group(1)}"')) # Set end_col to check whole lines after we are done with the # first line. end_col = 0 - if Search(r'[^\w]\s*$', line): + if re.search(r'[^\w]\s*$', line): break @@ -5833,7 +6336,7 @@ def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): return # Check that at most one of "override" or "final" is present, not both - if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): + if re.search(r'\boverride\b', fragment) and re.search(r'\bfinal\b', fragment): error(filename, linenum, 'readability/inheritance', 4, ('"override" is redundant since function is ' 'already declared as "final"')) @@ -5853,15 +6356,17 @@ def IsBlockInNameSpace(nesting_state, is_forward_declaration): Whether or not the new block is directly in a namespace. 
""" if is_forward_declaration: - if len(nesting_state.stack) >= 1 and ( - isinstance(nesting_state.stack[-1], _NamespaceInfo)): - return True - else: - return False + return len(nesting_state.stack) >= 1 and ( + isinstance(nesting_state.stack[-1], _NamespaceInfo)) - return (len(nesting_state.stack) > 1 and - nesting_state.stack[-1].check_namespace_indentation and - isinstance(nesting_state.stack[-2], _NamespaceInfo)) + if len(nesting_state.stack) >= 1: + if isinstance(nesting_state.stack[-1], _NamespaceInfo): + return True + elif (len(nesting_state.stack) > 1 and + isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and + isinstance(nesting_state.stack[-2], _NamespaceInfo)): + return True + return False def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, @@ -5900,14 +6405,14 @@ def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, error): line = raw_lines_no_comments[linenum] - if Match(r'^\s+', line): - error(filename, linenum, 'runtime/indentation_namespace', 4, - 'Do not indent within a namespace') + if re.match(r'^\s+', line): + error(filename, linenum, 'whitespace/indent_namespace', 4, + 'Do not indent within a namespace.') def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, - extra_check_functions=[]): + extra_check_functions=None): """Processes a single line in the file. Args: @@ -5944,14 +6449,15 @@ def ProcessLine(filename, file_extension, clean_lines, line, CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) - CheckDefaultLambdaCaptures(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) - for check_fn in extra_check_functions: - check_fn(filename, clean_lines, line, error) + if extra_check_functions: + for check_fn in extra_check_functions: + check_fn(filename, clean_lines, line, error) -def FlagCxx11Features(filename, clean_lines, linenum, error): - """Flag those c++11 features that we only allow in certain places. + +def FlagCxxHeaders(filename, clean_lines, linenum, error): + """Flag C++ headers that the styleguide restricts. Args: filename: The name of the current file. @@ -5961,43 +6467,24 @@ def FlagCxx11Features(filename, clean_lines, linenum, error): """ line = clean_lines.elided[linenum] + include = re.match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) + # Flag unapproved C++11 headers. - include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) if include and include.group(1) in ('cfenv', - 'condition_variable', 'fenv.h', - 'future', - 'mutex', - 'thread', - 'chrono', 'ratio', - 'regex', - 'system_error', ): error(filename, linenum, 'build/c++11', 5, - ('<%s> is an unapproved C++11 header.') % include.group(1)) - - # The only place where we need to worry about C++11 keywords and library - # features in preprocessor directives is in macro definitions. - if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return - - # These are classes and free functions. The classes are always - # mentioned as std::*, but we only catch the free functions if - # they're not found by ADL. They're alphabetical by header. 
- for top_name in ( - # type_traits - 'alignment_of', - 'aligned_union', - ): - if Search(r'\bstd::%s\b' % top_name, line): - error(filename, linenum, 'build/c++11', 5, - ('std::%s is an unapproved C++11 class or function. Send c-style ' - 'an example of where it would make your code more readable, and ' - 'they may let you use it.') % top_name) + f"<{include.group(1)}> is an unapproved C++11 header.") + + # filesystem is the only unapproved C++17 header + if include and include.group(1) == 'filesystem': + error(filename, linenum, 'build/c++17', 5, + " is an unapproved C++17 header.") def ProcessFileData(filename, file_extension, lines, error, - extra_check_functions=[]): + extra_check_functions=None): """Performs lint checks and reports any errors to the given error function. Args: @@ -6021,24 +6508,26 @@ def ProcessFileData(filename, file_extension, lines, error, ResetNolintSuppressions() CheckForCopyright(filename, lines, error) - + ProcessGlobalSuppressions(lines) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) - if file_extension == 'h': + if IsHeaderExtension(file_extension): CheckForHeaderGuard(filename, clean_lines, error) - for line in xrange(clean_lines.NumLines()): + for line in range(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) - FlagCxx11Features(filename, clean_lines, line, error) - nesting_state.CheckCompletedBlocks(filename, error) + FlagCxxHeaders(filename, clean_lines, line, error) + if _error_suppressions.HasOpenBlock(): + error(filename, _error_suppressions.GetOpenBlockStart(), 'readability/nolint', 5, + 'NONLINT block never ended') CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) - + # Check that the .cc file has included its header if it exists. - if file_extension == 'cc': + if _IsSourceExtension(file_extension): CheckHeaderFileIncluded(filename, include_state, error) # We check here rather than inside ProcessLine so that we see raw @@ -6065,13 +6554,13 @@ def ProcessConfigOverrides(filename): if not base_name: break # Reached the root directory. - cfg_file = os.path.join(abs_path, "CPPLINT.cfg") + cfg_file = os.path.join(abs_path, _config_filename) abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: - with open(cfg_file) as file_handle: + with codecs.open(cfg_file, 'r', 'utf8', 'replace') as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): @@ -6094,36 +6583,48 @@ def ProcessConfigOverrides(filename): if base_name: pattern = re.compile(val) if pattern.match(base_name): - sys.stderr.write('Ignoring "%s": file excluded by "%s". ' + if _cpplint_state.quiet: + # Suppress "Ignoring file" warning when using --quiet. + return False + _cpplint_state.PrintInfo(f'Ignoring "{filename}": file excluded by "{cfg_file}". ' 'File path component "%s" matches ' 'pattern "%s"\n' % - (filename, cfg_file, base_name, val)) + (base_name, val)) return False elif name == 'linelength': global _line_length try: - _line_length = int(val) + _line_length = int(val) except ValueError: - sys.stderr.write('Line length must be numeric.') + _cpplint_state.PrintError('Line length must be numeric.') + elif name == 'extensions': + ProcessExtensionsOption(val) + elif name == 'root': + global _root + # root directories are specified relative to CPPLINT.cfg dir. 
+ _root = os.path.join(os.path.dirname(cfg_file), val) + elif name == 'headers': + ProcessHppHeadersOption(val) + elif name == 'includeorder': + ProcessIncludeOrderOption(val) else: - sys.stderr.write( - 'Invalid configuration option (%s) in file %s\n' % - (name, cfg_file)) + _cpplint_state.PrintError( + f'Invalid configuration option ({name}) in file {cfg_file}\n') except IOError: - sys.stderr.write( - "Skipping config file '%s': Can't open for reading\n" % cfg_file) + _cpplint_state.PrintError( + f"Skipping config file '{cfg_file}': Can't open for reading\n") keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). - for filter in reversed(cfg_filters): - _AddFilters(filter) + for cfg_filter in reversed(cfg_filters): + _AddFilters(cfg_filter) return True -def ProcessFile(filename, vlevel, extra_check_functions=[]): +def ProcessFile(filename, vlevel, extra_check_functions=None): """Does google-lint on a single file. Args: @@ -6139,6 +6640,7 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]): _SetVerboseLevel(vlevel) _BackupFilters() + old_errors = _cpplint_state.error_count if not ProcessConfigOverrides(filename): _RestoreFilters() @@ -6160,7 +6662,8 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]): codecs.getwriter('utf8'), 'replace').read().split('\n') else: - lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') + with codecs.open(filename, 'r', 'utf8', 'replace') as target_file: + lines = target_file.read().split('\n') # Remove trailing '\r'. # The -1 accounts for the extra trailing blank line we get from split() @@ -6172,8 +6675,9 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]): lf_lines.append(linenum + 1) except IOError: - sys.stderr.write( - "Skipping input '%s': Can't open for reading\n" % filename) + # TODO: Maybe make this have an exit code of 2 after all is done + _cpplint_state.PrintError( + f"Skipping input '{filename}': Can't open for reading\n") _RestoreFilters() return @@ -6182,9 +6686,9 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]): # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. - if filename != '-' and file_extension not in _valid_extensions: - sys.stderr.write('Ignoring %s; not a valid file name ' - '(%s)\n' % (filename, ', '.join(_valid_extensions))) + if filename != '-' and file_extension not in GetAllExtensions(): + _cpplint_state.PrintError(f'Ignoring {filename}; not a valid file name' + f' ({(", ".join(GetAllExtensions()))})\n') else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) @@ -6207,7 +6711,10 @@ def ProcessFile(filename, vlevel, extra_check_functions=[]): Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') - sys.stderr.write('Done processing %s\n' % filename) + # Suppress printing anything if --quiet was passed unless the error + # count has increased after processing this file. + if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count: + _cpplint_state.PrintInfo(f'Done processing {filename}\n') _RestoreFilters() @@ -6217,19 +6724,28 @@ def PrintUsage(message): Args: message: The optional error message. 
""" - sys.stderr.write(_USAGE) + sys.stderr.write(_USAGE % (sorted(list(GetAllExtensions())), + ','.join(sorted(list(GetAllExtensions()))), + sorted(GetHeaderExtensions()), + ','.join(sorted(GetHeaderExtensions())))) + if message: sys.exit('\nFATAL ERROR: ' + message) else: - sys.exit(1) + sys.exit(0) +def PrintVersion(): + sys.stdout.write('Cpplint fork (https://github.com/cpplint/cpplint)\n') + sys.stdout.write('cpplint ' + __VERSION__ + '\n') + sys.stdout.write('Python ' + sys.version + '\n') + sys.exit(0) def PrintCategories(): """Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter. """ - sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) + sys.stderr.write(''.join(f' {cat}\n' for cat in _ERROR_CATEGORIES)) sys.exit(0) @@ -6246,27 +6762,43 @@ def ParseArguments(args): """ try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', + 'v=', + 'version', 'counting=', 'filter=', 'root=', + 'repository=', 'linelength=', - 'extensions=']) + 'extensions=', + 'exclude=', + 'recursive', + 'headers=', + 'includeorder=', + 'config=', + 'quiet']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' + quiet = _Quiet() counting_style = '' + recursive = False for (opt, val) in opts: if opt == '--help': PrintUsage(None) + if opt == '--version': + PrintVersion() elif opt == '--output': - if val not in ('emacs', 'vs7', 'eclipse'): - PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') + if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'): + PrintUsage('The only allowed output formats are emacs, vs7, eclipse ' + 'sed, gsed and junit.') output_format = val - elif opt == '--verbose': + elif opt == '--quiet': + quiet = True + elif opt == '--verbose' or opt == '--v': verbosity = int(val) elif opt == '--filter': filters = val @@ -6279,44 +6811,157 @@ def ParseArguments(args): elif opt == '--root': global _root _root = val + elif opt == '--repository': + global _repository + _repository = val elif opt == '--linelength': global _line_length try: - _line_length = int(val) + _line_length = int(val) except ValueError: - PrintUsage('Line length must be digits.') + PrintUsage('Line length must be digits.') + elif opt == '--exclude': + global _excludes + if not _excludes: + _excludes = set() + _excludes.update(glob.glob(val)) elif opt == '--extensions': - global _valid_extensions - try: - _valid_extensions = set(val.split(',')) - except ValueError: - PrintUsage('Extensions must be comma seperated list.') + ProcessExtensionsOption(val) + elif opt == '--headers': + ProcessHppHeadersOption(val) + elif opt == '--recursive': + recursive = True + elif opt == '--includeorder': + ProcessIncludeOrderOption(val) + elif opt == '--config': + global _config_filename + _config_filename = val + if os.path.basename(_config_filename) != _config_filename: + PrintUsage('Config file name must not include directory components.') if not filenames: PrintUsage('No files were specified.') + if recursive: + filenames = _ExpandDirectories(filenames) + + if _excludes: + filenames = _FilterExcludedFiles(filenames) + _SetOutputFormat(output_format) + _SetQuiet(quiet) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) + filenames.sort() return filenames +def _ParseFilterSelector(parameter): + """Parses the given command line parameter for file- and line-specific + exclusions. 
+ readability/casting:file.cpp + readability/casting:file.cpp:43 -def main(): - filenames = ParseArguments(sys.argv[1:]) + Args: + parameter: The parameter value of --filter + + Returns: + [category, filename, line]. + Category is always given. + Filename is either a filename or empty if all files are meant. + Line is either a line in filename or -1 if all lines are meant. + """ + colon_pos = parameter.find(":") + if colon_pos == -1: + return parameter, "", -1 + category = parameter[:colon_pos] + second_colon_pos = parameter.find(":", colon_pos + 1) + if second_colon_pos == -1: + return category, parameter[colon_pos + 1:], -1 + else: + return category, parameter[colon_pos + 1: second_colon_pos], \ + int(parameter[second_colon_pos + 1:]) + +def _ExpandDirectories(filenames): + """Searches a list of filenames and replaces directories in the list with + all files descending from those directories. Files with extensions not in + the valid extensions list are excluded. - # Change stderr to write with replacement characters so we don't die - # if we try to print something containing non-ASCII characters. - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') + Args: + filenames: A list of files or directories - _cpplint_state.ResetErrorCounts() + Returns: + A list of all files that are members of filenames or descended from a + directory in filenames + """ + expanded = set() for filename in filenames: - ProcessFile(filename, _cpplint_state.verbose_level) - _cpplint_state.PrintErrorCounts() + if not os.path.isdir(filename): + expanded.add(filename) + continue + + for root, _, files in os.walk(filename): + for loopfile in files: + fullname = os.path.join(root, loopfile) + if fullname.startswith('.' + os.path.sep): + fullname = fullname[len('.' + os.path.sep):] + expanded.add(fullname) + + filtered = [] + for filename in expanded: + if os.path.splitext(filename)[1][1:] in GetAllExtensions(): + filtered.append(filename) + return filtered + +def _FilterExcludedFiles(fnames): + """Filters out files listed in the --exclude command line switch. File paths + in the switch are evaluated relative to the current working directory + """ + exclude_paths = [os.path.abspath(f) for f in _excludes] + # because globbing does not work recursively, exclude all subpath of all excluded entries + return [f for f in fnames + if not any(e for e in exclude_paths + if _IsParentOrSame(e, os.path.abspath(f)))] + +def _IsParentOrSame(parent, child): + """Return true if child is subdirectory of parent. + Assumes both paths are absolute and don't contain symlinks. + """ + parent = os.path.normpath(parent) + child = os.path.normpath(child) + if parent == child: + return True + + prefix = os.path.commonprefix([parent, child]) + if prefix != parent: + return False + # Note: os.path.commonprefix operates on character basis, so + # take extra care of situations like '/foo/ba' and '/foo/bar/baz' + child_suffix = child[len(prefix):] + child_suffix = child_suffix.lstrip(os.sep) + return child == os.path.join(prefix, child_suffix) + +def main(): + filenames = ParseArguments(sys.argv[1:]) + backup_err = sys.stderr + try: + # Change stderr to write with replacement characters so we don't die + # if we try to print something containing non-ASCII characters. 
+ sys.stderr = codecs.StreamReader(sys.stderr, 'replace') + + _cpplint_state.ResetErrorCounts() + for filename in filenames: + ProcessFile(filename, _cpplint_state.verbose_level) + # If --quiet is passed, suppress printing error count unless there are errors. + if not _cpplint_state.quiet or _cpplint_state.error_count > 0: + _cpplint_state.PrintErrorCounts() + + if _cpplint_state.output_format == 'junit': + sys.stderr.write(_cpplint_state.FormatJUnitXML()) + + finally: + sys.stderr = backup_err sys.exit(_cpplint_state.error_count > 0) diff --git a/deps/librdkafka b/deps/librdkafka index 95a542c8..9416dd80 160000 --- a/deps/librdkafka +++ b/deps/librdkafka @@ -1 +1 @@ -Subproject commit 95a542c87c61d2c45b445f91c73dd5442eb04f3c +Subproject commit 9416dd80fb0dba71ff73a8cb4d2b919f54651006 diff --git a/deps/librdkafka.gyp b/deps/librdkafka.gyp index 3dc6ff48..94057d96 100644 --- a/deps/librdkafka.gyp +++ b/deps/librdkafka.gyp @@ -1,4 +1,7 @@ { + "variables": { + "CKJS_LINKING%": "= 0); + consumer.unsubscribe(); done(); }); }; @@ -180,6 +200,7 @@ describe('Consumer/Producer', function() { consumer.consume(100000, function(err, messages) { t.ifError(err); t.equal(messages.length, 1); + consumer.unsubscribe(); done(); }); }; @@ -228,12 +249,13 @@ describe('Consumer/Producer', function() { setTimeout(function() { producer.produce(topic, null, buffer, null); - }, 500) - consumer.setDefaultConsumeTimeout(2000); + }, 500); + consumer.setDefaultConsumeTimeout(20000); consumer.consume(1000, function(err, messages) { t.ifError(err); t.equal(messages.length, 1); t.deepStrictEqual(events, ["data", "partition.eof"]); + consumer.unsubscribe(); done(); }); }); @@ -261,12 +283,13 @@ describe('Consumer/Producer', function() { setTimeout(function() { producer.produce(topic, null, buffer, null); - }, 2000) - consumer.setDefaultConsumeTimeout(3000); + }, 4000); + consumer.setDefaultConsumeTimeout(20000); consumer.consume(1000, function(err, messages) { t.ifError(err); t.equal(messages.length, 1); t.deepStrictEqual(events, ["partition.eof", "data", "partition.eof"]); + consumer.unsubscribe(); done(); }); }); @@ -276,7 +299,6 @@ describe('Consumer/Producer', function() { var key = 'key'; crypto.randomBytes(4096, function(ex, buffer) { - producer.setPollInterval(10); producer.once('delivery-report', function(err, report) { @@ -292,6 +314,7 @@ describe('Consumer/Producer', function() { t.equal(key, message.key, 'invalid message key'); t.equal(topic, message.topic, 'invalid message topic'); t.ok(message.offset >= 0, 'invalid message offset'); + consumer.unsubscribe(); done(); }); @@ -306,7 +329,6 @@ describe('Consumer/Producer', function() { }); it('should emit \'partition.eof\' events in consumeLoop', function(done) { - crypto.randomBytes(4096, function(ex, buffer) { producer.setPollInterval(10); @@ -314,7 +336,6 @@ describe('Consumer/Producer', function() { t.ifError(err); }); - var events = []; var offsets = []; @@ -337,11 +358,11 @@ describe('Consumer/Producer', function() { setTimeout(function() { producer.produce(topic, null, buffer); - }, 2000); + }, 4000); setTimeout(function() { producer.produce(topic, null, buffer); - }, 4000); + }, 6000); setTimeout(function() { t.deepStrictEqual(events, ['partition.eof', 'data', 'partition.eof', 'data', 'partition.eof']); @@ -352,8 +373,9 @@ describe('Consumer/Producer', function() { startOffset + 1, startOffset + 1, startOffset + 2 ]); + consumer.unsubscribe(); done(); - }, 6000); + }, 8000); }); }); @@ -386,16 +408,26 @@ describe('Consumer/Producer', function() { 
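[Editor's note] The cpplint hunks above add --recursive, --exclude and --headers handling (expanded through _ExpandDirectories and filtered through _FilterExcludedFiles); the Consumer/Producer hunks that follow are unrelated test changes. Purely as a hedged illustration of the new flags, a hypothetical Node helper driving the vendored script could look like the sketch below; the script path, source directory and excluded directory are assumptions and are not part of this patch.

// Hypothetical lint helper (illustration only, not part of this diff).
// Assumes the vendored cpplint lives at deps/cpplint.py and native sources
// sit under src/ -- both paths are assumptions.
const { execFileSync } = require('child_process');

execFileSync('python3', [
  'deps/cpplint.py',
  '--recursive',                // expand directories via _ExpandDirectories
  '--exclude=src/generated',    // dropped via _FilterExcludedFiles (assumed path)
  '--extensions=cc,h',          // handled by ProcessExtensionsOption
  '--output=emacs',
  'src',
], { stdio: 'inherit' });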
run_headers_test(done, headers); }); - it('should be able to produce and consume messages with one header value as int: consumeLoop', function(done) { + it('should be able to produce and consume messages with one header value as string with unicode: consumeLoop', function(done) { var headers = [ - { key: 10 } + { key: '10👍' }, + { key: 'こんにちは' }, + { key: '🌍🌎🌏' } ]; run_headers_test(done, headers); }); - it('should be able to produce and consume messages with one header value as float: consumeLoop', function(done) { + it('should be able to produce and consume messages with one header value as string with emojis: consumeLoop', function(done) { var headers = [ - { key: 1.11 } + { key: '😀😃😄😁' } + ]; + run_headers_test(done, headers); + }); + + it('should be able to produce and consume messages with one header value as string in other languages: consumeLoop', function(done) { + var headers = [ + { key: '你好' }, + { key: 'Привет' } ]; run_headers_test(done, headers); }); @@ -422,18 +454,36 @@ describe('Consumer/Producer', function() { it('should be able to produce and consume messages with multiple headers with mixed values: consumeLoop', function(done) { var headers = [ - { key1: 'value1' }, - { key2: Buffer.from('value2') }, - { key3: 100 }, - { key4: 10.1 }, + { key1: 'value1' }, + { key2: Buffer.from('value2') } ]; run_headers_test(done, headers); }); + it('should not be able to produce any non-string and non-buffer headers: consumeLoop', function(done) { + producer.setPollInterval(10); + + const headerCases = [ + [ { key: 10 } ], + [ { key: null }], + [ { key: undefined }], + ]; + for (const headerCase of headerCases) { + const buffer = Buffer.from('value'); + const key = 'key'; + t.throws( + () => producer.produce(topic, null, buffer, key, null, '', headerCase), + 'must be string or buffer' + ); + } + + done(); + }); + it('should be able to produce and consume messages: empty buffer key and empty value', function(done) { var emptyString = ''; var key = Buffer.from(emptyString); - var value = Buffer.from(''); + var value = Buffer.from(emptyString); producer.setPollInterval(10); @@ -441,7 +491,8 @@ describe('Consumer/Producer', function() { t.notEqual(message.value, null, 'message should not be null'); t.equal(value.toString(), message.value.toString(), 'invalid message value'); t.equal(emptyString, message.key, 'invalid message key'); - done(); + consumer.unsubscribe(); + done(); }); consumer.subscribe([topic]); @@ -462,6 +513,7 @@ describe('Consumer/Producer', function() { t.notEqual(message.value, null, 'message should not be null'); t.equal(value.toString(), message.value.toString(), 'invalid message value'); t.equal(key, message.key, 'invalid message key'); + consumer.unsubscribe(); done(); }); @@ -482,6 +534,7 @@ describe('Consumer/Producer', function() { consumer.once('data', function(message) { t.equal(value, message.value, 'invalid message value'); t.equal(key, message.key, 'invalid message key'); + consumer.unsubscribe(); done(); }); @@ -507,7 +560,7 @@ describe('Consumer/Producer', function() { beforeEach(function(done) { consumer = new Kafka.KafkaConsumer(consumerOpts, { - 'auto.offset.reset': 'largest', + 'auto.offset.reset': 'smallest', }); consumer.connect({}, function(err, d) { @@ -551,6 +604,7 @@ describe('Consumer/Producer', function() { }); consumer.subscribe([topic]); + consumer.setDefaultConsumeTimeout(4000); consumer.consume(); setTimeout(function() { @@ -594,7 +648,7 @@ describe('Consumer/Producer', function() { } }; consumer = new Kafka.KafkaConsumer(consumerOpts, { - 
'auto.offset.reset': 'largest', + 'auto.offset.reset': 'smallest', }); eventListener(consumer); @@ -602,6 +656,7 @@ describe('Consumer/Producer', function() { t.ifError(err); t.equal(typeof d, 'object', 'metadata should be returned'); consumer.subscribe([topic]); + consumer.setDefaultConsumeTimeout(4000); consumer.consume(); setTimeout(function() { producer.produce(topic, null, Buffer.from(''), ''); @@ -650,6 +705,7 @@ describe('Consumer/Producer', function() { t.equal(topic, message.topic, 'invalid message topic'); t.ok(message.offset >= 0, 'invalid message offset'); assert_headers_match(headers, message.headers); + consumer.unsubscribe(); done(); }); @@ -660,8 +716,6 @@ describe('Consumer/Producer', function() { var timestamp = new Date().getTime(); producer.produce(topic, null, buffer, key, timestamp, "", headers); }, 2000); - }); } - }); diff --git a/e2e/consumer.spec.js b/e2e/consumer.spec.js index 257055d4..c408ad45 100644 --- a/e2e/consumer.spec.js +++ b/e2e/consumer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * Copyright (c) 2016-2023 Blizzard Entertainment * * This software may be modified and distributed under the terms @@ -10,17 +10,20 @@ var t = require('assert'); var crypto = require('crypto'); var eventListener = require('./listener'); +const { createTopics, deleteTopics } = require('./topicUtils'); var KafkaConsumer = require('../').KafkaConsumer; var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092'; -var topic = 'test'; describe('Consumer', function() { var gcfg; + let topic; + let createdTopics = []; - beforeEach(function() { + beforeEach(function(done) { var grp = 'kafka-mocha-grp-' + crypto.randomBytes(20).toString('hex'); + topic = 'test' + crypto.randomBytes(20).toString('hex'); gcfg = { 'bootstrap.servers': kafkaBrokerList, 'group.id': grp, @@ -28,6 +31,12 @@ describe('Consumer', function() { 'rebalance_cb': true, 'enable.auto.commit': false }; + createTopics([{topic, num_partitions: 1, replication_factor: 1}], kafkaBrokerList, done); + createdTopics.push(topic); + }); + + after(function(done) { + deleteTopics(createdTopics, kafkaBrokerList, done); }); describe('commit', function() { @@ -94,34 +103,50 @@ describe('Consumer', function() { t.equal(position.length, 0); }); - it('after assign, should get committed array without offsets ', function(done) { - consumer.assign([{topic:topic, partition:0}]); - // Defer this for a second - setTimeout(function() { - consumer.committed(null, 1000, function(err, committed) { - t.ifError(err); - t.equal(committed.length, 1); - t.equal(typeof committed[0], 'object', 'TopicPartition should be an object'); - t.deepStrictEqual(committed[0].partition, 0); - t.equal(committed[0].offset, undefined); - done(); - }); + it('after assign, should get committed array without offsets ', function (done) { + consumer.assign([{ topic: topic, partition: 0 }]); + consumer.committed(null, 1000, function (err, committed) { + t.ifError(err); + t.equal(committed.length, 1); + t.equal(typeof committed[0], 'object', 'TopicPartition should be an object'); + t.deepStrictEqual(committed[0].partition, 0); + t.equal(committed[0].offset, undefined); + done(); }, 1000); }); - it('after assign and commit, should get committed offsets', function(done) { + it('after assign and commit, should get committed offsets with same metadata', function(done) { consumer.assign([{topic:topic, partition:0}]); - consumer.commitSync({topic:topic, 
partition:0, offset:1000}); + consumer.commitSync({topic:topic, partition:0, offset:1000, metadata: 'A string with unicode ǂ'}); consumer.committed(null, 1000, function(err, committed) { t.ifError(err); t.equal(committed.length, 1); t.equal(typeof committed[0], 'object', 'TopicPartition should be an object'); t.deepStrictEqual(committed[0].partition, 0); t.deepStrictEqual(committed[0].offset, 1000); + t.deepStrictEqual(committed[0].metadata, 'A string with unicode ǂ'); done(); }); }); + it('after assign and commit, a different consumer should get the same committed offsets and metadata', function(done) { + consumer.assign([{topic:topic, partition:0}]); + consumer.commitSync({topic:topic, partition:0, offset:1000, metadata: 'A string with unicode ǂ'}); + + let consumer2 = new KafkaConsumer(gcfg, {}); + consumer2.connect({ timeout: 2000 }, function (err, info) { + consumer2.committed([{ topic, partition: 0 }], 1000, function (err, committed) { + t.ifError(err); + t.equal(committed.length, 1); + t.equal(typeof committed[0], 'object', 'TopicPartition should be an object'); + t.deepStrictEqual(committed[0].partition, 0); + t.deepStrictEqual(committed[0].offset, 1000); + t.deepStrictEqual(committed[0].metadata, 'A string with unicode ǂ'); + consumer2.disconnect(done); + }); + }); + }); + it('after assign, before consume, position should return an array without offsets', function(done) { consumer.assign([{topic:topic, partition:0}]); var position = consumer.position(); @@ -154,7 +179,7 @@ describe('Consumer', function() { consumer.connect({ timeout: 2000 }, function(err, info) { t.ifError(err); consumer.assign([{ - topic: 'test', + topic, partition: 0, offset: 0 }]); @@ -172,7 +197,7 @@ describe('Consumer', function() { it('should be able to seek', function(cb) { consumer.seek({ - topic: 'test', + topic, partition: 0, offset: 0 }, 1, function(err) { @@ -183,7 +208,7 @@ describe('Consumer', function() { it('should be able to seek with a timeout of 0', function(cb) { consumer.seek({ - topic: 'test', + topic, partition: 0, offset: 0 }, 0, function(err) { @@ -217,7 +242,7 @@ describe('Consumer', function() { t.equal(0, consumer.subscription().length); consumer.subscribe([topic]); t.equal(1, consumer.subscription().length); - t.equal('test', consumer.subscription()[0]); + t.equal(topic, consumer.subscription()[0]); t.equal(0, consumer.assignments().length); }); @@ -308,6 +333,7 @@ describe('Consumer', function() { consumer.subscribe([topic]); + consumer.setDefaultConsumeTimeout(500); // Topic might not have any messages. 
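[Editor's note] The consumer.spec.js changes above commit an offset together with a metadata string and verify that a second consumer in the same group reads back both. A minimal sketch of that pattern outside the test harness, assuming a connected KafkaConsumer named consumer that is assigned partition 0 of an existing topic; the metadata string is illustrative.

// Sketch: commit an offset with metadata and read it back.
consumer.assign([{ topic, partition: 0 }]);
consumer.commitSync({ topic, partition: 0, offset: 1000, metadata: 'checkpoint-42' });

// committed(null, timeoutMs, cb) returns the committed offsets for the current
// assignment, including the metadata stored above.
consumer.committed(null, 1000, function (err, committed) {
  if (err) throw err;
  console.log(committed[0].offset, committed[0].metadata); // 1000 'checkpoint-42'
});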
consumer.consume(1, function(err, messages) { t.ifError(err); diff --git a/e2e/groups.spec.js b/e2e/groups.spec.js index bc22f13e..e5f989aa 100644 --- a/e2e/groups.spec.js +++ b/e2e/groups.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/e2e/listener.js b/e2e/listener.js index 81c9637f..bb3c1691 100644 --- a/e2e/listener.js +++ b/e2e/listener.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * Copyright (c) 2016-2023 Blizzard Entertainment * * This software may be modified and distributed under the terms diff --git a/e2e/oauthbearer_cb.spec.js b/e2e/oauthbearer_cb.spec.js new file mode 100644 index 00000000..10c440ee --- /dev/null +++ b/e2e/oauthbearer_cb.spec.js @@ -0,0 +1,101 @@ +/* + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library + * + * Copyright (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. + */ + +var Kafka = require('../'); +var t = require('assert'); + +var eventListener = require('./listener'); + +var kafkaBrokerList = process.env.KAFKA_HOST || 'localhost:9092'; + +const oauthbearer_config = 'key=value'; +let oauthbearer_cb_called = 0; + +let oauthbearer_cb_callback = function (config, cb) { + console.log("Called oauthbearer_cb with given config: " + config); + t.equal(config, oauthbearer_config); + oauthbearer_cb_called++; + + // The broker is not expected to be configured for oauthbearer authentication. + // We just want to make sure that token refresh callback is triggered. + cb(new Error('oauthbearer_cb error'), null); +}; + +let oauthbearer_cb_async = async function (config) { + console.log("Called oauthbearer_cb with given config: " + config); + t.equal(config, oauthbearer_config); + oauthbearer_cb_called++; + + // The broker is not expected to be configured for oauthbearer authentication. + // We just want to make sure that token refresh callback is triggered. + throw new Error('oauthbearer_cb error'); +}; + +for (const oauthbearer_cb of [oauthbearer_cb_async, oauthbearer_cb_callback]) { + describe('Client with ' + (oauthbearer_cb.name), function () { + + const commonConfig = { + 'metadata.broker.list': kafkaBrokerList, + 'debug': 'all', + 'security.protocol': 'SASL_PLAINTEXT', + 'sasl.mechanisms': 'OAUTHBEARER', + 'oauthbearer_token_refresh_cb': oauthbearer_cb, + 'sasl.oauthbearer.config': oauthbearer_config, + } + + const checkClient = function (client, done, useCb) { + eventListener(client); + + client.on('error', function (e) { + t.match(e.message, /oauthbearer_cb error/); + }); + + // The default timeout for the connect is 30s, so even if we + // call disconnect() midway, the test ends up being at least 30s. + client.connect({timeout: 2000}); + + // We don't actually expect the connection to succeed, but we want to + // make sure that the oauthbearer_cb is called so give it a couple seconds. + setTimeout(() => { + t.equal(oauthbearer_cb_called >= 1, true); + client.disconnect(() => { + done(); + }); + client = null; + if (!useCb) // for admin client, where disconnect is sync. 
+ done(); + }, 2000); + } + + beforeEach(function (done) { + oauthbearer_cb_called = 0; + done(); + }); + + it('as producer', function (done) { + let producer = new Kafka.Producer(commonConfig); + checkClient(producer, done, true); + producer = null; + }).timeout(5000); + + it('as consumer', function (done) { + const config = Object.assign({ 'group.id': 'gid' }, commonConfig); + let consumer = new Kafka.KafkaConsumer(config); + checkClient(consumer, done, true); + consumer = null; + }).timeout(5000); + + it('as admin', function (done) { + let admin = new Kafka.AdminClient.create(commonConfig); + checkClient(admin, done, false); + admin = null; + }).timeout(5000); + + }); +} diff --git a/e2e/producer-transaction.spec.js b/e2e/producer-transaction.spec.js index 452c5e45..652e8edb 100644 --- a/e2e/producer-transaction.spec.js +++ b/e2e/producer-transaction.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/e2e/producer.spec.js b/e2e/producer.spec.js index 0ed1ff24..7b5a7667 100644 --- a/e2e/producer.spec.js +++ b/e2e/producer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -211,7 +211,7 @@ describe('Producer', function() { producer.produce('test', null, Buffer.from('hai'), 'key'); }); - it('should produce a message with an empty payload and empty key (https://github.com/confluentinc/confluent-kafka-js/issues/117)', function(done) { + it('should produce a message with an empty payload and empty key (https://github.com/confluentinc/confluent-kafka-javascript/issues/117)', function(done) { var tt = setInterval(function() { producer.poll(); }, 200); @@ -232,7 +232,7 @@ describe('Producer', function() { producer.produce('test', null, Buffer.from(''), ''); }); - it('should produce a message with a null payload and null key (https://github.com/confluentinc/confluent-kafka-js/issues/117)', function(done) { + it('should produce a message with a null payload and null key (https://github.com/confluentinc/confluent-kafka-javascript/issues/117)', function(done) { producer.setPollInterval(10); producer.once('delivery-report', function(err, report) { @@ -250,7 +250,7 @@ describe('Producer', function() { producer.produce('test', null, null, null); }); - it('should produce an int64 key (https://github.com/confluentinc/confluent-kafka-js/issues/208)', function(done) { + it('should produce an int64 key (https://github.com/confluentinc/confluent-kafka-javascript/issues/208)', function(done) { var v1 = 0x0000000000000084; var arr = new Uint8Array(8); diff --git a/e2e/schemaregistry/schemaregistry-avro.spec.ts b/e2e/schemaregistry/schemaregistry-avro.spec.ts new file mode 100644 index 00000000..4f8eabf8 --- /dev/null +++ b/e2e/schemaregistry/schemaregistry-avro.spec.ts @@ -0,0 +1,591 @@ +import { KafkaJS } from '@confluentinc/kafka-javascript'; +import { + Metadata, + SchemaRegistryClient, + SchemaInfo +} from '../../schemaregistry/schemaregistry-client'; +import { beforeEach, afterEach, describe, expect, it } from '@jest/globals'; +import { clientConfig } from '../../test/schemaregistry/test-constants'; +import { AvroDeserializer, AvroSerializer, AvroSerializerConfig } from '../../schemaregistry/serde/avro'; +import { SerdeType } from 
"../../schemaregistry/serde/serde"; +import stringify from 'json-stringify-deterministic'; +import { v4 } from 'uuid'; + +let schemaRegistryClient: SchemaRegistryClient; +let serializerConfig: AvroSerializerConfig; +let serializer: AvroSerializer; +let deserializer: AvroDeserializer; +let producer: KafkaJS.Producer; +let consumer: KafkaJS.Consumer; + +const kafkaBrokerList = 'localhost:9092'; +const kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [kafkaBrokerList], + }, +}); + + +const userSchemaString: string = stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + ], +}); + +const messageValue = { + "name": "Bob Jones", + "age": 25 +}; + +const metadata: Metadata = { + properties: { + owner: 'Bob Jones', + email: 'bob@acme.com', + }, +}; + +const schemaInfo: SchemaInfo = { + schema: userSchemaString, + metadata: metadata +}; + +describe('Schema Registry Avro Integration Test', () => { + + beforeEach(async () => { + schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + await producer.connect(); + + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + }); + + afterEach(async () => { + await producer.disconnect(); + }); + + it("Should serialize and deserialize Avro", async () => { + const testTopic = v4(); + + await schemaRegistryClient.register(testTopic + "-value", schemaInfo); + + serializerConfig = { useLatestVersion: true }; + serializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + deserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(testTopic, messageValue) + }; + + await producer.send({ + topic: testTopic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + await consumer.subscribe({ topic: testTopic }); + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(testTopic, message.value as Buffer) + }; + messageRcvd = true; + + expect(decodedMessage.value).toMatchObject(messageValue); + }, + }); + + // Wait around until we get a message, and then disconnect. 
+ while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should fail to serialize with useLatestVersion enabled and autoRegisterSchemas disabled', async () => { + const testTopic = v4(); + + serializerConfig = { autoRegisterSchemas: false, useLatestVersion: true }; + serializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + + const messageValue = { "name": "Bob Jones", "age": 25 }; + + await expect(serializer.serialize(testTopic, messageValue)).rejects.toThrowError(); + }); + + it('Should serialize with autoRegisterSchemas enabled and useLatestVersion disabled', async () => { + const testTopic = v4(); + await schemaRegistryClient.register(testTopic +' -value', schemaInfo); + + serializerConfig = { autoRegisterSchemas: true, useLatestVersion: false }; + serializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + + const messageValue = { "name": "Bob Jones", "age": 25 }; + + await serializer.serialize(testTopic, messageValue); + }); + //TODO: Add test for Incompatible Types. The current Kafka Client runs console.error instead of throwing error + //Should use a spy, Jest wasn't playing nice with the spy + + it('Should produce generic message to multiple topics', async () => { + const topic1 = v4(); + const topic2 = v4(); + + await schemaRegistryClient.register(topic1, schemaInfo); + await schemaRegistryClient.register(topic2, schemaInfo); + + serializerConfig = { autoRegisterSchemas: true }; + serializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + deserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const outgoingMessage1 = { + key: 'key', + value: await serializer.serialize(topic1, messageValue) + }; + + const outgoingMessage2 = { + key: 'key', + value: await serializer.serialize(topic2, messageValue) + }; + + await producer.send( + { topic: topic1, messages: [outgoingMessage1] }, + ); + + await producer.send( + { topic: topic2, messages: [outgoingMessage2] }, + ); + + let consumer2 = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topic1 }); + await consumer2.connect(); + await consumer2.subscribe({ topic: topic2 }); + + let messageRcvd = false; + let messageRcvd2 = false; + + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic1, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toMatchObject(messageValue); + }, + }); + + await consumer2.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic2, message.value as Buffer) + }; + messageRcvd2 = true; + expect(decodedMessage.value).toMatchObject(messageValue); + }, + }); + + while (!messageRcvd || !messageRcvd2) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + await consumer2.disconnect(); + }, 30000); +}); + +describe('Schema Registry Avro Integration Test - Primitives', () => { + beforeEach(async () => { + schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + 
}); + await producer.connect(); + serializerConfig = { useLatestVersion: true }; + + serializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + deserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + }); + + afterEach(async () => { + await producer.disconnect(); + }); + + it('Should serialize and deserialize string', async () => { + const stringTopic = v4(); + + const stringSchemaString = stringify({ + type: 'string', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: stringSchemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(stringTopic + "-value", stringSchemaInfo); + + const stringMessageValue = "Hello, World!"; + const outgoingStringMessage = { + key: 'key', + value: await serializer.serialize(stringTopic, stringMessageValue) + }; + + await producer.send({ + topic: stringTopic, + messages: [outgoingStringMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic: stringTopic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(stringTopic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(stringMessageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize bytes', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'bytes', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); + + const messageValue = Buffer.from("Hello, World!"); + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize int', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'int', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); + + const messageValue = 25; + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + 
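[Editor's note] The primitive-type tests in this spec (string, bytes, int, and the long/boolean/float/double cases that follow) all repeat the same register, serialize, produce, consume, deserialize cycle. A condensed sketch of that shared pattern is below; the topic name is illustrative and the client, producer, consumer, serializer and deserializer are assumed to be constructed exactly as in the spec.

// Condensed sketch of the round-trip each primitive test performs.
const stringify = require('json-stringify-deterministic');

async function primitiveRoundTrip(client, producer, consumer, serializer, deserializer, type, value) {
  const topic = 'primitive-demo-' + type;                       // illustrative topic name
  await client.register(topic + '-value', { schema: stringify({ type }) });

  await producer.send({
    topic,
    messages: [{ key: 'key', value: await serializer.serialize(topic, value) }],
  });

  await consumer.connect();
  await consumer.subscribe({ topic });
  await consumer.run({
    eachMessage: async ({ message }) => {
      const decoded = await deserializer.deserialize(topic, message.value);
      console.log(type, decoded);                               // expected to equal `value`
    },
  });
}

// e.g. await primitiveRoundTrip(schemaRegistryClient, producer, consumer,
//                               serializer, deserializer, 'long', 25);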
}, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize long', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'long', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); + + const messageValue = 25; + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize boolean', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'boolean', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); + + const messageValue = true; + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize float', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'float', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); + + const messageValue = 1.354; + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it('Should serialize and deserialize double', async () => { + const topic = v4(); + + const schemaString = stringify({ + type: 'double', + }); + + const stringSchemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata + }; + + await schemaRegistryClient.register(topic + "-value", stringSchemaInfo); 
+ + const messageValue = 1.354; + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(topic, messageValue) + }; + + await producer.send({ + topic: topic, + messages: [outgoingMessage] + }); + + await consumer.connect(); + + await consumer.subscribe({ topic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(topic, message.value as Buffer) + }; + messageRcvd = true; + expect(decodedMessage.value).toBe(messageValue); + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + //Waiting on the null case +}); \ No newline at end of file diff --git a/e2e/schemaregistry/schemaregistry-client.spec.ts b/e2e/schemaregistry/schemaregistry-client.spec.ts new file mode 100644 index 00000000..70dcda93 --- /dev/null +++ b/e2e/schemaregistry/schemaregistry-client.spec.ts @@ -0,0 +1,227 @@ +import { + Compatibility, + SchemaRegistryClient, + ServerConfig, + SchemaInfo, + SchemaMetadata, + Metadata +} from '../../schemaregistry/schemaregistry-client'; +import { beforeEach, describe, expect, it } from '@jest/globals'; +import { clientConfig } from '../../test/schemaregistry/test-constants'; +import { v4 } from 'uuid'; + +/* eslint-disable @typescript-eslint/no-non-null-asserted-optional-chain */ + +let schemaRegistryClient: SchemaRegistryClient; +const testSubject = 'integ-test-subject'; +const testServerConfigSubject = 'integ-test-server-config-subject'; + +const schemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + ], +}); + +const metadata: Metadata = { + properties: { + owner: 'Bob Jones', + email: 'bob@acme.com', + }, +}; + +const schemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata, +}; + +const backwardCompatibleSchemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + { name: 'email', type: 'string', default: "" }, + ], +}); + +const backwardCompatibleMetadata: Metadata = { + properties: { + owner: 'Bob Jones2', + email: 'bob@acme.com', + }, +}; + +const backwardCompatibleSchemaInfo: SchemaInfo = { + schema: backwardCompatibleSchemaString, + schemaType: 'AVRO', + metadata: backwardCompatibleMetadata, +}; + +describe('SchemaRegistryClient Integration Test', () => { + + beforeEach(async () => { + schemaRegistryClient = new SchemaRegistryClient(clientConfig); + const subjects: string[] = await schemaRegistryClient.getAllSubjects(); + + if (subjects && subjects.includes(testSubject)) { + await schemaRegistryClient.deleteSubject(testSubject); + await schemaRegistryClient.deleteSubject(testSubject, true); + } + + if (subjects && subjects.includes(testServerConfigSubject)) { + await schemaRegistryClient.deleteSubject(testServerConfigSubject); + await schemaRegistryClient.deleteSubject(testServerConfigSubject, true); + } + }); + + it("Should return RestError when retrieving non-existent schema", async () => { + await expect(schemaRegistryClient.getLatestSchemaMetadata(v4())).rejects.toThrow(); + }); + + it('Should register, retrieve, and delete a schema', async () => { + // Register a schema + const registerResponse: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testSubject, schemaInfo); + expect(registerResponse).toBeDefined(); + + 
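[Editor's note] A condensed sketch of the SchemaRegistryClient calls this register/retrieve/delete test exercises, assuming the same clientConfig and schemaInfo as in the spec; the subject name is illustrative and error handling is omitted.

// Sketch: register a schema, fetch it back, then soft- and hard-delete the version.
async function demoRegisterRetrieveDelete() {
  const client = new SchemaRegistryClient(clientConfig);
  const subject = 'demo-subject-value';                           // illustrative subject

  const registered = await client.registerFullResponse(subject, schemaInfo); // -> { id, version, ... }
  const fetched = await client.getBySubjectAndId(subject, registered.id);    // same SchemaInfo back
  const id = await client.getId(subject, schemaInfo);                        // equals registered.id

  await client.deleteSubjectVersion(subject, registered.version);            // soft delete
  await client.deleteSubjectVersion(subject, registered.version, true);      // permanent delete
  return { fetched, id };
}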
const schemaId = registerResponse?.id!; + const version = registerResponse?.version!; + + const getSchemaResponse: SchemaInfo = await schemaRegistryClient.getBySubjectAndId(testSubject, schemaId); + expect(getSchemaResponse).toEqual(schemaInfo); + + const getIdResponse: number = await schemaRegistryClient.getId(testSubject, schemaInfo); + expect(getIdResponse).toEqual(schemaId); + + // Delete the schema + const deleteSubjectResponse: number = await schemaRegistryClient.deleteSubjectVersion(testSubject, version); + expect(deleteSubjectResponse).toEqual(version); + + const permanentDeleteSubjectResponse: number = await schemaRegistryClient.deleteSubjectVersion(testSubject, version, true); + expect(permanentDeleteSubjectResponse).toEqual(version); + }); + + it('Should get all versions and a specific version of a schema', async () => { + // Register a schema + const registerResponse: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testSubject, schemaInfo); + expect(registerResponse).toBeDefined(); + + const version = registerResponse?.version!; + + const getVersionResponse: number = await schemaRegistryClient.getVersion(testSubject, schemaInfo); + expect(getVersionResponse).toEqual(version); + + const allVersionsResponse: number[] = await schemaRegistryClient.getAllVersions(testSubject); + expect(allVersionsResponse).toEqual([version]); + }); + + it('Should get schema metadata', async () => { + // Register a schema + const registerResponse: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testSubject, schemaInfo); + expect(registerResponse).toBeDefined(); + + const schemaVersion: number = registerResponse?.version!; + + const registerResponse2: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testSubject, backwardCompatibleSchemaInfo); + expect(registerResponse2).toBeDefined(); + + const schemaMetadata: SchemaMetadata = { + id: registerResponse?.id!, + version: schemaVersion, + schema: schemaInfo.schema, + subject: testSubject, + metadata: metadata, + }; + + const schemaMetadata2: SchemaMetadata = { + id: registerResponse2?.id!, + version: registerResponse2?.version!, + schema: backwardCompatibleSchemaInfo.schema, + subject: testSubject, + metadata: backwardCompatibleMetadata, + }; + + const getLatestMetadataResponse: SchemaMetadata = await schemaRegistryClient.getLatestSchemaMetadata(testSubject); + expect(schemaMetadata2).toEqual(getLatestMetadataResponse); + + const getMetadataResponse: SchemaMetadata = await schemaRegistryClient.getSchemaMetadata(testSubject, schemaVersion); + expect(schemaMetadata).toEqual(getMetadataResponse); + + const keyValueMetadata: { [key: string]: string } = { + 'owner': 'Bob Jones', + 'email': 'bob@acme.com' + } + + const getLatestWithMetadataResponse: SchemaMetadata = await schemaRegistryClient.getLatestWithMetadata(testSubject, keyValueMetadata); + expect(schemaMetadata).toEqual(getLatestWithMetadataResponse); + }); + + it('Should test compatibility for a version and subject, getting and updating', async () => { + const registerResponse: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testSubject, schemaInfo); + expect(registerResponse).toBeDefined(); + + const version = registerResponse?.version!; + + const updateCompatibilityResponse: Compatibility = await schemaRegistryClient.updateCompatibility(testSubject, Compatibility.BACKWARD_TRANSITIVE); + expect(updateCompatibilityResponse).toEqual(Compatibility.BACKWARD_TRANSITIVE); + + const getCompatibilityResponse: Compatibility = await 
schemaRegistryClient.getCompatibility(testSubject); + expect(getCompatibilityResponse).toEqual(Compatibility.BACKWARD_TRANSITIVE); + + const testSubjectCompatibilityResponse: boolean = await schemaRegistryClient.testSubjectCompatibility(testSubject, backwardCompatibleSchemaInfo); + expect(testSubjectCompatibilityResponse).toEqual(true); + + const testCompatibilityResponse: boolean = await schemaRegistryClient.testCompatibility(testSubject, version, backwardCompatibleSchemaInfo); + expect(testCompatibilityResponse).toEqual(true); + }); + + it('Should update and get default compatibility', async () => { + const updateDefaultCompatibilityResponse: Compatibility = await schemaRegistryClient.updateDefaultCompatibility(Compatibility.FULL); + expect(updateDefaultCompatibilityResponse).toEqual(Compatibility.FULL); + + const getDefaultCompatibilityResponse: Compatibility = await schemaRegistryClient.getDefaultCompatibility(); + expect(getDefaultCompatibilityResponse).toEqual(Compatibility.FULL); + }); + + it('Should update and get subject Config', async () => { + const subjectConfigRequest: ServerConfig = { + compatibility: Compatibility.FULL, + normalize: true + }; + + const subjectConfigResponse: ServerConfig = { + compatibilityLevel: Compatibility.FULL, + normalize: true + }; + + const registerResponse: SchemaMetadata = await schemaRegistryClient.registerFullResponse(testServerConfigSubject, schemaInfo); + expect(registerResponse).toBeDefined(); + + const updateConfigResponse: ServerConfig = await schemaRegistryClient.updateConfig(testServerConfigSubject, subjectConfigRequest); + expect(updateConfigResponse).toBeDefined(); + + const getConfigResponse: ServerConfig = await schemaRegistryClient.getConfig(testServerConfigSubject); + expect(getConfigResponse).toEqual(subjectConfigResponse); + }); + + it('Should get and set default Config', async () => { + const serverConfigRequest: ServerConfig = { + compatibility: Compatibility.FULL, + normalize: false + }; + + const serverConfigResponse: ServerConfig = { + compatibilityLevel: Compatibility.FULL, + normalize: false + }; + + const updateDefaultConfigResponse: ServerConfig = await schemaRegistryClient.updateDefaultConfig(serverConfigRequest); + expect(updateDefaultConfigResponse).toBeDefined(); + + const getDefaultConfigResponse: ServerConfig = await schemaRegistryClient.getDefaultConfig(); + expect(getDefaultConfigResponse).toEqual(serverConfigResponse); + }); + +}); diff --git a/e2e/schemaregistry/schemaregistry-json.spec.ts b/e2e/schemaregistry/schemaregistry-json.spec.ts new file mode 100644 index 00000000..08b85138 --- /dev/null +++ b/e2e/schemaregistry/schemaregistry-json.spec.ts @@ -0,0 +1,448 @@ +import { KafkaJS } from '@confluentinc/kafka-javascript'; +import { + Metadata, + SchemaRegistryClient, + SchemaInfo, + Reference +} from '../../schemaregistry/schemaregistry-client'; +import { beforeEach, afterEach, describe, expect, it } from '@jest/globals'; +import { clientConfig } from '../../test/schemaregistry/test-constants'; +import { JsonSerializer, JsonSerializerConfig, JsonDeserializer } from '../../schemaregistry/serde/json'; +import { SerdeType } from "../../schemaregistry/serde/serde"; +import stringify from 'json-stringify-deterministic'; +import { v4 } from 'uuid'; + +let schemaRegistryClient: SchemaRegistryClient; +let serializerConfig: JsonSerializerConfig; +let serializer: JsonSerializer; +let deserializer: JsonDeserializer; +let producer: KafkaJS.Producer; +let consumer: KafkaJS.Consumer; + + +const kafkaBrokerList = 
'localhost:9092'; +const kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [kafkaBrokerList], + }, +}); + +//Inspired by dotnet client +const schemaString: string = stringify({ + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "Person", + "type": "object", + "additionalProperties": false, + "required": [ + "FirstName", + "LastName" + ], + "properties": { + "FirstName": { + "type": "string" + }, + "MiddleName": { + "type": [ + "null", + "string" + ] + }, + "LastName": { + "type": "string" + }, + "Gender": { + "oneOf": [ + { + "$ref": "#/definitions/Gender" + } + ] + }, + "NumberWithRange": { + "type": "integer", + "format": "int32", + "maximum": 5.0, + "minimum": 2.0 + }, + "Birthday": { + "type": "string", + "format": "date-time" + }, + "Company": { + "oneOf": [ + { + "$ref": "#/definitions/Company" + }, + { + "type": "null" + } + ] + }, + "Cars": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/Car" + } + } + }, + "definitions": { + "Gender": { + "type": "integer", + "description": "", + "x-enumNames": [ + "Male", + "Female" + ], + "enum": [ + 0, + 1 + ] + }, + "Company": { + "type": "object", + "additionalProperties": false, + "properties": { + "Name": { + "type": [ + "null", + "string" + ] + } + } + }, + "Car": { + "type": "object", + "additionalProperties": false, + "properties": { + "Name": { + "type": [ + "null", + "string" + ] + }, + "Manufacturer": { + "oneOf": [ + { + "$ref": "#/definitions/Company" + }, + { + "type": "null" + } + ] + } + } + } + } +}); + +const orderDetailsSchema: SchemaInfo = { + + schema: stringify({ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com/order_details.schema.json", + "title": "OrderDetails", + "description": "Order Details", + "type": "object", + "properties": { + "id": { + "description": "Order Id", + "type": "integer" + }, + "customer": { + "description": "Customer", + "$ref": "http://example.com/customer.schema.json" + }, + "payment_id": { + "description": "Payment Id", + "type": "string" + } + }, + "required": ["id", "customer"] + }), + schemaType: 'JSON', +}; + +const orderSchema: SchemaInfo = { + schema: stringify({ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com/referencedproduct.schema.json", + "title": "Order", + "description": "Order", + "type": "object", + "properties": { + "order_details": { + "description": "Order Details", + "$ref": "http://example.com/order_details.schema.json" + }, + "order_date": { + "description": "Order Date", + "type": "string", + "format": "date-time" + } + }, + "required": ["order_details"] + }), + schemaType: 'JSON', +}; + +const customerSchema: SchemaInfo = { + schema: stringify({ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://example.com/customer.schema.json", + "title": "Customer", + "description": "Customer Data", + "type": "object", + "properties": { + "name": { + "Description": "Customer name", + "type": "string" + }, + "id": { + "description": "Customer id", + "type": "integer" + }, + "email": { + "description": "Customer email", + "type": "string" + } + }, + "required": ["name", "id"] + }), + schemaType: 'JSON', +}; + +const messageValue = { + "firstName": "Real", + "middleName": "Name", + "lastName": "LastName D. 
Roger", + "gender": "Male", + "numberWithRange": 3, + "birthday": 7671, + "company": { + "name": "WarpStream" + }, + "cars": [ + { + "name": "Flink", + "manufacturer": { + "name": "Immerok" + } + }, + { + "name": "Car", + "manufacturer": { + "name": "Car Maker" + } + } + ] +}; + + +const metadata: Metadata = { + properties: { + owner: 'Bob Jones', + email: 'bob@acme.com', + }, +}; + +const schemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata, + schemaType: 'JSON' +}; + +describe('SchemaRegistryClient json Integration Test', () => { + + beforeEach(async () => { + schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + await producer.connect(); + + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + }); + + afterEach(async () => { + await producer.disconnect(); + }); + + it("Should serialize and deserialize json", async () => { + const testTopic = v4(); + + await schemaRegistryClient.register(testTopic + "-value", schemaInfo); + + serializerConfig = { useLatestVersion: true }; + serializer = new JsonSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + deserializer = new JsonDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(testTopic, messageValue) + }; + + await producer.send({ + topic: testTopic, + messages: [outgoingMessage] + }); + + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: testTopic }); + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(testTopic, message.value as Buffer) + }; + messageRcvd = true; + + expect(decodedMessage.value).toMatchObject(messageValue); + }, + }); + + // Wait around until we get a message, and then disconnect. 
+ while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); + + it("Should serialize with autoRegisterSchemas enabled and useLatestVersion disabled", async () => { + const testTopic = v4(); + + serializerConfig = { autoRegisterSchemas: true, useLatestVersion: false }; + serializer = new JsonSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(testTopic, messageValue) + }; + + await producer.send({ + topic: testTopic, + messages: [outgoingMessage] + }); + + }); + + it('Should fail to serialize with UseLatestVersion enabled and autoRegisterSchemas disabled', async () => { + const testTopic = v4(); + + serializerConfig = { autoRegisterSchemas: false, useLatestVersion: true }; + serializer = new JsonSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + + const messageValue = { "name": "Bob Jones", "age": 25 }; + + await expect(serializer.serialize(testTopic, messageValue)).rejects.toThrowError(); + }); + + it("Should serialize referenced schemas", async () => { + const testTopic = v4(); + serializerConfig = { useLatestVersion: true }; + serializer = new JsonSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + deserializer = new JsonDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const customerSubject = v4(); + const orderDetailsSubject = v4(); + + await schemaRegistryClient.register(customerSubject, customerSchema); + const customerIdVersion: number = (await schemaRegistryClient.getLatestSchemaMetadata(customerSubject)).version!; + + const customerReference: Reference = { + name: "http://example.com/customer.schema.json", + subject: customerSubject, + version: customerIdVersion, + }; + orderDetailsSchema.references = [customerReference]; + + await schemaRegistryClient.register(orderDetailsSubject, orderDetailsSchema); + const orderDetailsIdVersion: number = (await schemaRegistryClient.getLatestSchemaMetadata(orderDetailsSubject)).version!; + + const orderDetailsReference: Reference = { + name: "http://example.com/order_details.schema.json", + subject: orderDetailsSubject, + version: orderDetailsIdVersion, + }; + orderSchema.references = [orderDetailsReference]; + + await schemaRegistryClient.register(testTopic + "-value", orderSchema); + + const order = { + order_details: { + id: 1, + customer: { + name: "Bob Jones", + id: 1, + email: "bob@jones.com" + }, + payment_id: "1234" + }, + order_date: "2021-07-15T12:00:00Z" + }; + + const outgoingMessage = { + key: 'key', + value: await serializer.serialize(testTopic, order) + }; + + await producer.send({ + topic: testTopic, + messages: [outgoingMessage] + }); + + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + + await consumer.subscribe({ topic: testTopic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deserializer.deserialize(testTopic, message.value as Buffer) + }; + messageRcvd = true; + + expect(decodedMessage.value).toMatchObject(order); + }, + }); + + // Wait around until we get a message, and then disconnect. 
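[Editor's note] The referenced-schema test above registers customer.schema.json first, then wires it into the order_details schema through a Reference entry before registering the dependent subject. The essential wiring is sketched below, assuming a client plus the customerSchema and orderDetailsSchema objects defined earlier in the spec.

// Sketch: register a JSON schema that $refs another subject.
async function registerWithReference(client, customerSubject, orderDetailsSubject) {
  await client.register(customerSubject, customerSchema);
  const customerVersion = (await client.getLatestSchemaMetadata(customerSubject)).version;

  orderDetailsSchema.references = [{
    name: 'http://example.com/customer.schema.json', // must match the $ref inside the schema
    subject: customerSubject,
    version: customerVersion,
  }];
  await client.register(orderDetailsSubject, orderDetailsSchema);
}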
+ while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + }, 30000); +}); \ No newline at end of file diff --git a/e2e/topicUtils.js b/e2e/topicUtils.js new file mode 100644 index 00000000..dab62568 --- /dev/null +++ b/e2e/topicUtils.js @@ -0,0 +1,81 @@ +/* + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library + * Copyright (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. + */ + +module.exports = { createTopics, deleteTopics }; + +var Kafka = require('../'); + +// Create topics and wait for them to be created in the metadata. +function createTopics(topics, brokerList, cb) { + const client = Kafka.AdminClient.create({ + 'client.id': 'kafka-test-admin-client', + 'metadata.broker.list': brokerList, + }); + let promises = []; + for (const topic of topics) { + client.createTopic(topic, (err) => { + promises.push(new Promise((resolve, reject) => { + if (err && err.code !== Kafka.CODES.ERR_TOPIC_ALREADY_EXISTS) { + reject(err); + } + resolve(); + })); + }); + } + + Promise.all(promises).then(() => { + let interval = setInterval(() => { + client.listTopics((err, topicList) => { + if (err) { + client.disconnect(); + clearInterval(interval); + cb(err); + return; + } + for (const topic of topics) { + if (!topicList.includes(topic.topic)) { + return; + } + } + client.disconnect(); + clearInterval(interval); + cb(); + }); + }, 100); + }).catch((err) => { + client.disconnect(); + cb(err); + }); +} + +// Delete topics. +function deleteTopics(topics, brokerList, cb) { + const client = Kafka.AdminClient.create({ + 'client.id': 'kafka-test-admin-client', + 'metadata.broker.list': brokerList, + }); + let promises = []; + for (const topic of topics) { + client.deleteTopic(topic, (err) => { + promises.push(new Promise((resolve, reject) => { + if (err && err.code !== Kafka.CODES.ERR_UNKNOWN_TOPIC_OR_PART) { + reject(err); + } + resolve(); + })); + }); + } + + Promise.all(promises).then(() => { + client.disconnect(); + cb(); + }).catch((err) => { + client.disconnect(); + cb(err); + }); +} diff --git a/eslint.config.js b/eslint.config.js new file mode 100644 index 00000000..15141b46 --- /dev/null +++ b/eslint.config.js @@ -0,0 +1,75 @@ +const js = require("@eslint/js"); +const jest = require('eslint-plugin-jest'); +const ts = require('typescript-eslint'); +const tsdoc = require('eslint-plugin-tsdoc'); + +const ckjsSpecificSettings = { + languageOptions: { + globals: { + "require": "readonly", + "module": "writable", + "setImmediate": "readonly", + "setTimeout": "readonly", + "clearTimeout": "readonly", + "setInterval": "readonly", + "clearInterval": "readonly", + "console": "readonly" + } + }, + "rules": { + "eqeqeq": ["error", "always"], + "no-use-before-define": ["error", "nofunc"], + "no-caller": "error", + "no-new": "error", + "no-eq-null": "error", + "no-trailing-spaces": "error", + "no-constant-condition": "off", + "semi": "error" + } +}; + +const ckjsSpecificJestSettings = { + "rules": { + "jest/no-disabled-tests": "off", + "jest/no-conditional-expect": "off", + } +}; + +module.exports = ts.config( + { + ignores: ["**/dist/"] + }, + { + ...js.configs.recommended, + files: ["lib/**/*.js", "test/promisified/**/*.js"], + ignores: [] + }, + { + ...ckjsSpecificSettings, + files: ["lib/**/*.js", "test/promisified/**/*.js"], + ignores: [] + }, + { + ...jest.configs['flat/recommended'], + files: 
["test/promisified/**/*.js"] + }, + { + ...ckjsSpecificJestSettings, + files: ["test/promisified/**/*.js"] + }, + ...ts.configs.recommended.map((config) => ({ + ...config, + ignores: ["**/*.js", "types/rdkafka.d.ts", "types/kafkajs.d.ts", "types/config.d.ts"], + plugins: { + "@typescript-eslint": ts.plugin, + tsdoc, + }, + rules: { + ...config.rules, + "prefer-const": "warn", + "@typescript-eslint/no-explicit-any": "warn", + "@typescript-eslint/no-unused-vars": "warn", + "tsdoc/syntax": "warn", + } + })), +); diff --git a/examples/consumer-confluent-cloud.js b/examples/consumer-confluent-cloud.js new file mode 100644 index 00000000..5881ac2d --- /dev/null +++ b/examples/consumer-confluent-cloud.js @@ -0,0 +1,56 @@ +const { Kafka, ErrorCodes } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function consumerStart() { + let consumer; + let stopped = false; + + const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; + const CLUSTER_API_KEY = 'your_cluster_api_key_here'; + const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + + // Set up signals for a graceful shutdown. + const disconnect = () => { + process.off('SIGINT', disconnect); + process.off('SIGTERM', disconnect); + stopped = true; + consumer.commitOffsets() + .finally(() => + consumer.disconnect() + ) + .finally(() => + console.log("Disconnected successfully") + ); + } + process.on('SIGINT', disconnect); + process.on('SIGTERM', disconnect); + + // Initialization + consumer = new Kafka().consumer({ + 'bootstrap.servers': `${CLUSTER_BOOTSTRAP_URL}`, + 'security.protocol': 'SASL_SSL', + 'sasl.mechanisms': 'PLAIN', + 'sasl.username': `${CLUSTER_API_KEY}`, + 'sasl.password': `${CLUSTER_API_SECRET}`, + 'group.id': 'test-group', + 'auto.offset.reset': 'earliest', + 'enable.partition.eof': 'true', + }); + + await consumer.connect(); + console.log("Connected successfully"); + await consumer.subscribe({ topics: ["test-topic"] }); + + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + topic, + partition, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString(), + }); + } + }); +} + +consumerStart(); diff --git a/examples/consumer.js b/examples/consumer.js new file mode 100644 index 00000000..f6437d3d --- /dev/null +++ b/examples/consumer.js @@ -0,0 +1,87 @@ +const { Kafka, ErrorCodes } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function consumerStart() { + let consumer; + let stopped = false; + + // Pause/Resume example, pause and resume alternately every 2 seconds. + let pauseResumeLoopStarted = false; + const pauseResumeLoop = async () => { + let paused = false; + pauseResumeLoopStarted = true; + while (!stopped) { + await new Promise((resolve) => setTimeout(resolve, 2000)); + if (stopped) + break; + + const assignment = consumer.assignment(); + if (paused) { + console.log(`Resuming partitions ${JSON.stringify(assignment)}`) + consumer.resume(assignment); + } else { + console.log(`Pausing partitions ${JSON.stringify(assignment)}`); + consumer.pause(assignment); + } + paused = !paused; + } + }; + + // Set up signals for a graceful shutdown. 
+ const disconnect = () => { + process.off('SIGINT', disconnect); + process.off('SIGTERM', disconnect); + stopped = true; + consumer.commitOffsets() + .finally(() => + consumer.disconnect() + ) + .finally(() => + console.log("Disconnected successfully") + ); + } + process.on('SIGINT', disconnect); + process.on('SIGTERM', disconnect); + + + + // Initialization + consumer = new Kafka().consumer({ + 'bootstrap.servers': 'localhost:9092', + 'group.id': 'test-group', + 'auto.offset.reset': 'earliest', + 'enable.partition.eof': 'true', + 'rebalance_cb': (err, assignment) => { + switch (err.code) { + case ErrorCodes.ERR__ASSIGN_PARTITIONS: + console.log(`Assigned partitions ${JSON.stringify(assignment)}`); + if (!pauseResumeLoopStarted) // Start the pause/resume loop for the example. + pauseResumeLoop(); + break; + case ErrorCodes.ERR__REVOKE_PARTITIONS: + console.log(`Revoked partitions ${JSON.stringify(assignment)}`); + break; + default: + console.error(err); + } + }, + }); + + await consumer.connect(); + console.log("Connected successfully"); + await consumer.subscribe({ topics: ["test-topic"] }); + + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + topic, + partition, + headers: message.headers, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString(), + }); + } + }); +} + +consumerStart(); diff --git a/examples/eos.js b/examples/eos.js new file mode 100644 index 00000000..7a96c3c2 --- /dev/null +++ b/examples/eos.js @@ -0,0 +1,77 @@ +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function eosStart() { + const consumer = new Kafka().consumer({ + 'bootstrap.servers': 'localhost:9092', + 'group.id': 'test-group4', + 'enable.auto.commit': false, + 'auto.offset.reset': 'earliest', + }); + + const producer = new Kafka().producer({ + 'bootstrap.servers': 'localhost:9092', + 'transactional.id': 'txid', + }); + + await consumer.connect(); + await producer.connect(); + + await consumer.subscribe({ + topics: ["consumeTopic"] + }); + + // The run method acts like a consume-transform-produce loop. + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + const msgAckString = JSON.stringify({ + topic, + partition, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString() + }); + + console.log(msgAckString); + + try { + const transaction = await producer.transaction(); + + await transaction.send({ + topic: 'produceTopic', + messages: [ + { value: 'consumed a message: ' + msgAckString }, + ] + }); + + await transaction.sendOffsets({ + consumer, + topics: [ + { + topic, + partitions: [ + { partition, offset: message.offset }, + ], + } + ], + }); + + await transaction.commit(); + + } catch (e) { + console.log({ e, s: "ERROR" }); + await transaction.abort(); + } + }, + }); + + const disconnect = async () => { + process.off('SIGINT', disconnect); + process.off('SIGTERM', disconnect); + await consumer.disconnect(); + await producer.disconnect(); + } + process.on('SIGINT', disconnect); + process.on('SIGTERM', disconnect); +} + +eosStart(); diff --git a/examples/kafkajs/admin/create-topics.js b/examples/kafkajs/admin/create-topics.js new file mode 100644 index 00000000..99f976ca --- /dev/null +++ b/examples/kafkajs/admin/create-topics.js @@ -0,0 +1,79 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. 
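+//
+// Example usage (all flags are optional; the defaults declared below apply
+// when a flag is omitted):
+//   node create-topics.js -b localhost:9092 -t test-topic -p 3 -r 1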
+const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'topic': { + type: 'string', + short: 't', + default: 'test-topic', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + 'num-partitions': { + type: 'string', + short: 'p', + default: '3', + }, + 'replication-factor': { + type: 'string', + short: 'r', + default: '1', + } + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + timeout, + 'num-partitions': numPartitions, + 'replication-factor': replicationFactor, + topic, + } = args.values; + + if (timeout) { + timeout = Number(timeout) || 0; + } + + numPartitions = Number(numPartitions) || 3; + replicationFactor = Number(replicationFactor) || 1; + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + await admin.createTopics({ + topics: [ + { + topic: topic, + numPartitions: numPartitions, + replicationFactor: replicationFactor, + } + ], + timeout, + }); + console.log(`Topic "${topic}" created successfully`); + } catch(err) { + console.log(`Topic creation failed`, err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/admin/delete-groups.js b/examples/kafkajs/admin/delete-groups.js new file mode 100644 index 00000000..bb196c09 --- /dev/null +++ b/examples/kafkajs/admin/delete-groups.js @@ -0,0 +1,64 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + 'group-ids': { + type: 'string', + short: 'g', + multiple: true, + default: [], + }, + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + timeout, + 'group-ids': groupIds, + } = args.values; + + if (!groupIds.length) { + console.error('Group ids are required'); + process.exit(1); + } + + if (timeout) { + timeout = Number(timeout) || 0; + } + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + await admin.deleteGroups( + groupIds, + { timeout }, + ); + console.log(`Groups "${groupIds.join(',')}" deleted successfully`); + } catch(err) { + console.log(`Group deletion failed`, err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/admin/delete-topics.js b/examples/kafkajs/admin/delete-topics.js new file mode 100644 index 00000000..ff2b77d7 --- /dev/null +++ b/examples/kafkajs/admin/delete-topics.js @@ -0,0 +1,64 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. 
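+//
+// Example usage (-t is required and may be repeated to delete several topics):
+//   node delete-topics.js -b localhost:9092 -t test-topic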
+const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + 'topics': { + type: 'string', + short: 't', + multiple: true, + default: [], + }, + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + timeout, + topics, + } = args.values; + + if (!topics.length) { + console.error('Topics names is required'); + process.exit(1); + } + + if (timeout) { + timeout = Number(timeout) || 0; + } + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + await admin.deleteTopics({ + topics, + timeout, + }); + console.log(`Topics "${topics.join(',')}" deleted successfully`); + } catch(err) { + console.log(`Topic deletion failed`, err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/admin/describe-groups.js b/examples/kafkajs/admin/describe-groups.js new file mode 100644 index 00000000..b0317254 --- /dev/null +++ b/examples/kafkajs/admin/describe-groups.js @@ -0,0 +1,88 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka, ConsumerGroupStates } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +function printNode(node, prefix = '') { + if (!node) + return; + console.log(`${prefix}\tHost: ${node.host}`); + console.log(`${prefix}\tPort: ${node.port}`); + console.log(`${prefix}\tRack: ${node.rack}`); +} + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + 'groups': { + type: 'string', + short: 'g', + multiple: true, + default: [], + }, + 'include-authorized-operations': { + type: 'boolean', + short: 'i', + default: false, + } + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + timeout, + groups, + 'include-authorized-operations': includeAuthorizedOperations, + } = args.values; + + if (timeout) { + timeout = Number(timeout) || 0; + } + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + const groupDescriptions = await admin.describeGroups( + groups, + { + timeout, + includeAuthorizedOperations, + } + ); + for (const group of groupDescriptions.groups) { + console.log(`Group id: ${group.groupId}`); + console.log(`\tError: ${group.error}`); + console.log(`\tProtocol: ${group.protocol}`); + console.log(`\tProtocol type: ${group.protocolType}`); + console.log(`\tPartition assignor: ${group.partitionAssignor}`); + console.log(`\tState: ${group.state}`); + console.log(`\tCoordinator: ${group.coordinator ? 
group.coordinator.id : group.coordinator}`); + printNode(group.coordinator, '\t'); + console.log(`\tAuthorized operations: ${group.authorizedOperations}`); + console.log(`\tIs simple: ${group.isSimpleConsumerGroup}`); + console.log(`\tState: ${group.state}`); + } + } catch(err) { + console.log('Describe groups failed', err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/admin/list-groups.js b/examples/kafkajs/admin/list-groups.js new file mode 100644 index 00000000..3e287a4c --- /dev/null +++ b/examples/kafkajs/admin/list-groups.js @@ -0,0 +1,66 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka, ConsumerGroupStates } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + 'states': { + type: 'string', + short: 's', + multiple: true, + default: [], + } + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + states: matchConsumerGroupStates, + timeout, + } = args.values; + + if (timeout) { + timeout = Number(timeout) || 0; + } + matchConsumerGroupStates = matchConsumerGroupStates.map( + state => ConsumerGroupStates[state]); + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + const groupOverview = await admin.listGroups({ + timeout, + matchConsumerGroupStates + }); + for (const group of groupOverview.groups) { + console.log(`Group id: ${group.groupId}`); + console.log(`\tType: ${group.protocolType}`); + console.log(`\tIs simple: ${group.isSimpleConsumerGroup}`); + console.log(`\tState: ${group.state}`); + } + } catch(err) { + console.log('List topics failed', err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/admin/list-topics.js b/examples/kafkajs/admin/list-topics.js new file mode 100644 index 00000000..fe98290a --- /dev/null +++ b/examples/kafkajs/admin/list-topics.js @@ -0,0 +1,51 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +const { parseArgs } = require('node:util'); + +async function adminStart() { + const args = parseArgs({ + options: { + 'bootstrap-servers': { + type: 'string', + short: 'b', + default: 'localhost:9092', + }, + 'timeout': { + type: 'string', + short: 'm', + default: undefined, + }, + }, + }); + + let { + 'bootstrap-servers': bootstrapServers, + timeout + } = args.values; + + if (timeout) { + timeout = Number(timeout) || 0; + } + + const kafka = new Kafka({ + kafkaJS: { + brokers: [bootstrapServers], + } + }); + + const admin = kafka.admin(); + await admin.connect(); + + try { + const topics = await admin.listTopics({ timeout }); + for (const topic of topics) { + console.log(`Topic name: ${topic}`); + } + } catch(err) { + console.log('List topics failed', err); + } + + await admin.disconnect(); +} + +adminStart(); diff --git a/examples/kafkajs/consumer-confluent-cloud.js b/examples/kafkajs/consumer-confluent-cloud.js new file mode 100644 index 00000000..aa10b0c7 --- /dev/null +++ b/examples/kafkajs/consumer-confluent-cloud.js @@ -0,0 +1,72 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. 
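+//
+// Before running, replace the CLUSTER_BOOTSTRAP_URL, CLUSTER_API_KEY and
+// CLUSTER_API_SECRET placeholders below with your Confluent Cloud cluster details.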
+const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function consumerStart() { + let consumer; + var stopped = false; + + const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; + const CLUSTER_API_KEY = 'your_cluster_api_key_here'; + const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + + const kafka = new Kafka({ + kafkaJS: { + brokers: [`${CLUSTER_BOOTSTRAP_URL}`], + ssl: true, + sasl: { + mechanism: 'plain', + username: `${CLUSTER_API_KEY}`, + password: `${CLUSTER_API_SECRET}`, + }, + } + }); + + consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group', + }, + + /* Properties from librdkafka can also be used */ + 'auto.commit.interval.ms': 6000, + }); + + await consumer.connect(); + console.log("Connected successfully"); + + await consumer.subscribe({ + topics: [ + "test-topic" + ] + }) + + // Start consuming messages. + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + topic, + partition, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString(), + }); + }, + }); + + // Disconnect example + const disconnect = () => { + process.off('SIGINT', disconnect); + process.off('SIGTERM', disconnect); + stopped = true; + consumer.commitOffsets() + .finally(() => + consumer.disconnect() + ) + .finally(() => + console.log("Disconnected successfully") + ); + } + process.on('SIGINT', disconnect); + process.on('SIGTERM', disconnect); +} + +consumerStart(); diff --git a/examples/kafkajs/consumer.js b/examples/kafkajs/consumer.js index 22d5ccc7..fd4579b8 100644 --- a/examples/kafkajs/consumer.js +++ b/examples/kafkajs/consumer.js @@ -1,119 +1,119 @@ -const { Kafka } = require('../..').KafkaJS -//const { Kafka } = require('kafkajs') +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. 
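+// Note: KafkaJS-style options are nested under the `kafkaJS` key, while
+// librdkafka-style properties can be passed alongside it, as shown below.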
+const { Kafka, ErrorCodes } = require('@confluentinc/kafka-javascript').KafkaJS; async function consumerStart() { - let consumer; - var stopped = false; + let consumer; + var stopped = false; - const kafka = new Kafka({ - brokers: [''], - ssl: true, - connectionTimeout: 5000, - sasl: { - mechanism: 'plain', - username: '', - password: '', - }, - }); + const kafka = new Kafka({ + kafkaJS: { + brokers: ['localhost:9092'], + ssl: true, + connectionTimeout: 5000, + sasl: { + mechanism: 'plain', + username: '', + password: '', + }, + } + }); - consumer = kafka.consumer({ + consumer = kafka.consumer({ + kafkaJS: { groupId: 'test-group', - rebalanceListener: { - onPartitionsAssigned: async (assignment) => { - console.log(`Assigned partitions ${JSON.stringify(assignment)}`); - }, - onPartitionsRevoked: async (assignment) => { - console.log(`Revoked partitions ${JSON.stringify(assignment)}`); - if (!stopped) { - await consumer.commitOffsets().catch((e) => { - console.error(`Failed to commit ${e}`); - }) - } - } - }, - rdKafka: { - 'enable.auto.commit': false + autoCommit: false, + }, + /* Properties from librdkafka can also be used */ + rebalance_cb: (err, assignment) => { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + console.log(`Assigned partitions ${JSON.stringify(assignment)}`); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + console.log(`Revoked partitions ${JSON.stringify(assignment)}`); + } else { + console.error(`Rebalance error ${err}`); } - }); + }, + 'auto.commit.interval.ms': 6000, + }); + + await consumer.connect(); + console.log("Connected successfully"); - await consumer.connect(); - console.log("Connected successfully"); + await consumer.subscribe({ + topics: [ + "test-topic" + ] + }) - await consumer.subscribe({ - topics: [ - "topic2" - ] - }) + // Batch consumer, commit and seek example + var batch = 0; + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + console.log({ + topic, + partition, + offset: message.offset, + key: message.key?.toString(), + value: message.value.toString(), + }) - // Batch consumer, commit and seek example - var batch = 0; - consumer.run({ - eachMessage: async ({ topic, partition, message }) => { - console.log({ + if (++batch % 100 == 0) { + await consumer.seek({ topic, partition, - offset: message.offset, - key: message.key?.toString(), - value: message.value.toString(), - }) - - if (++batch % 100 == 0) { - await consumer.seek({ - topic, - partition, - offset: -2 - }); - await consumer.commitOffsets(); - batch = 0; - } - }, - }); + offset: -2 + }); + await consumer.commitOffsets(); + batch = 0; + } + }, + }); - // Pause/Resume example - const pauseResumeLoop = async () => { - let paused = false; - let ticks = 0; - while (!stopped) { - await new Promise((resolve) => setTimeout(resolve, 100)); - if (stopped) - break; + // Pause/Resume example + const pauseResumeLoop = async () => { + let paused = false; + let ticks = 0; + while (!stopped) { + await new Promise((resolve) => setTimeout(resolve, 100)); + if (stopped) + break; - ticks++; - if (ticks == 200) { - ticks = 0; - const assignment = consumer.assignment(); - if (paused) { - console.log(`Resuming partitions ${JSON.stringify(assignment)}`) - consumer.resume(assignment); - } else { - console.log(`Pausing partitions ${JSON.stringify(assignment)}`); - consumer.pause(assignment); - } - paused = !paused; + ticks++; + if (ticks == 200) { + ticks = 0; + const assignment = consumer.assignment(); + if (paused) { + console.log(`Resuming partitions 
${JSON.stringify(assignment)}`) + consumer.resume(assignment); + } else { + console.log(`Pausing partitions ${JSON.stringify(assignment)}`); + consumer.pause(assignment); } + paused = !paused; } } + } - if (consumer.assignment) { - // KafkaJS doesn't have assignment() - pauseResumeLoop() - } + if (consumer.assignment()) { + // KafkaJS doesn't have assignment() + pauseResumeLoop() + } - // Disconnect example - const disconnect = () => { - process.off('SIGINT', disconnect); - process.off('SIGTERM', disconnect); - stopped = true; - consumer.commitOffsets() + // Disconnect example + const disconnect = () => { + process.off('SIGINT', disconnect); + process.off('SIGTERM', disconnect); + stopped = true; + consumer.commitOffsets() .finally(() => consumer.disconnect() ) .finally(() => console.log("Disconnected successfully") ); - } - process.on('SIGINT', disconnect); - process.on('SIGTERM', disconnect); + } + process.on('SIGINT', disconnect); + process.on('SIGTERM', disconnect); } consumerStart() diff --git a/examples/kafkajs/eos.js b/examples/kafkajs/eos.js index 6c9f85ca..78e0d5f5 100644 --- a/examples/kafkajs/eos.js +++ b/examples/kafkajs/eos.js @@ -1,26 +1,30 @@ -const { Kafka } = require('../..').KafkaJS -//const { Kafka } = require('kafkajs') +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; async function eosStart() { const kafka = new Kafka({ - brokers: [''], - ssl: true, - sasl: { - mechanism: 'plain', - username: '', - password: '', + kafkaJS: { + brokers: [''], + ssl: true, + sasl: { + mechanism: 'plain', + username: '', + password: '', + } } }); const consumer = kafka.consumer({ - groupId: 'groupId', - rdKafka: { - "enable.auto.commit": false, - }, + kafkaJS: { + groupId: 'groupId', + autoCommit: false, + } }); const producer = kafka.producer({ - transactionalId: 'txid' + kafkaJS: { + transactionalId: 'txid' + } }); await consumer.connect(); @@ -34,7 +38,8 @@ async function eosStart() { // The run method acts like a consume-transform-produce loop. consumer.run({ eachMessage: async ({ topic, partition, message }) => { - const msgAckString = JSON.stringify({topic, + const msgAckString = JSON.stringify({ + topic, partition, offset: message.offset, key: message.key?.toString(), diff --git a/examples/kafkajs/oauthbearer_calback_authentication/oauthbearer_callback_authentication.js b/examples/kafkajs/oauthbearer_calback_authentication/oauthbearer_callback_authentication.js new file mode 100644 index 00000000..3fa9ee4b --- /dev/null +++ b/examples/kafkajs/oauthbearer_calback_authentication/oauthbearer_callback_authentication.js @@ -0,0 +1,65 @@ +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +var jwt = require('jsonwebtoken'); + +// This example uses the Producer for demonstration purposes. +// It is the same whether you use a Consumer/AdminClient. + +async function token_refresh(oauthbearer_config /* string - passed from config */) { + console.log("Called token_refresh with given config: " + oauthbearer_config); + // At this point, we can use the information in the token, make + // some API calls, fetch something from a file... + // For the illustration, everything is hard-coded. + const principal = 'admin'; + // In seconds - needed by jsonwebtoken library + const exp_seconds = Math.floor(Date.now() / 1000) + (60 * 60); + // In milliseconds - needed by kafka-javascript. 
+ const exp_ms = exp_seconds * 1000; + + // For illustration, we're not signing our JWT (algorithm: none). + // For production uses-cases, it should be signed. + const value = jwt.sign( + { 'sub': principal, exp: exp_seconds, 'scope': 'requiredScope' }, '', { algorithm: 'none' }); + + // SASL extensions can be passed as Map or key/value pairs in an object. + const extensions = { + traceId: '123' + }; + + // The callback is called with the new token, its lifetime, and the principal. + // The extensions are optional and may be omitted. + console.log("Finished token_refresh, triggering callback: with value: " + + value.slice(0, 10) + "..., lifetime: " + exp_ms + + ", principal: " + principal + ", extensions: " + JSON.stringify(extensions)); + + // If no token could be fetched or an error occurred, an Error can be thrown instead. + return { value, lifetime: exp_ms, principal, extensions }; +} + +async function run() { + const kafka = new Kafka({}); + const producer = kafka.producer({ + kafkaJS: { + brokers: ['localhost:46611'], + sasl: { + mechanism: 'oauthbearer', + oauthBearerProvider: token_refresh, + }, + }, + 'sasl.oauthbearer.config': 'someConfigPropertiesKey=value', + }); + + await producer.connect(); + console.log("Producer connected"); + + const deliveryReport = await producer.send({ + topic: 'topic', + messages: [ + { value: 'Hello world!' }, + ], + }); + console.log("Producer sent message", deliveryReport); + + await producer.disconnect(); +} + +run().catch(console.error); \ No newline at end of file diff --git a/examples/kafkajs/oauthbearer_calback_authentication/package.json b/examples/kafkajs/oauthbearer_calback_authentication/package.json new file mode 100644 index 00000000..0dbf5ff8 --- /dev/null +++ b/examples/kafkajs/oauthbearer_calback_authentication/package.json @@ -0,0 +1,16 @@ +{ + "name": "oauthbearer_calback_authentication", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC", + "dependencies": { + "@confluentinc/kafka-javascript": "file:../../..", + "jsonwebtoken": "^9.0.2" + } +} diff --git a/examples/kafkajs/producer-confluent-cloud.js b/examples/kafkajs/producer-confluent-cloud.js new file mode 100644 index 00000000..0e57ff5b --- /dev/null +++ b/examples/kafkajs/producer-confluent-cloud.js @@ -0,0 +1,44 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. 
+const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function producerStart() { + const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; + const CLUSTER_API_KEY = 'your_cluster_api_key_here'; + const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + + const kafka = new Kafka({ + kafkaJS: { + brokers: [`${CLUSTER_BOOTSTRAP_URL}`], + ssl: true, + sasl: { + mechanism: 'plain', + username: `${CLUSTER_API_KEY}`, + password: `${CLUSTER_API_SECRET}`, + }, + } + }); + + const producer = kafka.producer(); + + await producer.connect(); + + console.log("Connected successfully"); + + const res = [] + for (let i = 0; i < 50; i++) { + res.push(producer.send({ + topic: 'test-topic', + messages: [ + { value: 'v222', partition: 0 }, + { value: 'v11', partition: 0, key: 'x' }, + ] + })); + } + await Promise.all(res); + + await producer.disconnect(); + + console.log("Disconnected successfully"); +} + +producerStart(); diff --git a/examples/kafkajs/producer.js b/examples/kafkajs/producer.js index 57c1e6ac..87f7ef11 100644 --- a/examples/kafkajs/producer.js +++ b/examples/kafkajs/producer.js @@ -1,14 +1,16 @@ -const { Kafka } = require('../..').KafkaJS -//const { Kafka } = require('kafkajs') +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; async function producerStart() { const kafka = new Kafka({ - brokers: [''], - ssl: true, - sasl: { - mechanism: 'plain', - username: '', - password: '', + kafkaJS: { + brokers: [''], + ssl: true, + sasl: { + mechanism: 'plain', + username: '', + password: '', + }, } }); @@ -19,16 +21,16 @@ async function producerStart() { console.log("Connected successfully"); const res = [] - for(let i = 0; i < 50; i++) { + for (let i = 0; i < 50; i++) { res.push(producer.send({ topic: 'topic2', messages: [ - {value: 'v222', partition: 0}, - {value: 'v11', partition: 0, key: 'x'}, + { value: 'v222', partition: 0 }, + { value: 'v11', partition: 0, key: 'x' }, ] })); } - await Promise.allSettled(res); + await Promise.all(res); await producer.disconnect(); diff --git a/examples/kafkajs/sr.js b/examples/kafkajs/sr.js new file mode 100644 index 00000000..a10c6293 --- /dev/null +++ b/examples/kafkajs/sr.js @@ -0,0 +1,126 @@ +// require('kafkajs') is replaced with require('@confluentinc/kafka-javascript').KafkaJS. +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +// Note: The @confluentinc/schemaregistry will need to be installed separately to run this example, +// as it isn't a dependency of confluent-kafka-javascript. +const { SchemaRegistryClient, SerdeType, AvroSerializer, AvroDeserializer} = require('@confluentinc/schemaregistry'); + +const registry = new SchemaRegistryClient({ baseURLs: [''] }) +const kafka = new Kafka({ + kafkaJS: { + brokers: [''], + ssl: true, + sasl: { + mechanism: 'plain', + username: '', + password: '', + }, + } +}); + +let consumer = kafka.consumer({ + kafkaJS: { + groupId: "test-group", + fromBeginning: true, + }, +}); +let producer = kafka.producer(); + +const schemaA = { + type: 'record', + namespace: 'test', + name: 'A', + fields: [ + { name: 'id', type: 'int' }, + { name: 'b', type: 'test.B' }, + ], +}; + +const schemaB = { + type: 'record', + namespace: 'test', + name: 'B', + fields: [{ name: 'id', type: 'int' }], +}; + +const topicName = 'test-topic'; +const subjectName = topicName + '-value'; + +const run = async () => { + // Register schemaB. 
+ await registry.register( + 'avro-b', + { + schemaType: 'AVRO', + schema: JSON.stringify(schemaB), + } + ); + const response = await registry.getLatestSchemaMetadata('avro-b'); + const version = response.version + + // Register schemaA, which references schemaB. + const id = await registry.register( + subjectName, + { + schemaType: 'AVRO', + schema: JSON.stringify(schemaA), + references: [ + { + name: 'test.B', + subject: 'avro-b', + version, + }, + ], + } + ) + + // Create an Avro serializer + const ser = new AvroSerializer(registry, SerdeType.VALUE, { useLatestVersion: true }); + + // Produce a message with schemaA. + await producer.connect() + const outgoingMessage = { + key: 'key', + value: await ser.serialize(topicName, { id: 1, b: { id: 2 } }), + } + await producer.send({ + topic: topicName, + messages: [outgoingMessage] + }); + console.log("Producer sent its message.") + await producer.disconnect(); + producer = null; + + // Create an Avro deserializer + const deser = new AvroDeserializer(registry, SerdeType.VALUE, {}); + + await consumer.connect() + await consumer.subscribe({ topic: topicName }) + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deser.deserialize(topicName, message.value) + }; + console.log("Consumer received message.\nBefore decoding: " + JSON.stringify(message) + "\nAfter decoding: " + JSON.stringify(decodedMessage)); + messageRcvd = true; + }, + }); + + // Wait around until we get a message, and then disconnect. + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + consumer = null; +} + +run().catch (async e => { + console.error(e); + consumer && await consumer.disconnect(); + producer && await producer.disconnect(); + process.exit(1); +}) diff --git a/examples/node-rdkafka/consumer-confluent-cloud.md b/examples/node-rdkafka/consumer-confluent-cloud.md new file mode 100644 index 00000000..629825ee --- /dev/null +++ b/examples/node-rdkafka/consumer-confluent-cloud.md @@ -0,0 +1,53 @@ +Connecting to a Kafka Consumer is easy. Let's try to connect to one using +the Stream implementation + +```js +/* + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library + * + * Copyright (c) 2023 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. 
+ */ + +const Transform = require('stream').Transform; + +const Kafka = require('../'); + +const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; +const CLUSTER_API_KEY = 'your_cluster_api_key_here'; +const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + +const stream = Kafka.KafkaConsumer.createReadStream({ + 'bootstrap.servers': `${CLUSTER_BOOTSTRAP_URL}`, + 'group.id': 'test-group', + 'socket.keepalive.enable': true, + 'enable.auto.commit': false, + 'security.protocol': 'SASL_SSL', + 'sasl.mechanisms': 'PLAIN', + 'sasl.username': `${CLUSTER_API_KEY}`, + 'sasl.password': `${CLUSTER_API_SECRET}`, +}, {}, { + topics: 'test', + waitInterval: 0, + objectMode: false +}); + +stream.on('error', function(err) { + if (err) console.log(err); + process.exit(1); +}); + +stream + .pipe(process.stdout); + +stream.on('error', function(err) { + console.log(err); + process.exit(1); +}); + +stream.consumer.on('event.error', function(err) { + console.log(err); +}) +``` diff --git a/examples/consumer-flow.md b/examples/node-rdkafka/consumer-flow.md similarity index 91% rename from examples/consumer-flow.md rename to examples/node-rdkafka/consumer-flow.md index 62e5e31f..ee2470b8 100644 --- a/examples/consumer-flow.md +++ b/examples/node-rdkafka/consumer-flow.md @@ -3,7 +3,7 @@ the Flowing implementation ```js /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -16,7 +16,7 @@ var Kafka = require('../'); var consumer = new Kafka.KafkaConsumer({ //'debug': 'all', 'metadata.broker.list': 'localhost:9092', - 'group.id': 'confluent-kafka-js-consumer-flow-example', + 'group.id': 'confluent-kafka-javascript-consumer-flow-example', 'enable.auto.commit': false }); diff --git a/examples/consumer.md b/examples/node-rdkafka/consumer.md similarity index 92% rename from examples/consumer.md rename to examples/node-rdkafka/consumer.md index 4e09cc2f..0c0b6752 100644 --- a/examples/consumer.md +++ b/examples/node-rdkafka/consumer.md @@ -3,7 +3,7 @@ the Stream implementation ```js /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/examples/docker-alpine.md b/examples/node-rdkafka/docker-alpine.md similarity index 63% rename from examples/docker-alpine.md rename to examples/node-rdkafka/docker-alpine.md index 9339e191..5f407c5f 100644 --- a/examples/docker-alpine.md +++ b/examples/node-rdkafka/docker-alpine.md @@ -1,4 +1,4 @@ -When using docker to install `confluent-kafka-js`, you need to make sure you install appropriate library dependencies. Alpine linux is a lighter weight version of linux and does not come with the same base libraries as other distributions (like glibc). +When using docker to install `confluent-kafka-javascript`, you need to make sure you install appropriate library dependencies. Alpine linux is a lighter weight version of linux and does not come with the same base libraries as other distributions (like glibc). 
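+
+For example, when the addon has to be built from source on Alpine, a C/C++ toolchain
+and Python for `node-gyp` are typically required before `npm install` will succeed.
+The package list below is only illustrative and may need adjusting for your image:
+
+```dockerfile
+RUN apk add --no-cache python3 make g++ bash
+```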
You can see some of the differences here: https://linuxacademy.com/blog/cloud/alpine-linux-and-docker/ @@ -24,7 +24,7 @@ RUN mkdir -p /usr/local/app # Move to the app directory WORKDIR /usr/local/app -# Install confluent-kafka-js -RUN npm install confluent-kafka-js +# Install confluent-kafka-javascript +RUN npm install confluent-kafka-javascript # Copy package.json first to check if an npm install is needed ``` diff --git a/examples/node-rdkafka/high-level-producer-confluent-cloud.md b/examples/node-rdkafka/high-level-producer-confluent-cloud.md new file mode 100644 index 00000000..e3b8f9aa --- /dev/null +++ b/examples/node-rdkafka/high-level-producer-confluent-cloud.md @@ -0,0 +1,40 @@ +```js +const Kafka = require('../'); + +const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; +const CLUSTER_API_KEY = 'your_cluster_api_key_here'; +const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + +const producer = new Kafka.HighLevelProducer({ + 'bootstrap.servers': `${CLUSTER_BOOTSTRAP_URL}`, + 'security.protocol': 'SASL_SSL', + 'sasl.mechanisms': 'PLAIN', + 'sasl.username': `${CLUSTER_API_KEY}`, + 'sasl.password': `${CLUSTER_API_SECRET}`, +}); + +// Throw away the keys +producer.setKeySerializer(function(v) { + return new Promise((resolve, reject) => { + setTimeout(() => { + resolve(null); + }, 20); + }); +}); + +// Take the message field +producer.setValueSerializer(function(v) { + return Buffer.from(v.message); +}); + +producer.connect(null, function() { + producer.produce('test', null, { + message: 'alliance4ever', + }, null, Date.now(), function(err, offset) { + // The offset if our acknowledgement level allows us to receive delivery offsets + setImmediate(function() { + producer.disconnect(); + }); + }); +}); +``` diff --git a/examples/high-level-producer.md b/examples/node-rdkafka/high-level-producer.md similarity index 100% rename from examples/high-level-producer.md rename to examples/node-rdkafka/high-level-producer.md diff --git a/examples/metadata.md b/examples/node-rdkafka/metadata.md similarity index 87% rename from examples/metadata.md rename to examples/node-rdkafka/metadata.md index 0fd4c08b..92e8d538 100644 --- a/examples/metadata.md +++ b/examples/node-rdkafka/metadata.md @@ -1,6 +1,6 @@ ```js /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/examples/node-rdkafka/oauthbearer_callback_authentication/oauthbearer_callback_authentication.js b/examples/node-rdkafka/oauthbearer_callback_authentication/oauthbearer_callback_authentication.js new file mode 100644 index 00000000..e587b506 --- /dev/null +++ b/examples/node-rdkafka/oauthbearer_callback_authentication/oauthbearer_callback_authentication.js @@ -0,0 +1,82 @@ +const Kafka = require('@confluentinc/kafka-javascript'); +var jwt = require('jsonwebtoken'); + +// This example uses the Producer for demonstration purposes. +// It is the same whether you use a Consumer/AdminClient. + +function token_refresh(oauthbearer_config /* string - passed from config */, cb) { + console.log("Called token_refresh with given config: " + oauthbearer_config); + // At this point, we can use the information in the token, make + // some API calls, fetch something from a file... + // For the illustration, everything is hard-coded. 
+ const principal = 'admin'; + // In seconds - needed by jsonwebtoken library + const exp_seconds = Math.floor(Date.now() / 1000) + (60 * 60); + // In milliseconds - needed by kafka-javascript. + const exp_ms = exp_seconds * 1000; + + // For illustration, we're not signing our JWT (algorithm: none). + // For production uses-cases, it should be signed. + const tokenValue = jwt.sign( + { 'sub': principal, exp: exp_seconds, 'scope': 'requiredScope' }, '', { algorithm: 'none' }); + + // SASL extensions can be passed as Map or key/value pairs in an object. + const extensions = { + traceId: '123' + }; + + // The callback is called with the new token, its lifetime, and the principal. + // The extensions are optional and may be omitted. + console.log("Finished token_refresh, triggering callback: with tokenValue: " + + tokenValue.slice(0, 10) + "..., lifetime: " + exp_ms + + ", principal: " + principal + ", extensions: " + JSON.stringify(extensions)); + cb( + // If no token could be fetched or an error occurred, a new Error can be + // and passed as the first parameter and the second parameter omitted. + null, + { tokenValue, lifetime: exp_ms, principal, extensions }); +} + +function run() { + const producer = new Kafka.Producer({ + 'metadata.broker.list': 'localhost:60125', + 'dr_cb': true, + // 'debug': 'all' + + // Config important for OAUTHBEARER: + 'security.protocol': 'SASL_PLAINTEXT', + 'sasl.mechanisms': 'OAUTHBEARER', + 'sasl.oauthbearer.config': 'someConfigPropertiesKey=value', + 'oauthbearer_token_refresh_cb': token_refresh, + }); + + producer.connect(); + + producer.on('event.log', (event) => { + console.log(event); + }); + + producer.on('ready', () => { + console.log('Producer is ready!'); + producer.setPollInterval(1000); + console.log("Producing message."); + producer.produce( + 'topic', + null, // partition - let partitioner choose + Buffer.from('messageValue'), + 'messageKey', + ); + }); + + producer.on('error', (err) => { + console.error("Encountered error in producer: " + err.message); + }); + + producer.on('delivery-report', function (err, report) { + console.log('delivery-report: ' + JSON.stringify(report)); + // since we just want to produce one message, close shop. 
+ producer.disconnect(); + }); +} + +run(); diff --git a/examples/node-rdkafka/oauthbearer_callback_authentication/package.json b/examples/node-rdkafka/oauthbearer_callback_authentication/package.json new file mode 100644 index 00000000..c5f50a42 --- /dev/null +++ b/examples/node-rdkafka/oauthbearer_callback_authentication/package.json @@ -0,0 +1,15 @@ +{ + "name": "oauthbearer_callback_authentication", + "version": "1.0.0", + "description": "", + "main": "oauthbearer_callback_authentication.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "MIT", + "dependencies": { + "@confluentinc/kafka-javascript": "file:../../..", + "jsonwebtoken": "^9.0.2" + } +} diff --git a/examples/producer-cluster.md b/examples/node-rdkafka/producer-cluster.md similarity index 96% rename from examples/producer-cluster.md rename to examples/node-rdkafka/producer-cluster.md index be3dee8e..76fa91ff 100644 --- a/examples/producer-cluster.md +++ b/examples/node-rdkafka/producer-cluster.md @@ -1,6 +1,6 @@ ```js /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/examples/producer.md b/examples/node-rdkafka/producer.md similarity index 96% rename from examples/producer.md rename to examples/node-rdkafka/producer.md index 0518c675..de493e1a 100644 --- a/examples/producer.md +++ b/examples/node-rdkafka/producer.md @@ -1,6 +1,6 @@ ```js /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/examples/package.json b/examples/package.json new file mode 100644 index 00000000..b8eb8b2a --- /dev/null +++ b/examples/package.json @@ -0,0 +1,14 @@ +{ + "name": "examples", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "MIT", + "dependencies": { + "@confluentinc/kafka-javascript": "file:.." + } +} diff --git a/examples/performance/README.md b/examples/performance/README.md new file mode 100644 index 00000000..5a34d49a --- /dev/null +++ b/examples/performance/README.md @@ -0,0 +1,39 @@ +# Performance Benchmarking + +The library can be benchmarked by running the following command: + +```bash +node performance-consolidated.js [--producer] [--consumer] [--ctp] [--all] +``` + +The `--producer` flag will run the producer benchmark, the `--consumer` flag +will run the consumer benchmark, and the `--ctp` flag will run the +consume-transform-produce benchmark. + +The `--create-topics` flag will create the topics before running the benchmarks +(and delete any existing topics of the same name). It's recommended to use this +unless the number of partitions or replication factor needs to be changed. + +If no flags are provided, no benchmarks will be run. If the `--all` flag is +provided, all benchmarks will be run ignoring any other flags. + +The benchmarks assume topics are already created (unless usig `--create-topics`). +The consumer benchmark assumes that the topic already has at least `MESSAGE_COUNT` messages within, +which can generally be done by running the producer benchmark along with it. + +The following environment variables can be set to configure the benchmark, with +default values given in parentheses. 
+ +| Variable | Description | Default | +|----------|-------------|---------| +| KAFKA_BROKERS | Kafka brokers to connect to | localhost:9092 | +| KAFKA_TOPIC | Kafka topic to produce to/consume from | test-topic | +| KAFKA_TOPIC2 | Kafka topic to produce to after consumption in consume-transform-produce | test-topic2 | +| MESSAGE_COUNT | Number of messages to produce/consume | 1000000 | +| MESSAGE_SIZE | Size of each message in bytes | 256 | +| BATCH_SIZE | Number of messages to produce in a single batch | 100 | +| COMPRESSION | Compression codec to use (None, GZIP, Snappy, LZ4, ZSTD) | None | +| WARMUP_MESSAGES | Number of messages to produce before starting the produce benchmark | BATCH_SIZE * 10 | +| MESSAGE_PROCESS_TIME_MS | Time to sleep after consuming each message in the consume-transform-produce benchmark. Simulates "transform". May be 0. | 5 | +| CONSUME_TRANSFORM_PRODUCE_CONCURRENCY | partitionsConsumedConcurrently for the consume-transform-produce benchmark | 1 | +| MODE | Mode to run the benchmarks in (confluent, kafkajs). Can be used for comparison with KafkaJS | confluent | diff --git a/examples/performance/package.json b/examples/performance/package.json new file mode 100644 index 00000000..151d0816 --- /dev/null +++ b/examples/performance/package.json @@ -0,0 +1,16 @@ +{ + "name": "performance", + "version": "1.0.0", + "main": "performance-promisified.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "dependencies": { + "@confluentinc/kafka-javascript": "file:../..", + "kafkajs": "^2.2.4" + } +} diff --git a/examples/performance/performance-consolidated.js b/examples/performance/performance-consolidated.js new file mode 100644 index 00000000..eeff3965 --- /dev/null +++ b/examples/performance/performance-consolidated.js @@ -0,0 +1,71 @@ +const mode = process.env.MODE ? process.env.MODE : 'confluent'; + +let runProducer, runConsumer, runConsumeTransformProduce, runCreateTopics; +if (mode === 'confluent') { + ({ runProducer, runConsumer, runConsumeTransformProduce, runCreateTopics } = require('./performance-primitives')); +} else { + ({ runProducer, runConsumer, runConsumeTransformProduce, runCreateTopics } = require('./performance-primitives-kafkajs')); +} + +const brokers = process.env.KAFKA_BROKERS || 'localhost:9092'; +const topic = process.env.KAFKA_TOPIC || 'test-topic'; +const topic2 = process.env.KAFKA_TOPIC2 || 'test-topic2'; +const messageCount = process.env.MESSAGE_COUNT ? +process.env.MESSAGE_COUNT : 1000000; +const messageSize = process.env.MESSAGE_SIZE ? +process.env.MESSAGE_SIZE : 256; +const batchSize = process.env.BATCH_SIZE ? +process.env.BATCH_SIZE : 100; +const compression = process.env.COMPRESSION || 'None'; +const warmupMessages = process.env.WARMUP_MESSAGES ? +process.env.WARMUP_MESSAGES : (batchSize * 10); +const messageProcessTimeMs = process.env.MESSAGE_PROCESS_TIME_MS ? +process.env.MESSAGE_PROCESS_TIME_MS : 5; +const ctpConcurrency = process.env.CONSUME_TRANSFORM_PRODUCE_CONCURRENCY ? 
+process.env.CONSUME_TRANSFORM_PRODUCE_CONCURRENCY : 1; + +(async function () { + const producer = process.argv.includes('--producer'); + const consumer = process.argv.includes('--consumer'); + const ctp = process.argv.includes('--ctp'); + const all = process.argv.includes('--all'); + const createTopics = process.argv.includes('--create-topics'); + + if (createTopics || all) { + console.log("=== Creating Topics (deleting if they exist already):"); + console.log(` Brokers: ${brokers}`); + console.log(` Topic: ${topic}`); + console.log(` Topic2: ${topic2}`); + await runCreateTopics(brokers, topic, topic2); + } + + if (producer || all) { + console.log("=== Running Basic Producer Performance Test:") + console.log(` Brokers: ${brokers}`); + console.log(` Topic: ${topic}`); + console.log(` Message Count: ${messageCount}`); + console.log(` Message Size: ${messageSize}`); + console.log(` Batch Size: ${batchSize}`); + console.log(` Compression: ${compression}`); + console.log(` Warmup Messages: ${warmupMessages}`); + const producerRate = await runProducer(brokers, topic, batchSize, warmupMessages, messageCount, messageSize, compression); + console.log("=== Producer Rate: ", producerRate); + } + + if (consumer || all) { + // If user runs this without --producer then they are responsible for seeding the topic. + console.log("=== Running Basic Consumer Performance Test:") + console.log(` Brokers: ${brokers}`); + console.log(` Topic: ${topic}`); + console.log(` Message Count: ${messageCount}`); + const consumerRate = await runConsumer(brokers, topic, messageCount); + console.log("=== Consumer Rate: ", consumerRate); + } + + if (ctp || all) { + console.log("=== Running Consume-Transform-Produce Performance Test:") + console.log(` Brokers: ${brokers}`); + console.log(` ConsumeTopic: ${topic}`); + console.log(` ProduceTopic: ${topic2}`); + console.log(` Message Count: ${messageCount}`); + // Seed the topic with messages + await runProducer(brokers, topic, batchSize, warmupMessages, messageCount, messageSize, compression); + const ctpRate = await runConsumeTransformProduce(brokers, topic, topic2, warmupMessages, messageCount, messageProcessTimeMs, ctpConcurrency); + console.log("=== Consume-Transform-Produce Rate: ", ctpRate); + } + +})(); \ No newline at end of file diff --git a/examples/performance/performance-primitives-kafkajs.js b/examples/performance/performance-primitives-kafkajs.js new file mode 100644 index 00000000..6058d9f2 --- /dev/null +++ b/examples/performance/performance-primitives-kafkajs.js @@ -0,0 +1,241 @@ +const { Kafka, CompressionTypes } = require('kafkajs'); +const { randomBytes } = require('crypto'); +const { hrtime } = require('process'); + +module.exports = { + runProducer, + runConsumer, + runConsumeTransformProduce, + runCreateTopics, +}; + +async function runCreateTopics(brokers, topic, topic2) { + const kafka = new Kafka({ + clientId: 'kafka-test-performance', + brokers: brokers.split(','), + }); + + const admin = kafka.admin(); + await admin.connect(); + + for (let t of [topic, topic2]) { + let topicCreated = await admin.createTopics({ + topics: [{ topic: t, numPartitions: 3 }], + }).catch(console.error); + if (topicCreated) { + console.log(`Created topic ${t}`); + continue; + } + + console.log(`Topic ${t} already exists, deleting and recreating.`); + await admin.deleteTopics({ topics: [t] }).catch(console.error); + await new Promise(resolve => setTimeout(resolve, 1000)); /* Propagate. 
*/ + await admin.createTopics({ + topics: [ + { topic: t, numPartitions: 3 }, + ], + }).catch(console.error); + console.log(`Created topic ${t}`); + } + + await admin.disconnect(); +} + +async function runProducer(brokers, topic, batchSize, warmupMessages, totalMessageCnt, msgSize, compression) { + let totalMessagesSent = 0; + let totalBytesSent = 0; + + const message = { + value: randomBytes(msgSize), + } + + const messages = Array(batchSize).fill(message); + + const kafka = new Kafka({ + clientId: 'kafka-test-performance', + brokers: brokers.split(','), + }); + + const producer = kafka.producer(); + await producer.connect(); + + console.log('Sending ' + warmupMessages + ' warmup messages.'); + while (warmupMessages > 0) { + await producer.send({ + topic, + messages, + compression: CompressionTypes[compression], + }); + warmupMessages -= batchSize; + } + console.log('Sent warmup messages'); + + // Now that warmup is done, start measuring... + let startTime; + let promises = []; + startTime = hrtime(); + let messagesDispatched = 0; + + // The double while-loop allows us to send a bunch of messages and then + // await them all at once. We need the second while loop to keep sending + // in case of queue full errors, which surface only on awaiting. + while (totalMessageCnt == -1 || messagesDispatched < totalMessageCnt) { + while (totalMessageCnt == -1 || messagesDispatched < totalMessageCnt) { + promises.push(producer.send({ + topic, + messages, + compression: CompressionTypes[compression], + }).then(() => { + totalMessagesSent += batchSize; + totalBytesSent += batchSize * msgSize; + }).catch((err) => { + console.error(err); + throw err; + })); + messagesDispatched += batchSize; + } + await Promise.all(promises); + } + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + let rate = (totalBytesSent / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Sent ${totalMessagesSent} messages, ${totalBytesSent} bytes; rate is ${rate} MB/s`); + + await producer.disconnect(); + return rate; +} + +async function runConsumer(brokers, topic, totalMessageCnt) { + const kafka = new Kafka({ + clientId: 'kafka-test-performance', + brokers: brokers.split(','), + }); + + const consumer = kafka.consumer({ + groupId: 'test-group' + Math.random(), + }); + await consumer.connect(); + await consumer.subscribe({ topic, fromBeginning: true }); + + let messagesReceived = 0; + let messagesMeasured = 0; + let totalMessageSize = 0; + let startTime; + let rate; + const skippedMessages = 100; + + console.log("Starting consumer."); + + consumer.run({ + autoCommit: false, + eachMessage: async ({ topic, partition, message }) => { + messagesReceived++; + + if (messagesReceived >= skippedMessages) { + messagesMeasured++; + totalMessageSize += message.value.length; + + if (messagesReceived === skippedMessages) { + startTime = hrtime(); + } else if (messagesMeasured === totalMessageCnt) { + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + rate = (totalMessageSize / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Recvd ${messagesMeasured} messages, ${totalMessageSize} bytes; rate is ${rate} MB/s`); + consumer.pause([{ topic }]); + } + } + } + }); + + totalMessageSize = 0; + + await new Promise((resolve) => { + let interval = setInterval(() => { + if (messagesReceived >= totalMessageCnt) { + clearInterval(interval); + resolve(); + } + }, 1000); + }); + + await consumer.disconnect(); + return rate; +} + +async function 
runConsumeTransformProduce(brokers, consumeTopic, produceTopic, warmupMessages, totalMessageCnt, messageProcessTimeMs, ctpConcurrency) { + const kafka = new Kafka({ + clientId: 'kafka-test-performance', + brokers: brokers.split(','), + }); + + const producer = kafka.producer({}); + await producer.connect(); + + const consumer = kafka.consumer({ + groupId: 'test-group' + Math.random(), + }); + await consumer.connect(); + await consumer.subscribe({ topic: consumeTopic, fromBeginning: true }); + + let messagesReceived = 0; + let messagesMeasured = 0; + let totalMessageSize = 0; + let startTime; + let rate; + const skippedMessages = warmupMessages; + + console.log("Starting consume-transform-produce."); + + consumer.run({ + autoCommit: false, + partitionsConsumedConcurrently: ctpConcurrency, + eachMessage: async ({ topic, partition, message }) => { + messagesReceived++; + + if (messagesReceived >= skippedMessages) { + messagesMeasured++; + totalMessageSize += message.value.length; + + if (messagesReceived === skippedMessages) + startTime = hrtime(); + + /* Simulate message processing for messageProcessTimeMs */ + if (messageProcessTimeMs > 0) { + await new Promise((resolve) => setTimeout(resolve, messageProcessTimeMs)); + } + await producer.send({ + topic: produceTopic, + messages: [{ value: message.value }], + }) + + if (messagesMeasured === totalMessageCnt) { + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + rate = (totalMessageSize / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Recvd, transformed and sent ${messagesMeasured} messages, ${totalMessageSize} bytes; rate is ${rate} MB/s`); + consumer.pause([{ topic }]); + } + } else { + await producer.send({ + topic: produceTopic, + messages: [{ value: message.value }], + }) + } + } + }); + + totalMessageSize = 0; + await new Promise((resolve) => { + let interval = setInterval(() => { + if (messagesMeasured >= totalMessageCnt) { + clearInterval(interval); + resolve(); + } + }, 1000); + }); + + await consumer.disconnect(); + await producer.disconnect(); + return rate; +} diff --git a/examples/performance/performance-primitives.js b/examples/performance/performance-primitives.js new file mode 100644 index 00000000..ed2810cf --- /dev/null +++ b/examples/performance/performance-primitives.js @@ -0,0 +1,261 @@ +const { Kafka, ErrorCodes, CompressionTypes } = require('../../').KafkaJS; +const { randomBytes } = require('crypto'); +const { hrtime } = require('process'); + +module.exports = { + runProducer, + runConsumer, + runConsumeTransformProduce, + runCreateTopics, +}; + +async function runCreateTopics(brokers, topic, topic2) { + const kafka = new Kafka({ + 'client.id': 'kafka-test-performance', + "metadata.broker.list": brokers, + }); + + const admin = kafka.admin(); + await admin.connect(); + + for (let t of [topic, topic2]) { + let topicCreated = await admin.createTopics({ + topics: [{ topic: t, numPartitions: 3 }], + }).catch(console.error); + if (topicCreated) { + console.log(`Created topic ${t}`); + continue; + } + + console.log(`Topic ${t} already exists, deleting and recreating.`); + await admin.deleteTopics({ topics: [t] }).catch(console.error); + await new Promise(resolve => setTimeout(resolve, 1000)); /* Propagate. */ + await admin.createTopics({ + topics: [ + { topic: t, numPartitions: 3 }, + ], + }).catch(console.error); + console.log(`Created topic ${t}`); + await new Promise(resolve => setTimeout(resolve, 1000)); /* Propagate. 
*/ + } + + await admin.disconnect(); +} + +async function runProducer(brokers, topic, batchSize, warmupMessages, totalMessageCnt, msgSize, compression) { + let totalMessagesSent = 0; + let totalBytesSent = 0; + + const message = { + value: randomBytes(msgSize), + } + + const messages = Array(batchSize).fill(message); + + const kafka = new Kafka({ + 'client.id': 'kafka-test-performance', + 'metadata.broker.list': brokers, + 'compression.codec': CompressionTypes[compression], + }); + + const producer = kafka.producer(); + await producer.connect(); + + console.log('Sending ' + warmupMessages + ' warmup messages.'); + while (warmupMessages > 0) { + await producer.send({ + topic, + messages, + }); + warmupMessages -= batchSize; + } + console.log('Sent warmup messages'); + + // Now that warmup is done, start measuring... + let startTime; + let promises = []; + startTime = hrtime(); + let messagesDispatched = 0; + + // The double while-loop allows us to send a bunch of messages and then + // await them all at once. We need the second while loop to keep sending + // in case of queue full errors, which surface only on awaiting. + while (totalMessageCnt == -1 || messagesDispatched < totalMessageCnt) { + while (totalMessageCnt == -1 || messagesDispatched < totalMessageCnt) { + promises.push(producer.send({ + topic, + messages, + }).then(() => { + totalMessagesSent += batchSize; + totalBytesSent += batchSize * msgSize; + }).catch((err) => { + if (err.code === ErrorCodes.ERR__QUEUE_FULL) { + /* do nothing, just send them again */ + messagesDispatched -= batchSize; + } else { + console.error(err); + throw err; + } + })); + messagesDispatched += batchSize; + } + await Promise.all(promises); + } + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + let rate = (totalBytesSent / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Sent ${totalMessagesSent} messages, ${totalBytesSent} bytes; rate is ${rate} MB/s`); + + await producer.disconnect(); + return rate; +} + +async function runConsumer(brokers, topic, totalMessageCnt) { + const kafka = new Kafka({ + 'client.id': 'kafka-test-performance', + 'metadata.broker.list': brokers, + }); + + const consumer = kafka.consumer({ + 'group.id': 'test-group' + Math.random(), + 'enable.auto.commit': false, + 'auto.offset.reset': 'earliest', + 'fetch.queue.backoff.ms': '100', + }); + await consumer.connect(); + await consumer.subscribe({ topic }); + + let messagesReceived = 0; + let messagesMeasured = 0; + let totalMessageSize = 0; + let startTime; + let rate; + const skippedMessages = 100; + + console.log("Starting consumer."); + + consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + messagesReceived++; + + if (messagesReceived >= skippedMessages) { + messagesMeasured++; + totalMessageSize += message.value.length; + + if (messagesReceived === skippedMessages) { + startTime = hrtime(); + } else if (messagesMeasured === totalMessageCnt) { + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + rate = (totalMessageSize / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Recvd ${messagesMeasured} messages, ${totalMessageSize} bytes; rate is ${rate} MB/s`); + consumer.pause([{ topic }]); + } + } + } + }); + + totalMessageSize = 0; + await new Promise((resolve) => { + let interval = setInterval(() => { + if (messagesMeasured >= totalMessageCnt) { + clearInterval(interval); + resolve(); + } + }, 1000); + }); + + await consumer.disconnect(); + return rate; +} 
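+
+// Consume from consumeTopic, simulate per-message processing by sleeping for
+// messageProcessTimeMs, and produce each message to produceTopic. Messages
+// received before the warmup threshold are forwarded but not counted in the
+// measured rate.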
+ +async function runConsumeTransformProduce(brokers, consumeTopic, produceTopic, warmupMessages, totalMessageCnt, messageProcessTimeMs, ctpConcurrency) { + console.log("here"); + const kafka = new Kafka({ + 'client.id': 'kafka-test-performance', + 'metadata.broker.list': brokers, + }); + + const producer = kafka.producer({ + /* We want things to be flushed immediately as we'll be awaiting this. */ + 'linger.ms': 0 + }); + await producer.connect(); + + const consumer = kafka.consumer({ + 'group.id': 'test-group' + Math.random(), + 'enable.auto.commit': false, + 'auto.offset.reset': 'earliest', + + /* These fields are more-or-less required for cases where eachMessage includes + * any async operatiosn, else `partitionsConsumedConcurrently` does not have + * much effect. Reason for this is that, internally, librdkafka fetches + * a large number of messages from one topic partition and that fills the + * cache up, and we end up underutilizing concurrency. + * TODO: remove or change these, discuss this issue and make changes in the code. */ + 'message.max.bytes': 1000, + 'fetch.max.bytes': 1000, + }); + await consumer.connect(); + await consumer.subscribe({ topic: consumeTopic }); + + let messagesReceived = 0; + let messagesMeasured = 0; + let totalMessageSize = 0; + let startTime; + let rate; + const skippedMessages = warmupMessages; + + console.log("Starting consume-transform-produce."); + + consumer.run({ + partitionsConsumedConcurrently: ctpConcurrency, + eachMessage: async ({ topic, partition, message }) => { + messagesReceived++; + + if (messagesReceived >= skippedMessages) { + messagesMeasured++; + totalMessageSize += message.value.length; + + if (messagesReceived === skippedMessages) + startTime = hrtime(); + + /* Simulate message processing for messageProcessTimeMs */ + if (messageProcessTimeMs > 0) { + await new Promise((resolve) => setTimeout(resolve, messageProcessTimeMs)); + } + await producer.send({ + topic: produceTopic, + messages: [{ value: message.value }], + }) + + if (messagesMeasured === totalMessageCnt) { + let elapsed = hrtime(startTime); + let durationNanos = elapsed[0] * 1e9 + elapsed[1]; + rate = (totalMessageSize / durationNanos) * 1e9 / (1024 * 1024); /* MB/s */ + console.log(`Recvd, transformed and sent ${messagesMeasured} messages, ${totalMessageSize} bytes; rate is ${rate} MB/s`); + consumer.pause([{ topic }]); + } + } else { + await producer.send({ + topic: produceTopic, + messages: [{ value: message.value }], + }) + } + } + }); + + totalMessageSize = 0; + await new Promise((resolve) => { + let interval = setInterval(() => { + if (messagesMeasured >= totalMessageCnt) { + clearInterval(interval); + resolve(); + } + }, 1000); + }); + + await consumer.disconnect(); + await producer.disconnect(); + return rate; +} diff --git a/examples/producer-confluent-cloud.js b/examples/producer-confluent-cloud.js new file mode 100644 index 00000000..94438e8f --- /dev/null +++ b/examples/producer-confluent-cloud.js @@ -0,0 +1,44 @@ +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function producerStart() { + const CLUSTER_BOOTSTRAP_URL = 'your_cluster_url_here'; + const CLUSTER_API_KEY = 'your_cluster_api_key_here'; + const CLUSTER_API_SECRET = 'your_cluster_api_secret_here'; + + const producer = new Kafka().producer({ + 'bootstrap.servers': `${CLUSTER_BOOTSTRAP_URL}`, + 'security.protocol': 'SASL_SSL', + 'sasl.mechanisms': 'PLAIN', + 'sasl.username': `${CLUSTER_API_KEY}`, + 'sasl.password': `${CLUSTER_API_SECRET}`, + 'acks': 'all', + }); + + await 
producer.connect(); + console.log("Connected successfully"); + + const res = [] + for (let i = 0; i < 50; i++) { + res.push(producer.send({ + topic: 'test-topic', + messages: [ + { value: 'v222', partition: 1 }, + { value: 'v11', partition: 0, key: 'x' }, + ] + })); + } + + const produceRecords = await Promise.all(res); + + // Produce records is an array of delivery reports for each call to `send`. + // In case `messages` contains more than one message to the same topic-partition, only the last + // delivery report is included in the array. + console.log("Produced messages, first delivery report:\n" + JSON.stringify(produceRecords[0], null, 2)); + console.log("Produced messages, last delivery report:\n" + JSON.stringify(produceRecords[produceRecords.length - 1], null, 2)); + + await producer.disconnect(); + + console.log("Disconnected successfully"); +} + +producerStart(); diff --git a/examples/producer.js b/examples/producer.js new file mode 100644 index 00000000..440cbcb8 --- /dev/null +++ b/examples/producer.js @@ -0,0 +1,47 @@ +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; + +async function producerStart() { + const producer = new Kafka().producer({ + 'bootstrap.servers': 'localhost:9092', + 'acks': 'all', + }); + + await producer.connect(); + console.log("Connected successfully"); + + const res = [] + for (let i = 0; i < 50; i++) { + res.push(producer.send({ + topic: 'test-topic', + messages: [ + { + value: 'v1', + partition: 0, + key: 'x', + headers: { + 'header1': ['h1v1', 'h1v2'], + 'header3': 'h3v3', + } + }, + { + value: 'v2', + key: 'y', + } + ] + })); + } + + const produceRecords = await Promise.all(res); + + // Produce records is an array of delivery reports for each call to `send`. + // In case `messages` contains more than one message to the same topic-partition, only the last + // delivery report is included in the array. 
+ console.log("Produced messages, first delivery report:\n" + JSON.stringify(produceRecords[0], null, 2)); + console.log("Produced messages, last delivery report:\n" + JSON.stringify(produceRecords[produceRecords.length - 1], null, 2)); + + await producer.disconnect(); + + console.log("Disconnected successfully"); +} + +producerStart(); diff --git a/examples/typescript/.gitignore b/examples/typescript/.gitignore new file mode 100644 index 00000000..a6c7c285 --- /dev/null +++ b/examples/typescript/.gitignore @@ -0,0 +1 @@ +*.js diff --git a/examples/typescript/kafkajs.ts b/examples/typescript/kafkajs.ts new file mode 100644 index 00000000..faf11afb --- /dev/null +++ b/examples/typescript/kafkajs.ts @@ -0,0 +1,90 @@ +import { KafkaJS } from '@confluentinc/kafka-javascript'; + +const bootstrapServer = ''; + +async function runProducer() { + const kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [bootstrapServer], + }, + }); + + const producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + + await producer.connect(); + + await producer.send({ + topic: 'test-topic', + messages: [ + { + value: 'Hello World!', + key: 'key1', + headers: { + 'header1': 'value1', + 'header2': [Buffer.from('value2'), 'value3'] + } + }, + ], + }); + + await producer.disconnect(); +} + +async function runConsumer() { + const kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [bootstrapServer], + }, + }); + + const consumer = kafka.consumer({ + kafkaJS: { + groupId: 'test-group' + Math.random(), + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: 'test-topic' }); + + await consumer.run({ + eachMessage: async ({ message }) => { + console.log({ + key: message.key ? message.key.toString() : null, + value: message.value ? 
message.value.toString() : null, + headers: message.headers, + }); + }, + }); + + await new Promise((resolve) => setTimeout(resolve, 30000)); + await consumer.disconnect(); +} + +async function runAdminClient() { + const kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [bootstrapServer], + }, + }); + + const admin = kafka.admin() + await admin.connect(); + + await admin.createTopics({ topics: [{ topic: 'test-topic' }] }); + console.log("Created topic"); + + await admin.disconnect(); +} + +runAdminClient() + .then(runProducer) + .then(runConsumer) + .catch(console.error); \ No newline at end of file diff --git a/examples/typescript/node-rdkafka.ts b/examples/typescript/node-rdkafka.ts new file mode 100644 index 00000000..81d7cde1 --- /dev/null +++ b/examples/typescript/node-rdkafka.ts @@ -0,0 +1,82 @@ +import * as RdKafka from '@confluentinc/kafka-javascript'; + +const bootstrapServers = ''; + +function runProducer() { + const producer = new RdKafka.Producer({ + 'bootstrap.servers': bootstrapServers, + 'dr_msg_cb': true, + }); + + producer.connect(); + + producer.on('ready', () => { + console.log("Producer is ready"); + producer.setPollInterval(100); + producer.produce('test-topic', null, Buffer.from('Hello World!'), null, Date.now()); + }); + + producer.on('event.error', (err) => { + console.error(err); + }); + + producer.on('delivery-report', (err, report) => { + console.log("Delivery report received:"); + console.log({err, report}); + producer.disconnect(err => { + if (err) + console.log("Error disconnecting producer ", err); + console.log("Disconnected producer"); + }); + }); +} + +function runConsumer() { + const consumer = new RdKafka.KafkaConsumer({ + 'group.id': 'test-group', + 'bootstrap.servers': bootstrapServers, + }, { + 'auto.offset.reset': 'earliest', + }); + + consumer.connect(); + + consumer.on('ready', () => { + console.log("Consumer is ready"); + consumer.subscribe(['test-topic']); + consumer.consume(); + }); + + consumer.on('data', (data) => { + console.log("Received data"); + console.log(data); + }); + + consumer.on('event.error', (err) => { + console.error(err); + }); + + setTimeout(() => consumer.disconnect(), 30000); +} + +function runAdminClient() { + const admin = RdKafka.AdminClient.create({ + "bootstrap.servers": bootstrapServers, + }); + + admin.createTopic({ topic: "test-topic", num_partitions: 1, replication_factor: 1 }, (err) => { + if (err) { + console.error(err); + admin.disconnect(); + return; + } + console.log("Created topic"); + admin.disconnect(); + }); + +} + +// As an example, run each with some time gap to allow the prior one to finish. 
+runAdminClient(); +setTimeout(runProducer, 5000); +setTimeout(runConsumer, 25000); \ No newline at end of file diff --git a/examples/typescript/package.json b/examples/typescript/package.json new file mode 100644 index 00000000..33a10079 --- /dev/null +++ b/examples/typescript/package.json @@ -0,0 +1,18 @@ +{ + "name": "typescript", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "MIT", + "dependencies": { + "@confluentinc/kafka-javascript": "file:../..", + "typescript": "^5.4.4" + }, + "devDependencies": { + "@types/node": "^20.12.5" + } +} diff --git a/index.d.ts b/index.d.ts index bfbbdcbe..8cc4d295 100644 --- a/index.d.ts +++ b/index.d.ts @@ -1,367 +1,3 @@ -import { Readable, ReadableOptions, Writable, WritableOptions } from 'stream'; -import { EventEmitter } from 'events'; -import { - GlobalConfig, - TopicConfig, - ConsumerGlobalConfig, - ConsumerTopicConfig, - ProducerGlobalConfig, - ProducerTopicConfig, -} from './types/config'; - -export * from './types/config'; -export * from './types/errors'; -import { Kafka } from './types/kafkajs'; -import * as errors from './types/errors'; - -export interface LibrdKafkaError { - message: string; - code: number; - errno: number; - origin: string; - stack?: string; - isFatal?: boolean; - isRetriable?: boolean; - isTxnRequiresAbort?: boolean; -} - -export interface ReadyInfo { - name: string; -} - -export interface ClientMetrics { - connectionOpened: number; -} - -export interface MetadataOptions { - topic?: string; - allTopics?: boolean; - timeout?: number; -} - -export interface BrokerMetadata { - id: number; - host: string; - port: number; -} - -export interface PartitionMetadata { - id: number; - leader: number; - replicas: number[]; - isrs: number[]; -} - -export interface TopicMetadata { - name: string; - partitions: PartitionMetadata[]; -} - -export interface Metadata { - orig_broker_id: number; - orig_broker_name: string; - topics: TopicMetadata[]; - brokers: BrokerMetadata[]; -} - -export interface WatermarkOffsets{ - lowOffset: number; - highOffset: number; -} - -export interface TopicPartition { - topic: string; - partition: number; -} - -export interface TopicPartitionOffset extends TopicPartition{ - offset: number; -} - -export type TopicPartitionTime = TopicPartitionOffset; - -export type EofEvent = TopicPartitionOffset; - -export type Assignment = TopicPartition | TopicPartitionOffset; - -export interface DeliveryReport extends TopicPartitionOffset { - value?: MessageValue; - size: number; - key?: MessageKey; - timestamp?: number; - opaque?: any; -} - -export type NumberNullUndefined = number | null | undefined; - -export type MessageKey = Buffer | string | null | undefined; -export type MessageHeader = { [key: string]: string | Buffer }; -export type MessageValue = Buffer | null; -export type SubscribeTopic = string | RegExp; -export type SubscribeTopicList = SubscribeTopic[]; - -export interface Message extends TopicPartitionOffset { - value: MessageValue; - size: number; - topic: string; - key?: MessageKey; - timestamp?: number; - headers?: MessageHeader[]; - opaque?: any; -} - -export interface ReadStreamOptions extends ReadableOptions { - topics: SubscribeTopicList | SubscribeTopic | ((metadata: Metadata) => SubscribeTopicList); - waitInterval?: number; - fetchSize?: number; - objectMode?: boolean; - highWaterMark?: number; - autoClose?: boolean; - streamAsBatch?: boolean; - connectOptions?: any; -} - 
-export interface WriteStreamOptions extends WritableOptions { - encoding?: string; - objectMode?: boolean; - topic?: string; - autoClose?: boolean; - pollInterval?: number; - connectOptions?: any; -} - -export interface ProducerStream extends Writable { - producer: Producer; - connect(metadataOptions?: MetadataOptions): void; - close(cb?: () => void): void; -} - -export interface ConsumerStream extends Readable { - consumer: KafkaConsumer; - connect(options: ConsumerGlobalConfig): void; - close(cb?: () => void): void; -} - -type KafkaClientEvents = 'disconnected' | 'ready' | 'connection.failure' | 'event.error' | 'event.stats' | 'event.log' | 'event.event' | 'event.throttle'; -type KafkaConsumerEvents = 'data' | 'partition.eof' | 'rebalance' | 'rebalance.error' | 'subscribed' | 'unsubscribed' | 'unsubscribe' | 'offset.commit' | KafkaClientEvents; -type KafkaProducerEvents = 'delivery-report' | KafkaClientEvents; - -type EventListenerMap = { - // ### Client - // connectivity events - 'disconnected': (metrics: ClientMetrics) => void, - 'ready': (info: ReadyInfo, metadata: Metadata) => void, - 'connection.failure': (error: LibrdKafkaError, metrics: ClientMetrics) => void, - // event messages - 'event.error': (error: LibrdKafkaError) => void, - 'event.stats': (eventData: any) => void, - 'event.log': (eventData: any) => void, - 'event.event': (eventData: any) => void, - 'event.throttle': (eventData: any) => void, - // ### Consumer only - // domain events - 'data': (arg: Message) => void, - 'partition.eof': (arg: EofEvent) => void, - 'rebalance': (err: LibrdKafkaError, assignments: TopicPartition[]) => void, - 'rebalance.error': (err: Error) => void, - // connectivity events - 'subscribed': (topics: SubscribeTopicList) => void, - 'unsubscribe': () => void, - 'unsubscribed': () => void, - // offsets - 'offset.commit': (error: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void, - // ### Producer only - // delivery - 'delivery-report': (error: LibrdKafkaError, report: DeliveryReport) => void, -} - -type EventListener = K extends keyof EventListenerMap ? 
EventListenerMap[K] : never; - -export abstract class Client extends EventEmitter { - constructor(globalConf: GlobalConfig, SubClientType: any, topicConf: TopicConfig); - - connect(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): this; - - getClient(): any; - - connectedTime(): number; - - getLastError(): LibrdKafkaError; - - disconnect(cb?: (err: any, data: ClientMetrics) => any): this; - disconnect(timeout: number, cb?: (err: any, data: ClientMetrics) => any): this; - - isConnected(): boolean; - - getMetadata(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): any; - - queryWatermarkOffsets(topic: string, partition: number, timeout: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any; - queryWatermarkOffsets(topic: string, partition: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any; - - on(event: E, listener: EventListener): this; - once(event: E, listener: EventListener): this; -} - -export class KafkaConsumer extends Client { - constructor(conf: ConsumerGlobalConfig, topicConf: ConsumerTopicConfig); - - assign(assignments: Assignment[]): this; - - assignments(): Assignment[]; - - commit(topicPartition: TopicPartitionOffset | TopicPartitionOffset[]): this; - commit(): this; - - commitMessage(msg: TopicPartitionOffset): this; - - commitMessageSync(msg: TopicPartitionOffset): this; - - commitSync(topicPartition: TopicPartitionOffset | TopicPartitionOffset[]): this; - - committed(toppars: TopicPartition[], timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void): this; - committed(timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void): this; - - consume(number: number, cb?: (err: LibrdKafkaError, messages: Message[]) => void): void; - consume(cb: (err: LibrdKafkaError, messages: Message[]) => void): void; - consume(): void; - - getWatermarkOffsets(topic: string, partition: number): WatermarkOffsets; - - offsetsStore(topicPartitions: TopicPartitionOffset[]): any; - - pause(topicPartitions: TopicPartition[]): any; - - position(toppars?: TopicPartition[]): TopicPartitionOffset[]; - - resume(topicPartitions: TopicPartition[]): any; - - seek(toppar: TopicPartitionOffset, timeout: number | null, cb: (err: LibrdKafkaError) => void): this; - - setDefaultConsumeTimeout(timeoutMs: number): void; - - setDefaultConsumeLoopTimeoutDelay(timeoutMs: number): void; - - subscribe(topics: SubscribeTopicList): this; - - subscription(): string[]; - - unassign(): this; - - unsubscribe(): this; - - offsetsForTimes(topicPartitions: TopicPartitionTime[], timeout: number, cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void; - offsetsForTimes(topicPartitions: TopicPartitionTime[], cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void; - - static createReadStream(conf: ConsumerGlobalConfig, topicConfig: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): ConsumerStream; -} - -export class Producer extends Client { - constructor(conf: ProducerGlobalConfig, topicConf?: ProducerTopicConfig); - - flush(timeout?: NumberNullUndefined, cb?: (err: LibrdKafkaError) => void): this; - - poll(): this; - - produce(topic: string, partition: NumberNullUndefined, message: MessageValue, key?: MessageKey, timestamp?: NumberNullUndefined, opaque?: any, headers?: MessageHeader[]): any; - - setPollInterval(interval: number): this; - - static createWriteStream(conf: 
ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream; - - initTransactions(cb: (err: LibrdKafkaError) => void): void; - initTransactions(timeout: number, cb: (err: LibrdKafkaError) => void): void; - beginTransaction(cb: (err: LibrdKafkaError) => void): void; - commitTransaction(cb: (err: LibrdKafkaError) => void): void; - commitTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void; - abortTransaction(cb: (err: LibrdKafkaError) => void): void; - abortTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void; - sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, cb: (err: LibrdKafkaError) => void): void; - sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, timeout: number, cb: (err: LibrdKafkaError) => void): void; -} - -export class HighLevelProducer extends Producer { - produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, callback: (err: any, offset?: NumberNullUndefined) => void): any; - produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, headers: MessageHeader[], callback: (err: any, offset?: NumberNullUndefined) => void): any; - - setKeySerializer(serializer: (key: any, cb: (err: any, key: MessageKey) => void) => void): void; - setKeySerializer(serializer: (key: any) => MessageKey | Promise): void; - setValueSerializer(serializer: (value: any, cb: (err: any, value: MessageValue) => void) => void): void; - setValueSerializer(serializer: (value: any) => MessageValue | Promise): void; -} - -export const features: string[]; - -export const librdkafkaVersion: string; - -export function createReadStream(conf: ConsumerGlobalConfig, topicConf: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): ConsumerStream; - -export function createWriteStream(conf: ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream; - -export interface NewTopic { - topic: string; - num_partitions: number; - replication_factor: number; - config?: { - 'cleanup.policy'?: 'delete' | 'compact' | 'delete,compact' | 'compact,delete'; - 'compression.type'?: 'gzip' | 'snappy' | 'lz4' | 'zstd' | 'uncompressed' | 'producer'; - 'delete.retention.ms'?: string; - 'file.delete.delay.ms'?: string; - 'flush.messages'?: string; - 'flush.ms'?: string; - 'follower.replication.throttled.replicas'?: string; - 'index.interval.bytes'?: string; - 'leader.replication.throttled.replicas'?: string; - 'max.compaction.lag.ms'?: string; - 'max.message.bytes'?: string; - 'message.format.version'?: string; - 'message.timestamp.difference.max.ms'?: string; - 'message.timestamp.type'?: string; - 'min.cleanable.dirty.ratio'?: string; - 'min.compaction.lag.ms'?: string; - 'min.insync.replicas'?: string; - 'preallocate'?: string; - 'retention.bytes'?: string; - 'retention.ms'?: string; - 'segment.bytes'?: string; - 'segment.index.bytes'?: string; - 'segment.jitter.ms'?: string; - 'segment.ms'?: string; - 'unclean.leader.election.enable'?: string; - 'message.downconversion.enable'?: string; - } | { [cfg: string]: string; }; -} - -export interface IAdminClient { - createTopic(topic: NewTopic, cb?: (err: LibrdKafkaError) => void): void; - createTopic(topic: NewTopic, timeout?: number, cb?: (err: LibrdKafkaError) => void): void; - - deleteTopic(topic: string, cb?: (err: LibrdKafkaError) => void): void; - deleteTopic(topic: string, timeout?: 
number, cb?: (err: LibrdKafkaError) => void): void; - - createPartitions(topic: string, desiredPartitions: number, cb?: (err: LibrdKafkaError) => void): void; - createPartitions(topic: string, desiredPartitions: number, timeout?: number, cb?: (err: LibrdKafkaError) => void): void; - - disconnect(): void; -} - -export abstract class AdminClient { - static create(conf: GlobalConfig): IAdminClient; -} - -export type RdKafka = { - Consumer: KafkaConsumer, - Producer: Producer, - HighLevelProducer: HighLevelProducer, - AdminClient: AdminClient, - KafkaConsumer: KafkaConsumer, - createReadStream: typeof KafkaConsumer.createReadStream, - createWriteStream: typeof Producer.createWriteStream, - CODES: typeof errors.CODES, - Topic: (name: string) => string, - features: typeof features, - librdkafkaVersion: typeof librdkafkaVersion, -} - -export type KafkaJS = { - Kafka: Kafka -} +export * from './types/rdkafka'; +export * as RdKafka from './types/rdkafka'; +export * as KafkaJS from './types/kafkajs'; \ No newline at end of file diff --git a/jest.config.js b/jest.config.js new file mode 100644 index 00000000..e3471a95 --- /dev/null +++ b/jest.config.js @@ -0,0 +1,5 @@ +module.exports = { + transform: { + '^.+\\.tsx?$': 'ts-jest', + }, + }; diff --git a/lib/admin.js b/lib/admin.js index 76c76de7..fe8750cc 100644 --- a/lib/admin.js +++ b/lib/admin.js @@ -1,22 +1,56 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ 'use strict'; +/* TODO: Think of a way to fetch these from within librdkafka instead of this + * hardcoded list. + * New additions won't be automatically added to this list. + */ +const ConsumerGroupStates = Object.seal({ + UNKNOWN: 0, + PREPARING_REBALANCE: 1, + COMPLETING_REBALANCE: 2, + STABLE: 3, + DEAD: 4, + EMPTY: 5, +}); + +const AclOperationTypes = Object.seal({ + UNKNOWN: 0, + ANY: 1, + ALL: 2, + READ: 3, + WRITE: 4, + CREATE: 5, + DELETE: 6, + ALTER: 7, + DESCRIBE: 8, + CLUSTER_ACTION: 9, + DESCRIBE_CONFIGS: 10, + ALTER_CONFIGS: 11, + IDEMPOTENT_WRITE: 12, +}); + module.exports = { create: createAdminClient, + ConsumerGroupStates, + AclOperationTypes, }; var Client = require('./client'); var util = require('util'); var Kafka = require('../librdkafka'); var LibrdKafkaError = require('./error'); -var shallowCopy = require('./util').shallowCopy; +var { shallowCopy } = require('./util'); + +util.inherits(AdminClient, Client); /** * Create a new AdminClient for making topics, partitions, and more. @@ -24,10 +58,19 @@ var shallowCopy = require('./util').shallowCopy; * This is a factory method because it immediately starts an * active handle with the brokers. 
* + * @param {object} conf - Key value pairs to configure the admin client + * @param {object} eventHandlers - optional key value pairs of event handlers to attach to the client + * */ -function createAdminClient(conf) { +function createAdminClient(conf, eventHandlers) { var client = new AdminClient(conf); + if (eventHandlers && typeof eventHandlers === 'object') { + for (const key in eventHandlers) { + client.on(key, eventHandlers[key]); + } + } + // Wrap the error so we throw if it failed with some context LibrdKafkaError.wrap(client.connect(), true); @@ -47,7 +90,7 @@ function createAdminClient(conf) { * * * Once you instantiate this object, it will have a handle to the kafka broker. - * Unlike the other confluent-kafka-js classes, this class does not ensure that + * Unlike the other confluent-kafka-javascript classes, this class does not ensure that * it is connected to the upstream broker. Instead, making an action will * validate that. * @@ -76,7 +119,7 @@ function AdminClient(conf) { * for the topic. */ - this._client = new Kafka.AdminClient(conf); + Client.call(this, conf, Kafka.AdminClient); this._isConnected = false; this.globalConfig = conf; } @@ -89,9 +132,11 @@ function AdminClient(conf) { * * Unlike the other connect methods, this one is synchronous. */ -AdminClient.prototype.connect = function() { +AdminClient.prototype.connect = function () { + this._client.configureCallbacks(true, this._cb_configs); LibrdKafkaError.wrap(this._client.connect(), true); this._isConnected = true; + this.emit('ready', { name: this._client.name() }); }; /** @@ -100,9 +145,12 @@ AdminClient.prototype.connect = function() { * This is a synchronous method, but all it does is clean up * some memory and shut some threads down */ -AdminClient.prototype.disconnect = function() { +AdminClient.prototype.disconnect = function () { LibrdKafkaError.wrap(this._client.disconnect(), true); this._isConnected = false; + // The AdminClient doesn't provide a callback. So we can't + // wait for completion. + this._client.configureCallbacks(false, this._cb_configs); }; /** @@ -112,7 +160,7 @@ AdminClient.prototype.disconnect = function() { * @param {number} timeout - Number of milliseconds to wait while trying to create the topic. * @param {function} cb - The callback to be executed when finished */ -AdminClient.prototype.createTopic = function(topic, timeout, cb) { +AdminClient.prototype.createTopic = function (topic, timeout, cb) { if (!this._isConnected) { throw new Error('Client is disconnected'); } @@ -126,7 +174,7 @@ AdminClient.prototype.createTopic = function(topic, timeout, cb) { timeout = 5000; } - this._client.createTopic(topic, timeout, function(err) { + this._client.createTopic(topic, timeout, function (err) { if (err) { if (cb) { cb(LibrdKafkaError.create(err)); @@ -147,7 +195,7 @@ AdminClient.prototype.createTopic = function(topic, timeout, cb) { * @param {number} timeout - Number of milliseconds to wait while trying to delete the topic. 
* @param {function} cb - The callback to be executed when finished */ -AdminClient.prototype.deleteTopic = function(topic, timeout, cb) { +AdminClient.prototype.deleteTopic = function (topic, timeout, cb) { if (!this._isConnected) { throw new Error('Client is disconnected'); } @@ -161,7 +209,7 @@ AdminClient.prototype.deleteTopic = function(topic, timeout, cb) { timeout = 5000; } - this._client.deleteTopic(topic, timeout, function(err) { + this._client.deleteTopic(topic, timeout, function (err) { if (err) { if (cb) { cb(LibrdKafkaError.create(err)); @@ -184,7 +232,7 @@ AdminClient.prototype.deleteTopic = function(topic, timeout, cb) { * @param {number} timeout - Number of milliseconds to wait while trying to create the partitions. * @param {function} cb - The callback to be executed when finished */ -AdminClient.prototype.createPartitions = function(topic, totalPartitions, timeout, cb) { +AdminClient.prototype.createPartitions = function (topic, totalPartitions, timeout, cb) { if (!this._isConnected) { throw new Error('Client is disconnected'); } @@ -198,7 +246,7 @@ AdminClient.prototype.createPartitions = function(topic, totalPartitions, timeou timeout = 5000; } - this._client.createPartitions(topic, totalPartitions, timeout, function(err) { + this._client.createPartitions(topic, totalPartitions, timeout, function (err) { if (err) { if (cb) { cb(LibrdKafkaError.create(err)); @@ -211,3 +259,181 @@ AdminClient.prototype.createPartitions = function(topic, totalPartitions, timeou } }); }; + +/** + * List consumer groups. + * @param {any} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {import("../").ConsumerGroupStates[]?} options.matchConsumerGroupStates - + * A list of consumer group states to match. May be unset, fetches all states (default: unset). + * @param {function} cb - The callback to be executed when finished. + * + * Valid ways to call this function: + * listGroups(cb) + * listGroups(options, cb) + */ +AdminClient.prototype.listGroups = function (options, cb) { + if (!this._isConnected) { + throw new Error('Client is disconnected'); + } + + if (typeof options === 'function') { + cb = options; + options = {}; + } + + if (!options) { + options = {}; + } + + this._client.listGroups(options, function (err, groups) { + if (err) { + if (cb) { + cb(LibrdKafkaError.create(err)); + } + return; + } + + if (cb) { + cb(null, groups); + } + }); +}; + +/** + * Describe consumer groups. + * @param {string[]} groups - The names of the groups to describe. + * @param {any?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {boolean?} options.includeAuthorizedOperations - If true, include operations allowed on the group by the calling client (default: false). + * @param {function} cb - The callback to be executed when finished. 
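+ *
+ * For example (illustrative only; the receiver and group name are placeholders):
+ * client.describeGroups(['my-group'], { timeout: 5000, includeAuthorizedOperations: true }, (err, descriptions) => { ... });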
+ * + * Valid ways to call this function: + * describeGroups(groups, cb) + * describeGroups(groups, options, cb) + */ +AdminClient.prototype.describeGroups = function (groups, options, cb) { + if (!this._isConnected) { + throw new Error('Client is disconnected'); + } + + if (typeof options === 'function') { + cb = options; + options = {}; + } + + if (!options) { + options = {}; + } + + this._client.describeGroups(groups, options, function (err, descriptions) { + if (err) { + if (cb) { + cb(LibrdKafkaError.create(err)); + } + return; + } + + if (cb) { + cb(null, descriptions); + } + }); +}; + +/** + * Delete consumer groups. + * @param {string[]} groups - The names of the groups to delete. + * @param {any?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {function} cb - The callback to be executed when finished. + * + * Valid ways to call this function: + * deleteGroups(groups, cb) + * deleteGroups(groups, options, cb) + */ +AdminClient.prototype.deleteGroups = function (groups, options, cb) { + if (!this._isConnected) { + throw new Error('Client is disconnected'); + } + + if (typeof options === 'function') { + cb = options; + options = {}; + } + + if (!options) { + options = {}; + } + + this._client.deleteGroups(groups, options, function (err, reports) { + if (err) { + if (cb) { + cb(LibrdKafkaError.create(err)); + } + return; + } + + if (cb) { + cb(null, reports); + } + }); +}; + +/** + * List topics. + * + * @param {any?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {function} cb - The callback to be executed when finished. + * + * Valid ways to call this function: + * listTopics(cb) + * listTopics(options, cb) + */ +AdminClient.prototype.listTopics = function (options, cb) { + if (!this._isConnected) { + throw new Error('Client is disconnected'); + } + + if (typeof options === 'function') { + cb = options; + options = {}; + } + + if (!options) { + options = {}; + } + + // Always set allTopics to true, since we need a list. + options.allTopics = true; + if (!Object.hasOwn(options, 'timeout')) { + options.timeout = 5000; + } + + // This definitely isn't the fastest way to list topics as + // this makes a pretty large metadata request. But for the sake + // of AdminAPI, this is okay. + this._client.getMetadata(options, function (err, metadata) { + if (err) { + if (cb) { + cb(LibrdKafkaError.create(err)); + } + return; + } + + const topics = []; + if (metadata.topics) { + for (const topic of metadata.topics) { + topics.push(topic.name); + } + } + + if (cb) { + cb(null, topics); + } + }); +}; diff --git a/lib/client.js b/lib/client.js index 9ba38b69..84f976cf 100644 --- a/lib/client.js +++ b/lib/client.js @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
@@ -12,7 +13,8 @@ module.exports = Client; var Emitter = require('events').EventEmitter; var util = require('util'); var Kafka = require('../librdkafka.js'); -var assert = require('assert'); + +const { bindingVersion, dictToStringList } = require('./util'); var LibrdKafkaError = require('./error'); @@ -45,14 +47,23 @@ function Client(globalConf, SubClientType, topicConf) { // the producer and consumer main wrappers var no_event_cb = globalConf.event_cb === false; - topicConf = topicConf || {}; // delete this because librdkafka will complain since this particular // key is a real conf value delete globalConf.event_cb; + // These properties are not meant to be user-set. + // Clients derived from this might want to change them, but for + // now we override them. + globalConf['client.software.name'] = 'confluent-kafka-javascript'; + globalConf['client.software.version'] = `${bindingVersion}-librdkafka-${Kafka.librdkafkaVersion}`; + this._client = new SubClientType(globalConf, topicConf); + // We should not modify the globalConf object. We have cloned it already. + delete globalConf['client.software.name']; + delete globalConf['client.software.version']; + var extractFunctions = function(obj) { obj = obj || {}; var obj2 = {}; @@ -62,12 +73,12 @@ function Client(globalConf, SubClientType, topicConf) { } } return obj2; - } + }; this._cb_configs = { global: extractFunctions(globalConf), topic: extractFunctions(topicConf), event: {}, - } + }; if (!no_event_cb) { this._cb_configs.event.event_cb = function(eventType, eventData) { @@ -88,6 +99,55 @@ function Client(globalConf, SubClientType, topicConf) { }.bind(this); } + if (Object.hasOwn(this._cb_configs.global, 'oauthbearer_token_refresh_cb')) { + const savedCallback = this._cb_configs.global.oauthbearer_token_refresh_cb; + this._cb_configs.global.oauthbearer_token_refresh_cb = (oauthbearer_config) => { + if (this._isDisconnecting) { + // Don't call the callback if we're in the middle of disconnecting. + // This is especially important when the credentials are wrong, and + // we might want to disconnect without ever completing connection. + return; + } + + // This sets the token or error within librdkafka, and emits any + // errors on the emitter. + const postProcessTokenRefresh = (err, token) => { + try { + if (err) { + throw err; + } + let { tokenValue, lifetime, principal, extensions } = token; + + // If the principal isn't there, set an empty principal. + if (!principal) { + principal = ''; + } + + // Convert extensions from a Map/object to a list that librdkafka expects. + extensions = dictToStringList(extensions); + + this._client.setOAuthBearerToken(tokenValue, lifetime, principal, extensions); + } catch (e) { + e.message = "oauthbearer_token_refresh_cb: " + e.message; + this._client.setOAuthBearerTokenFailure(e.message); + this.emit('error', e); + } + }; + const returnPromise = savedCallback(oauthbearer_config, postProcessTokenRefresh); + + // If it looks like a promise, and quacks like a promise, it is a promise + // (or an async function). We expect the callback NOT to have been called + // in such a case. 
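+      // For example (illustrative), a user-supplied async refresh callback could simply return an
+      // object of the shape { tokenValue, lifetime, principal, extensions }; the promise branch
+      // below then routes that result through postProcessTokenRefresh rather than relying on the
+      // callback argument.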
+ if (returnPromise && (typeof returnPromise.then === 'function')) { + returnPromise.then((token) => { + postProcessTokenRefresh(null, token); + }).catch(err => { + postProcessTokenRefresh(err); + }); + } + }; + } + this.metrics = {}; this._isConnected = false; this.errorCounter = 0; @@ -383,8 +443,6 @@ Client.prototype.queryWatermarkOffsets = function(topic, partition, timeout, cb) } } - var self = this; - if (typeof timeout === 'function') { cb = timeout; timeout = 1000; @@ -429,8 +487,6 @@ Client.prototype.offsetsForTimes = function(toppars, timeout, cb) { } } - var self = this; - if (typeof timeout === 'function') { cb = timeout; timeout = 1000; @@ -455,6 +511,21 @@ Client.prototype.offsetsForTimes = function(toppars, timeout, cb) { }); }; +/** + * Change SASL credentials to be sent on the next authentication attempt. + * + * @param {string} username + * @param {string} password + * @note Only applicable if SASL authentication is being used. + */ +Client.prototype.setSaslCredentials = function(username, password) { + if (!this.isConnected()) { + return; + } + + this._client.setSaslCredentials(username, password); +}; + /** * Wrap a potential RdKafka error. * diff --git a/lib/error.js b/lib/error.js index 5cd6c4f0..ccdc263e 100644 --- a/lib/error.js +++ b/lib/error.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -21,13 +21,13 @@ LibrdKafkaError.wrap = errorWrap; * Enum for identifying errors reported by the library * * You can find this list in the C++ code at - * https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L148 + * https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L148 * * @readonly * @enum {number} * @constant */ -// ====== Generated from librdkafka 2.3.0 file src-cpp/rdkafkacpp.h ====== +// ====== Generated from librdkafka master file src-cpp/rdkafkacpp.h ====== LibrdKafkaError.codes = { /* Internal errors to rdkafka: */ @@ -405,7 +405,7 @@ function LibrdKafkaError(e) { this.origin = 'kafka'; } Error.captureStackTrace(this, this.constructor); - } else if (!util.isError(e)) { + } else if (!(Object.prototype.toString(e) === "[object Error]" || e instanceof Error)) { // This is the better way this.message = e.message; this.code = e.code; @@ -446,9 +446,9 @@ function LibrdKafkaError(e) { } - if (e.hasOwnProperty('isFatal')) this.isFatal = e.isFatal; - if (e.hasOwnProperty('isRetriable')) this.isRetriable = e.isRetriable; - if (e.hasOwnProperty('isTxnRequiresAbort')) this.isTxnRequiresAbort = e.isTxnRequiresAbort; + if (Object.hasOwn(e, 'isFatal')) this.isFatal = e.isFatal; + if (Object.hasOwn(e, 'isRetriable')) this.isRetriable = e.isRetriable; + if (Object.hasOwn(e, 'isTxnRequiresAbort')) this.isTxnRequiresAbort = e.isTxnRequiresAbort; } diff --git a/lib/kafka-consumer-stream.js b/lib/kafka-consumer-stream.js index fcc07197..6deb85e7 100644 --- a/lib/kafka-consumer-stream.js +++ b/lib/kafka-consumer-stream.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/lib/kafka-consumer.js b/lib/kafka-consumer.js index dd981ade..bfc09f95 100644 --- a/lib/kafka-consumer.js +++ b/lib/kafka-consumer.js @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * 
confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -19,6 +20,7 @@ var TopicPartition = require('./topic-partition'); var shallowCopy = require('./util').shallowCopy; var DEFAULT_CONSUME_LOOP_TIMEOUT_DELAY = 500; var DEFAULT_CONSUME_TIME_OUT = 1000; +const DEFAULT_IS_TIMEOUT_ONLY_FOR_FIRST_MESSAGE = false; util.inherits(KafkaConsumer, Client); /** @@ -61,9 +63,17 @@ function KafkaConsumer(conf, topicConf) { // That's it try { if (err.code === -175 /*ERR__ASSIGN_PARTITIONS*/) { - self.assign(assignment); + if (self.rebalanceProtocol() === 'COOPERATIVE') { + self.incrementalAssign(assignment); + } else { + self.assign(assignment); + } } else if (err.code === -174 /*ERR__REVOKE_PARTITIONS*/) { - self.unassign(); + if (self.rebalanceProtocol() === 'COOPERATIVE') { + self.incrementalUnassign(assignment); + } else { + self.unassign(); + } } } catch (e) { // Ignore exceptions if we are not connected @@ -133,6 +143,7 @@ function KafkaConsumer(conf, topicConf) { this._consumeTimeout = DEFAULT_CONSUME_TIME_OUT; this._consumeLoopTimeoutDelay = DEFAULT_CONSUME_LOOP_TIMEOUT_DELAY; + this._consumeIsTimeoutOnlyForFirstMessage = DEFAULT_IS_TIMEOUT_ONLY_FOR_FIRST_MESSAGE; } /** @@ -151,6 +162,20 @@ KafkaConsumer.prototype.setDefaultConsumeLoopTimeoutDelay = function(intervalMs) this._consumeLoopTimeoutDelay = intervalMs; }; +/** + * If true: + * In consume(number, cb), we will wait for `timeoutMs` for the first message to be fetched. + * Subsequent messages will not be waited for and will be fetched (upto `number`) if already ready. + * + * If false: + * In consume(number, cb), we will wait for upto `timeoutMs` for each message to be fetched. + * + * @param {boolean} isTimeoutOnlyForFirstMessage + */ +KafkaConsumer.prototype.setDefaultIsTimeoutOnlyForFirstMessage = function(isTimeoutOnlyForFirstMessage) { + this._consumeIsTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessage; +}; + /** * Get a stream representation of this KafkaConsumer * @@ -202,7 +227,6 @@ KafkaConsumer.prototype.committed = function(toppars, timeout, cb) { toppars = toppars || this.assignments(); } - var self = this; this._client.committed(toppars, timeout, function(err, topicPartitions) { if (err) { cb(LibrdKafkaError.create(err)); @@ -239,7 +263,6 @@ KafkaConsumer.prototype.committed = function(toppars, timeout, cb) { * @return {Client} - Returns itself */ KafkaConsumer.prototype.seek = function(toppar, timeout, cb) { - var self = this; this._client.seek(TopicPartition.create(toppar), timeout, function(err) { if (err) { cb(LibrdKafkaError.create(err)); @@ -252,11 +275,13 @@ KafkaConsumer.prototype.seek = function(toppar, timeout, cb) { }; /** - * Assign the consumer specific partitions and topics + * Assign the consumer specific partitions and topics. Used for + * eager (non-cooperative) rebalancing. * * @param {array} assignments - Assignments array. Should contain * objects with topic and partition set. * @return {Client} - Returns itself + * @sa KafkaConsumer::incrementalAssign */ KafkaConsumer.prototype.assign = function(assignments) { @@ -265,9 +290,11 @@ KafkaConsumer.prototype.assign = function(assignments) { }; /** - * Unassign the consumer from its assigned partitions and topics. + * Unassign the consumer from its assigned partitions and topics.Used for + * eager (non-cooperative) rebalancing. 
* * @return {Client} - Returns itself + * @sa KafkaConsumer::incrementalUnassign */ KafkaConsumer.prototype.unassign = function() { @@ -275,6 +302,33 @@ KafkaConsumer.prototype.unassign = function() { return this; }; +/** + * Assign the consumer specific partitions and topics. Used for + * cooperative rebalancing. + * + * @param {array} assignments - Assignments array. Should contain + * objects with topic and partition set. Assignments are additive. + * @return {Client} - Returns itself + * @sa KafkaConsumer::assign + */ +KafkaConsumer.prototype.incrementalAssign = function(assignments) { + this._client.incrementalAssign(TopicPartition.map(assignments)); + return this; +}; + +/** + * Unassign the consumer specific partitions and topics. Used for + * cooperative rebalancing. + * + * @param {array} assignments - Assignments array. Should contain + * objects with topic and partition set. Assignments are subtractive. + * @return {Client} - Returns itself + * @sa KafkaConsumer::unassign + */ +KafkaConsumer.prototype.incrementalUnassign = function(assignments) { + this._client.incrementalUnassign(TopicPartition.map(assignments)); + return this; +}; /** * Get the assignments for the consumer @@ -286,6 +340,28 @@ KafkaConsumer.prototype.assignments = function() { return this._errorWrap(this._client.assignments(), true); }; +/** + * Is current assignment in rebalance callback lost? + * + * @note This method should only be called from within the rebalance callback + * when partitions are revoked. + * + * @return {boolean} true if assignment was lost + */ + +KafkaConsumer.prototype.assignmentLost = function() { + return this._client.assignmentLost(); +}; + +/** + * Get the type of rebalance protocol used in the consumer group. + * + * @returns "NONE" (if not in a group yet), "COOPERATIVE" or "EAGER". + */ +KafkaConsumer.prototype.rebalanceProtocol = function() { + return this._client.rebalanceProtocol(); +}; + /** * Subscribe to an array of topics (synchronously). * @@ -386,7 +462,6 @@ KafkaConsumer.prototype.unsubscribe = function() { */ KafkaConsumer.prototype.consume = function(number, cb) { var timeoutMs = this._consumeTimeout !== undefined ? 
this._consumeTimeout : DEFAULT_CONSUME_TIME_OUT; - var self = this; if ((number && typeof number === 'number') || (number && cb)) { @@ -399,7 +474,7 @@ KafkaConsumer.prototype.consume = function(number, cb) { this._consumeNum(timeoutMs, number, cb); } else { - // See https://github.com/confluentinc/confluent-kafka-js/issues/220 + // See https://github.com/confluentinc/confluent-kafka-javascript/issues/220 // Docs specify just a callback can be provided but really we needed // a fallback to the number argument // @deprecated @@ -463,7 +538,7 @@ KafkaConsumer.prototype._consumeLoop = function(timeoutMs, cb) { KafkaConsumer.prototype._consumeNum = function(timeoutMs, numMessages, cb) { var self = this; - this._client.consume(timeoutMs, numMessages, function(err, messages, eofEvents) { + this._client.consume(timeoutMs, numMessages, this._consumeIsTimeoutOnlyForFirstMessage, function(err, messages, eofEvents) { if (err) { err = LibrdKafkaError.create(err); if (cb) { @@ -477,7 +552,7 @@ KafkaConsumer.prototype._consumeNum = function(timeoutMs, numMessages, cb) { function emitEofEventsFor(messageIndex) { while (currentEofEventsIndex < eofEvents.length && eofEvents[currentEofEventsIndex].messageIndex === messageIndex) { delete eofEvents[currentEofEventsIndex].messageIndex; - self.emit('partition.eof', eofEvents[currentEofEventsIndex]) + self.emit('partition.eof', eofEvents[currentEofEventsIndex]); ++currentEofEventsIndex; } } @@ -540,7 +615,8 @@ KafkaConsumer.prototype.commitMessage = function(msg) { var topicPartition = { topic: msg.topic, partition: msg.partition, - offset: msg.offset + 1 + offset: msg.offset + 1, + leaderEpoch: msg.leaderEpoch }; this._errorWrap(this._client.commit(topicPartition), true); @@ -575,13 +651,34 @@ KafkaConsumer.prototype.commitMessageSync = function(msg) { var topicPartition = { topic: msg.topic, partition: msg.partition, - offset: msg.offset + 1 + offset: msg.offset + 1, + leaderEpoch: msg.leaderEpoch, }; this._errorWrap(this._client.commitSync(topicPartition), true); return this; }; +/** + * Commits a list of offsets per topic partition, using provided callback. + * + * @param {TopicPartition[]} toppars - Topic partition list to commit + * offsets for. Defaults to the current assignment + * @param {Function} cb - Callback method to execute when finished + * @return {Client} - Returns itself + */ +KafkaConsumer.prototype.commitCb = function(toppars, cb) { + this._client.commitCb(toppars, function(err) { + if (err) { + cb(LibrdKafkaError.create(err)); + return; + } + + cb(null); + }); + return this; +}; + /** * Get last known offsets from the client. * @@ -614,7 +711,7 @@ KafkaConsumer.prototype.getWatermarkOffsets = function(topic, partition) { * * enable.auto.offset.store must be set to false to use this API, * - * @see https://github.com/edenhill/librdkafka/blob/261371dc0edef4cea9e58a076c8e8aa7dc50d452/src-cpp/rdkafkacpp.h#L1702 + * @see https://github.com/confluentinc/librdkafka/blob/261371dc0edef4cea9e58a076c8e8aa7dc50d452/src-cpp/rdkafkacpp.h#L1702 * * @param {Array.} topicPartitions - Topic partitions with offsets to store offsets for. * @throws {LibrdKafkaError} - Throws when there is no offset stored @@ -627,6 +724,26 @@ KafkaConsumer.prototype.offsetsStore = function(topicPartitions) { return this._errorWrap(this._client.offsetsStore(topicPartitions), true); }; +/** + * Store offset for a single topic partition. Do not use this method. + * This method is meant for internal use, and the API is not guaranteed to be stable. + * Use offsetsStore instead. 
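+ * For example (illustrative values), the public equivalent would be:
+ * consumer.offsetsStore([{ topic: 'my-topic', partition: 0, offset: 42 }]);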
+ * + * @param {string} topic - Topic to store offset for. + * @param {number} partition - Partition of the provided topic to store offset for. + * @param {number} offset - Offset to store. + * @param {number} leaderEpoch - Leader epoch of the provided offset. + * @throws {LibrdKafkaError} - Throws when there is no offset stored + */ +KafkaConsumer.prototype._offsetsStoreSingle = function(topic, partition, offset, leaderEpoch) { + if (!this.isConnected()) { + throw new Error('Client is disconnected'); + } + + return this._errorWrap( + this._client.offsetsStoreSingle(topic, partition, offset, leaderEpoch), true); +}; + /** * Resume consumption for the provided list of partitions. * diff --git a/lib/kafkajs/_admin.js b/lib/kafkajs/_admin.js new file mode 100644 index 00000000..d265174c --- /dev/null +++ b/lib/kafkajs/_admin.js @@ -0,0 +1,426 @@ +const RdKafka = require('../rdkafka'); +const { kafkaJSToRdKafkaConfig, + createKafkaJsErrorFromLibRdKafkaError, + DefaultLogger, + CompatibilityErrorMessages, + createBindingMessageMetadata, + logLevel, + checkAllowedKeys, + loggerTrampoline, + severityToLogLevel, +} = require('./_common'); +const error = require('./_error'); + +/** + * NOTE: The Admin client is currently in an experimental state with many + * features missing or incomplete, and the API is subject to change. + */ + +const AdminState = Object.freeze({ + INIT: 0, + CONNECTING: 1, + CONNECTED: 4, + DISCONNECTING: 5, + DISCONNECTED: 6, +}); + +class Admin { + /** + * The config supplied by the user. + * @type {import("../../types/kafkajs").AdminConstructorConfig|null} + */ + #userConfig = null; + + /** + * The config realized after processing any compatibility options. + * @type {import("../../types/config").GlobalConfig|null} + */ + #internalConfig = null; + + /** + * internalClient is the node-rdkafka client used by the API. + * @type {import("../rdkafka").AdminClient|null} + */ + #internalClient = null; + /** + * state is the current state of the admin client. + * @type {AdminState} + */ + #state = AdminState.INIT; + + /** + * A logger for the admin client. + * @type {import("../../types/kafkajs").Logger} + */ + #logger = new DefaultLogger(); + + /** + * connectPromiseFunc is the set of promise functions used to resolve/reject the connect() promise. + * @type {{resolve: Function, reject: Function}|{}} + */ + #connectPromiseFunc = null; + + /** + * The client name used by the admin client for logging - determined by librdkafka + * using a combination of clientId and an integer. + * @type {string|undefined} + */ + #clientName = undefined; + + /** + * Convenience function to create the metadata object needed for logging. 
+ */ + #createAdminBindingMessageMetadata() { + return createBindingMessageMetadata(this.#clientName); + } + + /** + * @constructor + * @param {import("../../types/kafkajs").AdminConstructorConfig} config + */ + constructor(config) { + this.#userConfig = config; + } + + #config() { + if (!this.#internalConfig) + this.#internalConfig = this.#finalizedConfig(); + return this.#internalConfig; + } + + #kafkaJSToAdminConfig(kjsConfig) { + if (!kjsConfig || Object.keys(kjsConfig).length === 0) { + return {}; + } + + const disallowedKey = checkAllowedKeys('admin', kjsConfig); + if (disallowedKey) { + throw new error.KafkaJSError(CompatibilityErrorMessages.unsupportedKey(disallowedKey), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + const rdKafkaConfig = kafkaJSToRdKafkaConfig(kjsConfig); + + /* Set the logger */ + if (Object.hasOwn(kjsConfig, 'logger')) { + this.#logger = kjsConfig.logger; + } + + /* Set the log level - INFO for compatibility with kafkaJS, or DEBUG if that is turned + * on using the logLevel property. rdKafkaConfig.log_level is guaranteed to be set if we're + * here, and containing the correct value. */ + this.#logger.setLogLevel(severityToLogLevel[rdKafkaConfig.log_level]); + + return rdKafkaConfig; + } + + #finalizedConfig() { + let compatibleConfig = this.#kafkaJSToAdminConfig(this.#userConfig.kafkaJS); + + /* There can be multiple different and conflicting config directives for setting the log level: + * 1. If there's a kafkaJS block: + * a. If there's a logLevel directive in the kafkaJS block, set the logger level accordingly. + * b. If there's no logLevel directive, set the logger level to INFO. + * (both these are already handled in the conversion method above). + * 2. If there is a log_level or debug directive in the main config, set the logger level accordingly. + * !This overrides any different value provided in the kafkaJS block! + * a. If there's a log_level directive, set the logger level accordingly. + * b. If there's a debug directive, set the logger level to DEBUG regardless of anything else. This is because + * librdkafka ignores log_level if debug is set, and our behaviour should be identical. + * 3. There's nothing at all. Take no action in this case, let the logger use its default log level. + */ + if (Object.hasOwn(this.#userConfig, 'log_level')) { + this.#logger.setLogLevel(severityToLogLevel[this.#userConfig.log_level]); + } + + if (Object.hasOwn(this.#userConfig, 'debug')) { + this.#logger.setLogLevel(logLevel.DEBUG); + } + + let rdKafkaConfig = Object.assign(compatibleConfig, this.#userConfig); + + /* Delete properties which are already processed, or cannot be passed to node-rdkafka */ + delete rdKafkaConfig.kafkaJS; + + return rdKafkaConfig; + } + + #readyCb() { + if (this.#state !== AdminState.CONNECTING) { + /* The connectPromiseFunc might not be set, so we throw such an error. It's a state error that we can't recover from. Probably a bug. */ + throw new error.KafkaJSError(`Ready callback called in invalid state ${this.#state}`, { code: error.ErrorCodes.ERR__STATE }); + } + this.#state = AdminState.CONNECTED; + + // Resolve the promise. + this.#connectPromiseFunc['resolve'](); + } + + /** + * Callback for the event.error event, either fails the initial connect(), or logs the error. 
+ * @param {Error} err + */ + #errorCb(err) { + if (this.#state === AdminState.CONNECTING) { + this.#connectPromiseFunc['reject'](err); + } else { + this.#logger.error(`Error: ${err.message}`, this.#createAdminBindingMessageMetadata()); + } + } + + /** + * Set up the client and connect to the bootstrap brokers. + * @returns {Promise} Resolves when connection is complete, rejects on error. + */ + async connect() { + if (this.#state !== AdminState.INIT) { + throw new error.KafkaJSError("Connect has already been called elsewhere.", { code: error.ErrorCodes.ERR__STATE }); + } + + this.#state = AdminState.CONNECTING; + + const config = this.#config(); + + return new Promise((resolve, reject) => { + try { + /* AdminClient creation is a synchronous operation for node-rdkafka */ + this.#connectPromiseFunc = { resolve, reject }; + this.#internalClient = RdKafka.AdminClient.create(config, { + 'error': this.#errorCb.bind(this), + 'ready': this.#readyCb.bind(this), + 'event.log': (msg) => loggerTrampoline(msg, this.#logger), + }); + + this.#clientName = this.#internalClient.name; + this.#logger.info("Admin client connected", this.#createAdminBindingMessageMetadata()); + } catch (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } + }); + } + + /** + * Disconnect from the brokers, clean-up and tear down the client. + * @returns {Promise} Resolves when disconnect is complete, rejects on error. + */ + async disconnect() { + /* Not yet connected - no error. */ + if (this.#state === AdminState.INIT) { + return; + } + + /* Already disconnecting, or disconnected. */ + if (this.#state >= AdminState.DISCONNECTING) { + return; + } + + this.#state = AdminState.DISCONNECTING; + return new Promise((resolve, reject) => { + try { + /* AdminClient disconnect for node-rdkakfa is synchronous. */ + this.#internalClient.disconnect(); + this.#state = AdminState.DISCONNECTED; + this.#logger.info("Admin client disconnected", this.#createAdminBindingMessageMetadata()); + resolve(); + } catch (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } + }); + } + + + /** + * Converts a topic configuration object from kafkaJS to a format suitable for node-rdkafka. + * @param {import("../../types/kafkajs").ITopicConfig} topic + * @returns {import("../../index").NewTopic} + */ + #topicConfigToRdKafka(topic) { + let topicConfig = { topic: topic.topic }; + topicConfig.topic = topic.topic; + topicConfig.num_partitions = topic.numPartitions ?? -1; + topicConfig.replication_factor = topic.replicationFactor ?? -1; + + if (Object.hasOwn(topic, "replicaAssignment")) { + throw new error.KafkaJSError("replicaAssignment is not yet implemented.", { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + topicConfig.config = {}; + topic.configEntries = topic.configEntries ?? []; + for (const configEntry of topic.configEntries) { + topicConfig.config[configEntry.name] = configEntry.value; + } + + return topicConfig; + } + + /** + * Create topics with the given configuration. + * @param {{ validateOnly?: boolean, waitForLeaders?: boolean, timeout?: number, topics: import("../../types/kafkajs").ITopicConfig[] }} options + * @returns {Promise} Resolves true when the topics are created, false if topic exists already, rejects on error. + * In case even one topic already exists, this will return false. 
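+   *
+   * For example (illustrative; topic name and partition count are placeholders):
+   * const created = await admin.createTopics({ topics: [{ topic: 'my-topic', numPartitions: 3 }] });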
+ */ + async createTopics(options) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + if (Object.hasOwn(options, "validateOnly")) { + throw new error.KafkaJSError("validateOnly is not yet implemented.", { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + if (Object.hasOwn(options, "waitForLeaders")) { + throw new error.KafkaJSError("waitForLeaders is not yet implemented.", { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + /* Convert each topic to a format suitable for node-rdkafka, and dispatch the call. */ + let allTopicsCreated = true; + const ret = + options.topics + .map(this.#topicConfigToRdKafka) + .map(topicConfig => new Promise((resolve, reject) => { + this.#internalClient.createTopic(topicConfig, options.timeout ?? 5000, (err) => { + if (err) { + if (err.code === error.ErrorCodes.ERR_TOPIC_ALREADY_EXISTS) { + allTopicsCreated = false; + resolve(); + return; + } + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(); + } + }); + })); + + return Promise.all(ret).then(() => allTopicsCreated); + } + + /** + * Deletes given topics. + * @param {{topics: string[], timeout?: number}} options + * @returns {Promise} Resolves when the topics are deleted, rejects on error. + */ + async deleteTopics(options) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + return Promise.all( + options.topics.map(topic => new Promise((resolve, reject) => { + this.#internalClient.deleteTopic(topic, options.timeout ?? 5000, err => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(); + } + }); + })) + ); + } + + /** + * List consumer groups. + * + * @param {object?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {import("../../types/kafkajs").ConsumerGroupStates[]?} options.matchConsumerGroupStates - + * A list of consumer group states to match. May be unset, fetches all states (default: unset). + * @returns {Promise<{ groups: import("../../types/kafkajs").GroupOverview[], errors: import("../../types/kafkajs").LibrdKafkaError[] }>} + * Resolves with the list of consumer groups, rejects on error. + */ + async listGroups(options = {}) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + return new Promise((resolve, reject) => { + this.#internalClient.listGroups(options, (err, groups) => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(groups); + } + }); + }); + } + + /** + * Describe consumer groups. + * + * @param {string[]} groups - The names of the groups to describe. + * @param {object?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @param {boolean?} options.includeAuthorizedOperations - If true, include operations allowed on the group by the calling client (default: false). 
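
A usage sketch for createTopics and deleteTopics under the semantics documented above, assuming a connected admin instance such as the one created earlier; topic names and timeouts are illustrative.

async function topicsSketch(admin) {
  const created = await admin.createTopics({
    topics: [{ topic: 'orders', numPartitions: 3, replicationFactor: 1 }],
    timeout: 5000,
  });
  if (!created) {
    // At least one of the requested topics already existed; nothing failed.
  }

  await admin.deleteTopics({ topics: ['orders'], timeout: 5000 });
}
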
+ * @returns {Promise} + */ + async describeGroups(groups, options = {}) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + return new Promise((resolve, reject) => { + this.#internalClient.describeGroups(groups, options, (err, descriptions) => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(descriptions); + } + }); + }); + } + + /** + * Delete consumer groups. + * @param {string[]} groups - The names of the groups to delete. + * @param {any?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @returns {Promise} + */ + async deleteGroups(groups, options = {}) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + return new Promise((resolve, reject) => { + this.#internalClient.deleteGroups(groups, options, (err, reports) => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(reports); + } + }); + }); + } + + /** + * List topics. + * + * @param {any?} options + * @param {number?} options.timeout - The request timeout in milliseconds. + * May be unset (default: 5000) + * @returns {Promise} + */ + async listTopics(options = {}) { + if (this.#state !== AdminState.CONNECTED) { + throw new error.KafkaJSError("Admin client is not connected.", { code: error.ErrorCodes.ERR__STATE }); + } + + return new Promise((resolve, reject) => { + this.#internalClient.listTopics(options, (err, topics) => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + } else { + resolve(topics); + } + }); + }); + } +} + +module.exports = { + Admin, + ConsumerGroupStates: RdKafka.AdminClient.ConsumerGroupStates, + AclOperationTypes: RdKafka.AdminClient.AclOperationTypes +}; diff --git a/lib/kafkajs/_common.js b/lib/kafkajs/_common.js index 4ffc3f4f..2d02ce5d 100644 --- a/lib/kafkajs/_common.js +++ b/lib/kafkajs/_common.js @@ -1,46 +1,861 @@ -async function kafkaJSToRdKafkaConfig(config) { - const ret = { - 'allow.auto.create.topics': 'false' +const error = require("./_error"); +const process = require("process"); +const { AsyncLocalStorage } = require('node:async_hooks'); + +/* A list of kafkaJS compatible properties that we process. + * All of these are not necessarily supported, and an error will be + * thrown if they aren't. 
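
A sketch tying the group and topic methods together; it assumes a connected admin instance, and the groupId field on each group overview is taken from the KafkaJS GroupOverview type referenced above.

async function groupsSketch(admin) {
  const topics = await admin.listTopics({ timeout: 5000 });

  const { groups, errors } = await admin.listGroups({ timeout: 5000 });
  const descriptions = await admin.describeGroups(
    groups.map((g) => g.groupId),
    { includeAuthorizedOperations: false },
  );

  // Groups must be inactive for deletion to succeed.
  const reports = await admin.deleteGroups(['retired-group'], { timeout: 5000 });
  return { topics, errors, descriptions, reports };
}
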
*/ +const kafkaJSProperties = { + common: [ + "brokers", + "clientId", + "sasl", + "ssl", + "requestTimeout", + "enforceRequestTimeout", + "connectionTimeout", + "authenticationTimeout", + "retry", + "socketFactory", + "reauthenticationThreshold", + "logLevel", + 'logger', + ], + producer: [ + 'createPartitioner', + 'metadataMaxAge', + 'allowAutoTopicCreation', + 'transactionTimeout', + 'idempotent', + 'maxInFlightRequests', + 'transactionalId', + 'compression', + 'acks', + 'timeout', + ], + consumer: [ + 'groupId', + 'partitionAssigners', + 'partitionAssignors', + 'sessionTimeout', + 'rebalanceTimeout', + 'heartbeatInterval', + 'metadataMaxAge', + 'allowAutoTopicCreation', + 'maxBytesPerPartition', + 'maxWaitTimeInMs', + 'minBytes', + 'maxBytes', + 'readUncommitted', + 'maxInFlightRequests', + 'rackId', + 'fromBeginning', + 'autoCommit', + 'autoCommitInterval', + 'autoCommitThreshold', + ], + admin: [], +}; + +const logLevel = Object.freeze({ + NOTHING: 0, + ERROR: 1, + WARN: 2, + INFO: 3, + DEBUG: 4, +}); + +const severityToLogLevel = Object.freeze({ + 0: logLevel.NOTHING, + 1: logLevel.ERROR, + 2: logLevel.ERROR, + 3: logLevel.ERROR, + 4: logLevel.WARN, + 5: logLevel.WARN, + 6: logLevel.INFO, + 7: logLevel.DEBUG, +}); + +/** + * Default logger implementation. + * @type import("../../types/kafkajs").Logger + */ +class DefaultLogger { + constructor() { + this.logLevel = logLevel.INFO; + } + + setLogLevel(logLevel) { + this.logLevel = logLevel; + } + + info(message, extra) { + if (this.logLevel >= logLevel.INFO) + console.info({ message, ...extra }); + } + + error(message, extra) { + if (this.logLevel >= logLevel.ERROR) + console.error({ message, ...extra }); + } + + warn(message, extra) { + if (this.logLevel >= logLevel.WARN) + console.warn({ message, ...extra }); + } + + debug(message, extra) { + if (this.logLevel >= logLevel.DEBUG) + console.log({ message, ...extra }); + } + + namespace() { + return this; + } +} + +/** + * Convenience function to create a new object to be used as metadata for log messages. + * Returned object is intended to be used immediately and not stored. + * + * @param {string|undefined} clientName + */ +function createBindingMessageMetadata(clientName) { + return { + name: clientName, + fac: 'BINDING', + timestamp: Date.now(), + }; +} + +/** + * Trampoline for user defined logger, if any. + * @param {{severity: number, fac: string, message: string}} msg + * + */ +function loggerTrampoline(msg, logger) { + if (!logger) { + return; + } + + const level = severityToLogLevel[msg.severity]; + switch (level) { + case logLevel.NOTHING: + break; + case logLevel.ERROR: + logger.error(msg.message, { fac: msg.fac, timestamp: Date.now(), name: msg.name }); + break; + case logLevel.WARN: + logger.warn(msg.message, { fac: msg.fac, timestamp: Date.now(), name: msg.name }); + break; + case logLevel.INFO: + logger.info(msg.message, { fac: msg.fac, timestamp: Date.now(), name: msg.name }); + break; + case logLevel.DEBUG: + logger.debug(msg.message, { fac: msg.fac, timestamp: Date.now(), name: msg.name }); + break; + default: + throw new error.KafkaJSError("Invalid logLevel", { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } +} + +function createReplacementErrorMessage(cOrP, fnCall, property, propertyVal, replacementVal, isLK = false) { + if (!isLK) { + replacementVal = `kafkaJS: { ${replacementVal}, ... 
}`; + } + return `'${property}' is not supported as a property to '${fnCall}', but must be passed to the ${cOrP} during creation.\n` + + `Before: \n` + + `\tconst ${cOrP} = kafka.${cOrP}({ ... });\n` + + `\tawait ${cOrP}.connect();\n` + + `\t${cOrP}.${fnCall}({ ${propertyVal}, ... });\n` + + `After: \n` + + `\tconst ${cOrP} = kafka.${cOrP}({ ${replacementVal}, ... });\n` + + `\tawait ${cOrP}.connect();\n` + + `\t${cOrP}.${fnCall}({ ... });\n` + + (isLK ? `For more details on what can be used outside the kafkaJS block, see https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md\n` : ''); +} + +const CompatibilityErrorMessages = Object.freeze({ + /* Common */ + brokerString: () => + "The 'brokers' property must be an array of strings.\n" + + "For example: ['kafka:9092', 'kafka2:9093']\n", + saslUnsupportedMechanism: (mechanism) => + `SASL mechanism ${mechanism} is not supported.`, + saslUsernamePasswordString: (mechanism) => + `The 'sasl.username' and 'sasl.password' properties must be strings and must be present for the mechanism ${mechanism}.`, + saslOauthBearerProvider: () => + `The 'oauthBearerProvider' property must be a function.`, + sslObject: () => + "The 'ssl' property must be a boolean. Any additional configuration must be provided outside the kafkaJS block.\n" + + "Before: \n" + + "\tconst kafka = new Kafka({ kafkaJS: { ssl: { rejectUnauthorized: false, ca: [ ... ], key: ..., cert: ... }, } }); \n" + + "After: \n" + + '\tconst kafka = new Kafka({ kafkaJS: { ssl: true, }, "enable.ssl.certificate.verification": false, "ssl.ca.location": ..., "ssl.certificate.pem": ... });\n' + + `For more details on what can be used outside the kafkaJS block, see https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md\n`, + retryFactorMultiplier: () => + + "The 'retry.factor' and 'retry.multiplier' are not supported. They are always set to the default of 0.2 and 2 respectively.", + retryRestartOnFailure: () => + "The restartOnFailure property is ignored. The client always retries on failure.", + socketFactory: () => + "The socketFactory property is not supported.", + logLevelName: (setLevel) => + "The log level must be one of: " + Object.keys(logLevel).join(", ") + ", was " + setLevel, + reauthenticationThreshold: () => + "Reauthentication threshold cannot be set, and reauthentication is automated when 80% of connections.max.reauth.ms is reached.", + unsupportedKey: (key) => + `The '${key}' property is not supported.`, + kafkaJSCommonKey: (key) => + `The '${key}' property seems to be a KafkaJS property in the main config block.` + + `It must be moved to the kafkaJS block.` + + `\nBefore: \n` + + `\tconst kafka = new Kafka({ ${key}: , ... });\n` + + `After: \n` + + `\tconst kafka = new Kafka({ kafkaJS: { ${key}: , ... }, ... });\n`, + kafkaJSClientKey: (key, cOrP) => + `The '${key}' property seems to be a KafkaJS property in the main config block. ` + + `It must be moved to the kafkaJS block.` + + `\nBefore: \n` + + `\tconst kafka = new Kafka({ ... });\n` + + `\tconst ${cOrP} = kafka.${cOrP}({ ${key}: , ... });\n` + + `After: \n` + + `\tconst kafka = new Kafka({ ... });\n` + + `\tconst ${cOrP} = kafka.${cOrP}({ kafkaJS: { ${key}: , ... }, ... });\n`, + + /* Producer */ + createPartitioner: () => + "The 'createPartitioner' property is not supported yet. 
The default partitioner is set to murmur2_random, compatible with the DefaultPartitioner and the Java partitioner.\n" +
+    "A number of alternative partitioning strategies are available through the 'rdKafka' property, for example: \n" +
+    "\tconst kafka = new Kafka({ rdKafka: { 'partitioner': 'random|consistent_random|consistent|murmur2|murmur2_random|fnv1a|fnv1a_random' } });\n" +
+    `For more details on what can be used inside the rdKafka block, see https://github.com/confluentinc/librdkafka/blob/master/CONFIGURATION.md\n`,
+  sendOptionsMandatoryMissing: () =>
+    "The argument passed to send must be an object, and must contain the 'topic' and 'messages' properties: {topic: string, messages: Message[]}\n",
+  sendOptionsAcks: (fn) =>
+    createReplacementErrorMessage('producer', fn, 'acks', 'acks: ', 'acks: ', false),
+  sendOptionsCompression: (fn) =>
+    createReplacementErrorMessage('producer', fn, 'compression', 'compression: ', 'compression: CompressionTypes.GZIP|SNAPPY|LZ4|ZSTD', false),
+  sendOptionsTimeout: (fn) =>
+    createReplacementErrorMessage('producer', fn, 'timeout', 'timeout: ', 'timeout: ', false),
+  sendBatchMandatoryMissing: () =>
+    "The argument passed to sendBatch must be an object, and must contain the 'topicMessages' property: { topicMessages: {topic: string, messages: Message[]}[] } \n",
+  sendOffsetsMustProvideConsumer: () =>
+    "The sendOffsets method must be called with a connected consumer instance and without a consumerGroupId parameter.\n",
+
+  /* Consumer */
+  partitionAssignors: () =>
+    'partitionAssignors must be a list of strings from within `PartitionAssignors`.\n',
+  subscribeOptionsFromBeginning: () =>
+    createReplacementErrorMessage('consumer', 'subscribe', 'fromBeginning', 'fromBeginning: ', 'fromBeginning: ', false),
+  subscribeOptionsMandatoryMissing: () =>
+    "The argument passed to subscribe must be an object, and must contain the 'topics' or the 'topic' property: {topics: string[]} or {topic: string}\n",
+  subscribeOptionsRegexFlag: () =>
+    "If subscribing to topic by RegExp, no flags are allowed. /^abcd/ is okay, but /^abcd/i is not.\n",
+  subscribeOptionsRegexStart: () =>
+    "If subscribing to topic by RegExp, the pattern must start with a '^'. If you want to use something like /abcd/, /^.*abcd/ must be used.\n",
+  runOptionsAutoCommit: () =>
+    createReplacementErrorMessage('consumer', 'run', 'autoCommit', 'autoCommit: ', 'autoCommit: ', false),
+  runOptionsAutoCommitInterval: () =>
+    createReplacementErrorMessage('consumer', 'run', 'autoCommitInterval', 'autoCommitInterval: ', 'autoCommitInterval: ', false),
+  runOptionsAutoCommitThreshold: () =>
+    "The property 'autoCommitThreshold' is not supported by run.\n",
+  runOptionsRunConcurrently: () =>
+    "The property 'partitionsConsumedConcurrently' is not currently supported by run.\n",
+});
+
+/**
+ * Converts the common configuration from KafkaJS to a format that can be used by node-rdkafka.
+ * @param {object} config
+ * @returns {import('../../types/config').ProducerGlobalConfig | import('../../types/config').ConsumerGlobalConfig} the converted configuration
+ * @throws {error.KafkaJSError} if the configuration is invalid.
+ * The error code will be ERR__INVALID_ARG in case of invalid arguments or features that are not supported.
+ * The error code will be ERR__NOT_IMPLEMENTED in case of features that are not yet implemented.
+ */
+function kafkaJSToRdKafkaConfig(config) {
+  /* Since the kafkaJS block is specified, we operate in
+   * kafkaJS compatibility mode.
That means we change the defaults + * match the kafkaJS defaults. */ + const rdkafkaConfig = {}; + + if (Object.hasOwn(config, "brokers")) { + if (!Array.isArray(config["brokers"])) { + throw new error.KafkaJSError(CompatibilityErrorMessages.brokerString(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + rdkafkaConfig["bootstrap.servers"] = config["brokers"].join(","); + } + + if (Object.hasOwn(config, "clientId")) { + rdkafkaConfig["client.id"] = config.clientId; } - ret['bootstrap.servers'] = config['brokers'].join(','); let withSASL = false; - if (config.sasl) { - const sasl = config.sasl; - if (sasl.mechanism === 'plain' && - typeof sasl.username === 'string' && - typeof sasl.password === 'string') { - ret['sasl.mechanism'] = 'PLAIN'; - ret['sasl.username'] = sasl.username; - ret['sasl.password'] = sasl.password; - withSASL = true; + if (Object.hasOwn(config, "sasl")) { + const sasl = config.sasl; + const mechanism = sasl.mechanism.toUpperCase(); + + if (mechanism === 'OAUTHBEARER') { + rdkafkaConfig["sasl.mechanism"] = mechanism; + if (Object.hasOwn(sasl, "oauthBearerProvider")) { + if (typeof sasl.oauthBearerProvider !== 'function') { + throw new error.KafkaJSError(CompatibilityErrorMessages.saslOauthBearerProvider(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + rdkafkaConfig['oauthbearer_token_refresh_cb'] = function (oauthbearer_config) { + return sasl.oauthBearerProvider(oauthbearer_config) + .then((token) => { + if (!Object.hasOwn(token, 'value')) { + throw new error.KafkaJSError('Token must have a value property.', { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } else if (!Object.hasOwn(token, 'principal')) { + throw new error.KafkaJSError('Token must have a principal property.', { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } else if (!Object.hasOwn(token, 'lifetime')) { + throw new error.KafkaJSError('Token must have a lifetime property.', { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + // Recast token into a value expected by node-rdkafka's callback. + const setToken = { + tokenValue: token.value, + extensions: token.extensions, + principal: token.principal, + lifetime: token.lifetime, + }; + return setToken; + }) + .catch(err => { + if (!(err instanceof Error)) { + err = new Error(err); + } + throw err; + }); + }; + } + /* It's a valid case (unlike in KafkaJS) for oauthBearerProvider to be + * null, because librdkafka provides an unsecured token provider for + * non-prod usecases. So don't do anything in that case. 
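
A sketch of an oauthBearerProvider that satisfies the checks above. The token-fetching helper is hypothetical; only the returned { value, principal, lifetime } shape (plus optional extensions) comes from this code, and the exact unit librdkafka expects for lifetime is not shown in this diff.

const kafka = new Kafka({
  kafkaJS: {
    brokers: ['broker:9093'],
    ssl: true,
    sasl: {
      mechanism: 'oauthbearer',
      oauthBearerProvider: async () => {
        const raw = await fetchOAuthTokenSomehow(); // hypothetical helper
        return {
          value: raw.access_token,
          principal: 'my-service',
          lifetime: Date.now() + raw.expires_in * 1000, // assumed: epoch milliseconds
          extensions: undefined,
        };
      },
    },
  },
});
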
*/ + } else if (mechanism === 'PLAIN' || mechanism.startsWith('SCRAM')) { + if (typeof sasl.username !== "string" || typeof sasl.password !== "string") { + throw new error.KafkaJSError(CompatibilityErrorMessages.saslUsernamePasswordString(mechanism), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + rdkafkaConfig["sasl.mechanism"] = mechanism; + rdkafkaConfig["sasl.username"] = sasl.username; + rdkafkaConfig["sasl.password"] = sasl.password; + } else { + throw new error.KafkaJSError(CompatibilityErrorMessages.saslUnsupportedMechanism(mechanism), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); } + + withSASL = true; } - if (config.ssl === true && withSASL) { - ret['security.protocol'] = 'sasl_ssl'; + if (Object.hasOwn(config, "ssl") && config.ssl && withSASL) { + rdkafkaConfig["security.protocol"] = "sasl_ssl"; } else if (withSASL) { - ret['security.protocol'] = 'sasl_plaintext'; + rdkafkaConfig["security.protocol"] = "sasl_plaintext"; + } else if (Object.hasOwn(config, "ssl") && config.ssl) { + rdkafkaConfig["security.protocol"] = "ssl"; } - if (config.rdKafka) { - if (config.rdKafka.constructor === Function) { - await config.rdKafka(ret); - } else { - Object.assign(ret, config.rdKafka); + /* TODO: add best-effort support for ssl besides just true/false */ + if (Object.hasOwn(config, "ssl") && typeof config.ssl !== "boolean") { + throw new error.KafkaJSError(CompatibilityErrorMessages.sslObject(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + if (Object.hasOwn(config, "requestTimeout")) { + rdkafkaConfig["socket.timeout.ms"] = config.requestTimeout; + } else { + /* KafkaJS default */ + rdkafkaConfig["socket.timeout.ms"] = 30000; + } + + if (Object.hasOwn(config, "enforceRequestTimeout") && !config.enforceRequestTimeout) { + rdkafkaConfig["socket.timeout.ms"] = 300000; + } + + const connectionTimeout = config.connectionTimeout ?? 1000; + const authenticationTimeout = config.authenticationTimeout ?? 10000; + let totalConnectionTimeout = Number(connectionTimeout) + Number(authenticationTimeout); + + /* The minimum value for socket.connection.setup.timeout.ms is 1000. */ + totalConnectionTimeout = Math.max(totalConnectionTimeout, 1000); + rdkafkaConfig["socket.connection.setup.timeout.ms"] = totalConnectionTimeout; + + const retry = config.retry ?? {}; + const { maxRetryTime, initialRetryTime, factor, multiplier, restartOnFailure } = retry; + + rdkafkaConfig["retry.backoff.max.ms"] = maxRetryTime ?? 30000; + rdkafkaConfig["retry.backoff.ms"] = initialRetryTime ?? 300; + + if ((typeof factor === 'number') || (typeof multiplier === 'number')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.retryFactorMultiplier(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + if (restartOnFailure) { + throw new error.KafkaJSError(CompatibilityErrorMessages.retryRestartOnFailure(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + if (Object.hasOwn(config, "socketFactory")) { + throw new error.KafkaJSError(CompatibilityErrorMessages.socketFactory(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + if (Object.hasOwn(config, "reauthenticationThreshold")) { + throw new error.KafkaJSError(CompatibilityErrorMessages.reauthenticationThreshold(), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + + rdkafkaConfig["log_level"] = 6 /* LOG_INFO - default in KafkaJS compatibility mode. 
*/; + if (Object.hasOwn(config, "logLevel")) { + let setLevel = config.logLevel; + + if (process.env.KAFKAJS_LOG_LEVEL) { + setLevel = logLevel[process.env.KAFKAJS_LOG_LEVEL.toUpperCase()]; + } + switch (setLevel) { + case logLevel.NOTHING: + rdkafkaConfig["log_level"] = 0; /* LOG_EMERG - we don't have a true log nothing yet */ + break; + case logLevel.ERROR: + rdkafkaConfig["log_level"] = 3 /* LOG_ERR */; + break; + case logLevel.WARN: + rdkafkaConfig["log_level"] = 4 /* LOG_WARNING */; + break; + case logLevel.INFO: + rdkafkaConfig["log_level"] = 6 /* LOG_INFO */; + break; + case logLevel.DEBUG: + rdkafkaConfig["log_level"] = 7 /* LOG_DEBUG */; + break; + default: + throw new error.KafkaJSError(CompatibilityErrorMessages.logLevelName(setLevel), { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + } + + return rdkafkaConfig; +} + +/** + * Checks if the config object contains any keys not allowed by KafkaJS. + * @param {'producer'|'consumer'|'admin'} clientType + * @param {any} config + * @returns {string|null} the first unsupported key, or null if all keys are supported. + */ +function checkAllowedKeys(clientType, config) { + const allowedKeysCommon = kafkaJSProperties.common; + + if (!Object.hasOwn(kafkaJSProperties, clientType)) { + throw new error.KafkaJSError(`Unknown client type ${clientType}`, { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + const allowedKeysSpecific = kafkaJSProperties[clientType]; + + for (const key of Object.keys(config)) { + if (!allowedKeysCommon.includes(key) && !allowedKeysSpecific.includes(key)) { + return key; } } - return ret; + return null; } +/** + * Checks if the config object contains any keys specific to KafkaJS. + * @param {'producer'|'consumer'|'admin'|'common'} propertyType + * @param {any} config + * @returns {string|null} the first KafkaJS specific key, or null if none is present. + */ +function checkIfKafkaJsKeysPresent(propertyType, config) { + if (!Object.hasOwn(kafkaJSProperties, propertyType)) { + throw new error.KafkaJSError(`Unknown config type for ${propertyType}`, { + code: error.ErrorCodes.ERR__INVALID_ARG, + }); + } + const kjsKeys = kafkaJSProperties[propertyType]; + + for (const key of Object.keys(config)) { + /* We exclude 'acks' since it's common to both librdkafka and kafkaJS. + * We don't intend to keep up with new properties, so we don't need to really worry about making it extensible. */ + if (kjsKeys.includes(key) && key !== 'acks') { + return key; + } + } + + return null; +} + +/** + * Converts a topicPartitionOffset from KafkaJS to a format that can be used by node-rdkafka. + * @param {import("../../types/kafkajs").TopicPartitionOffset} tpo + * @returns {{topic: string, partition: number, offset: number}} + */ function topicPartitionOffsetToRdKafka(tpo) { + // TODO: do we need some checks for negative offsets and stuff? Or 'named' offsets? return { topic: tpo.topic, partition: tpo.partition, offset: Number(tpo.offset), + leaderEpoch: tpo.leaderEpoch, + }; +} + +/** + * Converts a topicPartitionOffset from KafkaJS to a format that can be used by node-rdkafka. + * Includes metadata. + * + * @param {import("../../types/kafkajs").TopicPartitionOffsetAndMetadata} tpo + * @returns {import("../../types/rdkafka").TopicPartitionOffsetAndMetadata} + */ +function topicPartitionOffsetMetadataToRdKafka(tpo) { + return { + topic: tpo.topic, + partition: tpo.partition, + offset: tpo.offset ? 
Number(tpo.offset) : null, + metadata: tpo.metadata, + leaderEpoch: tpo.leaderEpoch, + }; +} + +/** + * Converts a topicPartitionOffset from node-rdkafka to a format that can be used by KafkaJS. + * Includes metadata. + * + * @param {import("../../types/rdkafka").TopicPartitionOffsetAndMetadata} tpo + * @returns {import("../../types/kafkajs").TopicPartitionOffsetAndMetadata} + */ +function topicPartitionOffsetMetadataToKafkaJS(tpo) { + return { + topic: tpo.topic, + partition: tpo.partition, + offset: tpo.offset ? tpo.offset.toString() : null, + metadata: tpo.metadata, + leaderEpoch: tpo.leaderEpoch + }; +} + +/** + * Convert a librdkafka error from node-rdkafka into a KafkaJSError. + * @param {import("../error")} librdKafkaError to convert from. + * @returns {error.KafkaJSError} the converted error. + */ +function createKafkaJsErrorFromLibRdKafkaError(librdKafkaError) { + const properties = { + retriable: librdKafkaError.retriable, + fatal: librdKafkaError.fatal, + abortable: librdKafkaError.abortable, + stack: librdKafkaError.stack, + code: librdKafkaError.code, + }; + + let err = null; + + if (properties.code === error.ErrorCodes.ERR_OFFSET_OUT_OF_RANGE) { + err = new error.KafkaJSOffsetOutOfRange(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR_REQUEST_TIMED_OUT) { + err = new error.KafkaJSRequestTimeoutError(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__PARTIAL) { + err = new error.KafkaJSPartialMessageError(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__AUTHENTICATION) { + err = new error.KafkaJSSASLAuthenticationError(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR_GROUP_COORDINATOR_NOT_AVAILABLE) { + err = new error.KafkaJSGroupCoordinatorNotAvailableError(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__NOT_IMPLEMENTED) { + err = new error.KafkaJSNotImplemented(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__TIMED_OUT) { + err = new error.KafkaJSTimeout(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__ALL_BROKERS_DOWN) { + err = new error.KafkaJSNoBrokerAvailableError(librdKafkaError, properties); + } else if (properties.code === error.ErrorCodes.ERR__TRANSPORT) { + err = new error.KafkaJSConnectionError(librdKafkaError, properties); + } else if (properties.code > 0) { /* Indicates a non-local error */ + err = new error.KafkaJSProtocolError(librdKafkaError, properties); + } else { + err = new error.KafkaJSError(librdKafkaError, properties); + } + + return err; +} + +/** + * Converts KafkaJS headers to a format that can be used by node-rdkafka. + * @param {import("../../types/kafkajs").IHeaders|null} kafkaJSHeaders + * @returns {import("../../").MessageHeader[]|null} the converted headers. + */ +function convertToRdKafkaHeaders(kafkaJSHeaders) { + if (!kafkaJSHeaders) return null; + + const headers = []; + for (const [key, value] of Object.entries(kafkaJSHeaders)) { + if (value && value.constructor === Array) { + for (const v of value) { + const header = {}; + header[key] = v; + headers.push(header); + } + } else { + const header = {}; + header[key] = value; + headers.push(header); + } + } + return headers; +} + + +function notImplemented(msg = 'Not implemented') { + throw new error.KafkaJSError(msg, { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); +} + +/** + * A promise that can be resolved externally. 
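
An illustrative before/after for convertToRdKafkaHeaders, following the loop above; header names and values are made up.

convertToRdKafkaHeaders({ 'trace-id': 'abc', attempts: ['1', '2'] });
// Returns:
// [
//   { 'trace-id': 'abc' },
//   { attempts: '1' },
//   { attempts: '2' },
// ]
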
+ */ +class DeferredPromise extends Promise{ + #resolved = false; + + /** + * JS expects a resolver function to be passed to classes extending Promise. + * that takes the same parameter a normal Promise constructor does. + * The DeferredPromise cannot be rejected to avoid unhandled rejections + * entirely. + * @param {(resolve: (value: any) => void, reject: (error: Error) => void) => void} resolver + */ + constructor(resolver) { + let resolveF; + super((resolve) => { + resolveF = resolve; + }); + this.resolve = (...args) => { + this.#resolved = true; + resolveF(...args); + }; + if (resolver) + resolver(this.resolve, () => {}); + } + + get resolved() { + return this.#resolved; } } -module.exports = { kafkaJSToRdKafkaConfig, topicPartitionOffsetToRdKafka } +/** + * Utility class for time related functions + */ +class Timer { + /** + * Function that resolves when the given timeout is reached + * or the passed promise resolves, when it's passed, clearing the timeout + * in any case. + * + * @param {number} timeoutMs The timeout in milliseconds. + * @param {Promise|undefined} promise The promise to wait for, + * alternatively to the timeout, or `undefined` to just wait for the timeout. + */ + static async withTimeout(timeoutMs, promise) { + const timer = new DeferredPromise(); + const registration = setTimeout(timer.resolve, timeoutMs); + if (!promise) + await timer; + else { + await Promise.race([ + promise, + timer + ]); + } + if (!timer.resolved) { + timer.resolve(); + } + clearTimeout(registration); + } +} + +/** + * Readers-writer lock with reentrant calls. + * Upgrading from a read to a write lock is supported. + * Acquiring a read lock while holding a write lock is a no-op. + */ +class Lock { + // Total number of readers, not increases when already holding a write lock + #readers = 0; + + // Total number of writers, increased only by a single write and + // its reentrant calls + #writers = 0; + + #asyncLocalStorage = new AsyncLocalStorage(); + + // Promise to resolve and recreate when there are no readers or writers + // This is used to notify all waiting writers so at least one can proceed. + // It's also used to notify all waiting readers so they can can check + // the writer has finished. 
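
A small sketch of how DeferredPromise and Timer.withTimeout compose; the delays are arbitrary and the helpers are the ones defined above (exported from this module for internal use).

async function timerSketch() {
  const fetchDone = new DeferredPromise();
  setTimeout(() => fetchDone.resolve(), 50);   // resolved externally, later

  await Timer.withTimeout(1000, fetchDone);    // returns when fetchDone resolves, or after 1000 ms
  await Timer.withTimeout(500);                // with no promise, a plain sleep
  return fetchDone.resolved;                   // true
}
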
+ #zeroReadersAndWritersPromise = new DeferredPromise(); + + #notifyZeroReadersAndWriters() { + if (this.#readers === 0 && this.#writers === 0) { + this.#zeroReadersAndWritersPromise.resolve(); + this.#zeroReadersAndWritersPromise = new DeferredPromise(); + } + } + + #createAsyncLocalStorageStore() { + return { + // All reentrant calls + stack: [], + // Number of write locks in reentrant calls + writers: 0, + // Number of read locks in reentrant calls + readers: 0, + }; + } + + async #runAsyncStack(type, f) { + let store = this.#asyncLocalStorage.getStore(); + if (store) { + let promise = f(); + store.stack.push(promise); + await promise; + } else { + await this.#asyncLocalStorage.run(this.#createAsyncLocalStorageStore(type), + async () => { + store = this.#asyncLocalStorage.getStore(); + let promise = f(); + store.stack.push(promise); + // Await all promises are settled + await Promise.allSettled(store.stack); + // Reject if any promise is rejected + await Promise.all(store.stack); + }); + } + } + + async #acquireRead() { + let store = this.#asyncLocalStorage.getStore(); + if (!store.writers) { + while (this.#writers > 0) { + await this.#zeroReadersAndWritersPromise; + } + this.#readers++; + store.readers++; + } + } + + async #acquireWrite() { + let store = this.#asyncLocalStorage.getStore(); + // We remove current stack readers and writers so it + // becomes reentrant + let readers = this.#readers - store.readers; + let writers = this.#writers - store.writers; + while (readers > 0 || writers > 0) { + await this.#zeroReadersAndWritersPromise; + writers = this.#writers - store.writers; + readers = this.#readers - store.readers; + } + this.#writers++; + store.writers++; + } + + async #releaseRead() { + let store = this.#asyncLocalStorage.getStore(); + this.#readers--; + store.readers--; + this.#notifyZeroReadersAndWriters(); + } + + async #releaseWrite() { + let store = this.#asyncLocalStorage.getStore(); + this.#writers--; + store.writers--; + this.#notifyZeroReadersAndWriters(); + } + + /** + * Acquire a write (exclusive) lock while executing + * the given task. + * @param {function} task The task to execute. + * @returns {Promise} The result of the task. + */ + async write(task) { + let withWriteLock = async () => { + try { + await this.#acquireWrite(); + return await task(); + } finally { + await this.#releaseWrite(); + } + }; + await this.#runAsyncStack(1, withWriteLock); + } + + + /** + * Acquire a read (shared) lock while executing + * the given task. + * @param {function} task The task to execute. + * @returns {Promise} The result of the task. + */ + async read(task) { + let withReadLock = async () => { + try { + await this.#acquireRead(); + return await task(); + } finally { + await this.#releaseRead(); + } + }; + await this.#runAsyncStack(0, withReadLock); + } +} + +/** + * Creates a key for maps from a topicPartition object. + * @param {{topic: string, partition: number}} topicPartition Any object which can be treated as a topic partition. + * @returns {string} The created key. 
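
A sketch of the read/write semantics implemented above; the tasks are placeholders, and the nested read() inside write() illustrates the reentrancy that prevents a deadlock.

async function lockSketch() {
  const lock = new Lock();
  await Promise.all([
    lock.read(async () => { /* shared: may overlap with the other reader */ }),
    lock.read(async () => { /* shared */ }),
    lock.write(async () => {
      /* exclusive: waits until the readers above have finished */
      await lock.read(async () => { /* reentrant read inside a write: no deadlock */ });
    }),
  ]);
}
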
+ */ +function partitionKey(topicPartition) { + return topicPartition.topic + '|'+ (topicPartition.partition); +} + +module.exports = { + kafkaJSToRdKafkaConfig, + topicPartitionOffsetToRdKafka, + topicPartitionOffsetMetadataToRdKafka, + topicPartitionOffsetMetadataToKafkaJS, + createKafkaJsErrorFromLibRdKafkaError, + convertToRdKafkaHeaders, + createBindingMessageMetadata, + notImplemented, + logLevel, + loggerTrampoline, + DefaultLogger, + createReplacementErrorMessage, + CompatibilityErrorMessages, + severityToLogLevel, + checkAllowedKeys, + checkIfKafkaJsKeysPresent, + Lock, + DeferredPromise, + Timer, + partitionKey, +}; diff --git a/lib/kafkajs/_consumer.js b/lib/kafkajs/_consumer.js index 9cfdba0c..1f6ac01e 100644 --- a/lib/kafkajs/_consumer.js +++ b/lib/kafkajs/_consumer.js @@ -1,128 +1,674 @@ const LibrdKafkaError = require('../error'); +const error = require('./_error'); const RdKafka = require('../rdkafka'); -const { kafkaJSToRdKafkaConfig, topicPartitionOffsetToRdKafka } = require('./_common'); +const { + kafkaJSToRdKafkaConfig, + topicPartitionOffsetToRdKafka, + topicPartitionOffsetMetadataToRdKafka, + topicPartitionOffsetMetadataToKafkaJS, + createBindingMessageMetadata, + createKafkaJsErrorFromLibRdKafkaError, + notImplemented, + loggerTrampoline, + DefaultLogger, + CompatibilityErrorMessages, + severityToLogLevel, + checkAllowedKeys, + logLevel, + Lock, + partitionKey, + DeferredPromise, + Timer +} = require('./_common'); +const { Buffer } = require('buffer'); +const MessageCache = require('./_consumer_cache'); +const { hrtime } = require('process'); const ConsumerState = Object.freeze({ - INIT: 0, - CONNECTING: 1, + INIT: 0, + CONNECTING: 1, CONNECTED: 2, DISCONNECTING: 3, DISCONNECTED: 4, }); +const PartitionAssigners = Object.freeze({ + roundRobin: 'roundrobin', + range: 'range', + cooperativeSticky: 'cooperative-sticky', +}); + class Consumer { - #kJSConfig = null - #rdKafkaConfig = null; + /** + * The config supplied by the user. + * @type {import("../../types/kafkajs").ConsumerConstructorConfig|null} + */ + #userConfig = null; + + /** + * The config realized after processing any compatibility options. + * @type {import("../../types/config").ConsumerGlobalConfig|null} + */ + #internalConfig = null; + + /** + * internalClient is the node-rdkafka client used by the API. + * @type {import("../rdkafka").Consumer|null} + */ #internalClient = null; + + /** + * connectPromiseFunc is the set of promise functions used to resolve/reject the connect() promise. + * @type {{resolve: Function, reject: Function}|{}} + */ #connectPromiseFunc = {}; + + /** + * state is the current state of the consumer. + * @type {ConsumerState} + */ #state = ConsumerState.INIT; + /** + * Contains a mapping of topic+partition to an offset that the user wants to seek to. + * The keys are of the type "|". + * @type {Map} + */ + #pendingSeeks = new Map(); + + /** + * Stores the map of paused partitions keys to TopicPartition objects. + * @type {Map} + */ + #pausedPartitions = new Map(); + + /** + * Contains a list of stored topics/regexes that the user has subscribed to. + * @type {(string|RegExp)[]} + */ + #storedSubscriptions = []; + + /** + * A logger for the consumer. + * @type {import("../../types/kafkajs").Logger} + */ + #logger = new DefaultLogger(); + + /** + * A map of topic+partition to the offset that was last consumed. + * The keys are of the type "|". + * @type {Map} + */ + #lastConsumedOffsets = new Map(); + + /** + * A lock for consuming and disconnecting. 
+ * This lock should be held whenever we want to change the state from CONNECTED to any state other than CONNECTED. + * In practical terms, this lock is held whenever we're consuming a message, or disconnecting. + * @type {Lock} + */ + #lock = new Lock(); + + /** + * Whether the consumer is running. + * @type {boolean} + */ + #running = false; + + /** + * The message cache for KafkaJS compatibility mode. + * @type {MessageCache|null} + */ + #messageCache = null; + + /** + * The maximum size of the message cache. + * Will be adjusted dynamically. + */ + #messageCacheMaxSize = 1; + + /** + * Number of times we tried to increase the cache. + */ + #increaseCount = 0; + + /** + * Whether the user has enabled manual offset management (commits). + */ + #autoCommit = false; + + /** + * Signals an intent to disconnect the consumer. + */ + #disconnectStarted = false; + + /** + * Number of partitions owned by the consumer. + * @note This value may or may not be completely accurate, it's more so a hint for spawning concurrent workers. + */ + #partitionCount = 0; + + /** + * Whether worker termination has been scheduled. + */ + #workerTerminationScheduled = new DeferredPromise(); + + /** + * The worker functions currently running in the consumer. + */ + #workers = []; + + /** + * The number of partitions to consume concurrently as set by the user, or 1. + */ + #concurrency = 1; + + /** + * Promise that resolves together with last in progress fetch. + * It's set to null when no fetch is in progress. + */ + #fetchInProgress; + + /** + * Whether any rebalance callback is in progress. + * That can last more than the fetch itself given it's not awaited. + * So we await it after fetch is done. + */ + #rebalanceCbInProgress; + + /** + * Promise that is resolved on fetch to restart max poll interval timer. + */ + #maxPollIntervalRestart = new DeferredPromise(); + + /** + * Initial default value for max poll interval. + */ + #maxPollIntervalMs = 300000; + /** + * Maximum interval between poll calls from workers, + * if exceeded, the cache is cleared so a new poll can be made + * before reaching the max poll interval. + * It's set to max poll interval value. + */ + #cacheExpirationTimeoutMs = 300000; + + /** + * Last fetch real time clock in nanoseconds. + */ + #lastFetchClockNs = 0; + + /** + * List of pending operations to be executed after + * all workers reach the end of their current processing. + */ + #pendingOperations = []; + + /** + * Maps topic-partition key to the batch payload for marking staleness. + * + * Only used with eachBatch. + * NOTE: given that size of this map will never exceed #concurrency, a + * linear search might actually be faster over what will generally be <10 elems. + * But a map makes conceptual sense. Revise at a later point if needed. + */ + #topicPartitionToBatchPayload = new Map(); + + /** + * The client name used by the consumer for logging - determined by librdkafka + * using a combination of clientId and an integer. + * @type {string|undefined} + */ + #clientName = undefined; + + /** + * Convenience function to create the metadata object needed for logging. 
+ */ + #createConsumerBindingMessageMetadata() { + return createBindingMessageMetadata(this.#clientName); + } + + /** + * @constructor + * @param {import("../../types/kafkajs").ConsumerConfig} kJSConfig + */ constructor(kJSConfig) { - this.#kJSConfig = kJSConfig; + this.#userConfig = kJSConfig; } #config() { - if (!this.#rdKafkaConfig) - this.#rdKafkaConfig = this.#finalizedConfig(); - return this.#rdKafkaConfig; - } - - async #finalizedConfig() { - const config = await kafkaJSToRdKafkaConfig(this.#kJSConfig); - if (this.#kJSConfig.groupId) { - config['group.id'] = this.#kJSConfig.groupId; - } - config['offset_commit_cb'] = true; - if (this.#kJSConfig.rebalanceListener) { - config['rebalance_cb'] = (err, assignment) => { - // Create the librdkafka error - err = LibrdKafkaError.create(err); - - let call; - switch(err.code) { - case LibrdKafkaError.codes.ERR__ASSIGN_PARTITIONS: - call = (this.#kJSConfig.rebalanceListener.onPartitionsAssigned ? - this.#kJSConfig.rebalanceListener.onPartitionsAssigned(assignment) : - Promise.resolve()).catch(console.error); - break; - case LibrdKafkaError.codes.ERR__REVOKE_PARTITIONS: - call = (this.#kJSConfig.rebalanceListener.onPartitionsRevoked ? - this.#kJSConfig.rebalanceListener.onPartitionsRevoked(assignment) : - Promise.resolve()).catch(console.error); - break; - default: - call = Promise.reject().catch(() => { - console.error(`Unexpected rebalanceListener error code ${err.code}`); - }); - break; + if (!this.#internalConfig) + this.#internalConfig = this.#finalizedConfig(); + return this.#internalConfig; + } + + /** + * Clear the message cache, and reset to stored positions. + * + * @param {Array<{topic: string, partition: number}>|null} topicPartitions to clear the cache for, if null, then clear all assigned. + */ + async #clearCacheAndResetPositions() { + /* Seek to stored offset for each topic partition. It's possible that we've + * consumed messages upto N from the internalClient, but the user has stale'd the cache + * after consuming just k (< N) messages. We seek back to last consumed offset + 1. */ + this.#messageCache.clear(); + this.#messageCacheMaxSize = 1; + this.#increaseCount = 0; + const clearPartitions = this.assignment(); + const seeks = []; + for (const topicPartition of clearPartitions) { + const key = partitionKey(topicPartition); + if (!this.#lastConsumedOffsets.has(key)) + continue; + + const lastConsumedOffsets = this.#lastConsumedOffsets.get(key); + const topicPartitionOffsets = [ + { + topic: topicPartition.topic, + partition: topicPartition.partition, + offset: lastConsumedOffsets.offset, + leaderEpoch: lastConsumedOffsets.leaderEpoch, } + ]; + seeks.push(this.#seekInternal(topicPartitionOffsets)); + } - call - .finally(() => { - // Emit the event - this.#internalClient.emit('rebalance', err, assignment); - - try { - if (err.code === LibrdKafkaError.codes.ERR__ASSIGN_PARTITIONS) { - this.#internalClient.assign(assignment); - } else { - this.#internalClient.unassign(); - } - } catch (e) { - // Ignore exceptions if we are not connected - if (this.#internalClient.isConnected()) { - this.#internalClient.emit('rebalance.error', e); - } - } - }); - }; + await Promise.allSettled(seeks); + try { + await Promise.all(seeks); + } catch (err) { + /* TODO: we should cry more about this and render the consumer unusable. */ + this.#logger.error(`Seek error. This is effectively a fatal error: ${err.stack}`); } - return config; } - #readyCb(arg) { - if (this.#state !== ConsumerState.CONNECTING) { - // I really don't know how to handle this now. 
- return; + #unassign(assignment) { + if (this.#internalClient.rebalanceProtocol() === "EAGER") { + this.#internalClient.unassign(); + this.#messageCache.clear(); + this.#partitionCount = 0; + } else { + this.#internalClient.incrementalUnassign(assignment); + this.#messageCache.markStale(assignment); + this.#partitionCount -= assignment.length; + } + } + + /** + * Used as a trampoline to the user's rebalance listener, if any. + * @param {Error} err - error in rebalance + * @param {import("../../types").TopicPartition[]} assignment + */ + async #rebalanceCallback(err, assignment) { + const isLost = this.#internalClient.assignmentLost(); + this.#rebalanceCbInProgress = new DeferredPromise(); + let assignmentFnCalled = false; + this.#logger.info( + `Received rebalance event with message: '${err.message}' and ${assignment.length} partition(s), isLost: ${isLost}`, + this.#createConsumerBindingMessageMetadata()); + /* We allow the user to modify the assignment by returning it. If a truthy + * value is returned, we use that and do not apply any pending seeks to it either. + * The user can alternatively use the assignmentFns argument. + * Precedence is given to the calling of functions within assignmentFns. */ + let assignmentModified = false; + + const assignmentFn = (userAssignment) => { + if (assignmentFnCalled) + return; + assignmentFnCalled = true; + + if (this.#internalClient.rebalanceProtocol() === "EAGER") { + this.#internalClient.assign(userAssignment); + this.#partitionCount = userAssignment.length; + } else { + this.#internalClient.incrementalAssign(userAssignment); + this.#partitionCount += userAssignment.length; + } + }; + + const unassignmentFn = (userAssignment) => { + if (assignmentFnCalled) + return; + + assignmentFnCalled = true; + if (this.#disconnectStarted) + this.#unassign(userAssignment); + else + this.#addPendingOperation(() => this.#unassign(userAssignment)); + }; + + try { + err = LibrdKafkaError.create(err); + const userSpecifiedRebalanceCb = this.#userConfig['rebalance_cb']; + + if (typeof userSpecifiedRebalanceCb === 'function') { + const assignmentFns = { + assign: assignmentFn, + unassign: unassignmentFn, + assignmentLost: () => isLost, + }; + + let alternateAssignment = null; + try { + alternateAssignment = await userSpecifiedRebalanceCb(err, assignment, assignmentFns); + } catch (e) { + this.#logger.error(`Error from user's rebalance callback: ${e.stack}, `+ + 'continuing with the default rebalance behavior.'); + } + + if (alternateAssignment) { + assignment = alternateAssignment; + assignmentModified = true; + } + } else if (err.code !== LibrdKafkaError.codes.ERR__ASSIGN_PARTITIONS && err.code !== LibrdKafkaError.codes.ERR__REVOKE_PARTITIONS) { + throw new Error(`Unexpected rebalance_cb error code ${err.code}`); + } + + } finally { + /* Emit the event */ + this.#internalClient.emit('rebalance', err, assignment); + + /** + * We never need to clear the cache in case of a rebalance. + * This is because rebalances are triggered ONLY when we call the consume() + * method of the internalClient. + * In case consume() is being called, we've already either consumed all the messages + * in the cache, or timed out (this.#messageCache.cachedTime is going to exceed max.poll.interval) + * and marked the cache stale. This means that the cache is always expired when a rebalance + * is triggered. + * This is applicable both for incremental and non-incremental rebalances. 
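
A hedged sketch of a user-supplied 'rebalance_cb' as handled by the trampoline above. The (err, assignment, assignmentFns) signature and the assign/unassign/assignmentLost members come from this code; the client construction and the ErrorCodes enum (assumed to be the error-code export of this library) are illustrative.

const consumer = kafka.consumer({
  kafkaJS: { groupId: 'example-group' },
  rebalance_cb: async (err, assignment, assignmentFns) => {
    if (assignmentFns.assignmentLost()) {
      // Partitions were lost rather than revoked cooperatively.
    }
    if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) {
      assignmentFns.assign(assignment);    // optional: applied by default if neither called nor returned
    } else {
      assignmentFns.unassign(assignment);
    }
    // Returning a (possibly modified) assignment is the other supported option.
  },
});
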
+ * Multiple consume()s cannot be called together, too, because we make sure that only + * one worker is calling into the internal consumer at a time. + */ + try { + + if (err.code === LibrdKafkaError.codes.ERR__ASSIGN_PARTITIONS) { + + const checkPendingSeeks = this.#pendingSeeks.size !== 0; + if (checkPendingSeeks && !assignmentModified && !assignmentFnCalled) + assignment = this.#assignAsPerSeekedOffsets(assignment); + + assignmentFn(assignment); + + } else { + unassignmentFn(assignment); + } + } catch (e) { + // Ignore exceptions if we are not connected + if (this.#internalClient.isConnected()) { + this.#internalClient.emit('rebalance.error', e); + } } - this.#state = ConsumerState.CONNECTED; - // Resolve the promise. - this.#connectPromiseFunc['resolve'](); + /** + * Schedule worker termination here, in case the number of workers is not equal to the target concurrency. + * We need to do this so we will respawn workers with the correct concurrency count. + */ + const workersToSpawn = Math.max(1, Math.min(this.#concurrency, this.#partitionCount)); + if (workersToSpawn !== this.#workers.length) { + this.#workerTerminationScheduled.resolve(); + /* We don't need to await the workers here. We are OK if the termination and respawning + * occurs later, since even if we have a few more or few less workers for a while, it's + * not a big deal. */ + } + this.#rebalanceCbInProgress.resolve(); + } } - #errorCb(args) { - console.log('error', args); - if (this.#state === ConsumerState.CONNECTING) { - this.#connectPromiseFunc['reject'](args); - } else { - // do nothing for now. + #kafkaJSToConsumerConfig(kjsConfig) { + if (!kjsConfig || Object.keys(kjsConfig).length === 0) { + return {}; + } + + const disallowedKey = checkAllowedKeys('consumer', kjsConfig); + if (disallowedKey !== null) { + throw new error.KafkaJSError(CompatibilityErrorMessages.unsupportedKey(disallowedKey), + { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + const rdKafkaConfig = kafkaJSToRdKafkaConfig(kjsConfig); + + this.#logger = new DefaultLogger(); + + /* Consumer specific configuration */ + if (Object.hasOwn(kjsConfig, 'groupId')) { + rdKafkaConfig['group.id'] = kjsConfig.groupId; + } + + if (Object.hasOwn(kjsConfig, 'partitionAssigners')) { + kjsConfig.partitionAssignors = kjsConfig.partitionAssigners; + } + + if (Object.hasOwn(kjsConfig, 'partitionAssignors')) { + if (!Array.isArray(kjsConfig.partitionAssignors)) { + throw new error.KafkaJSError(CompatibilityErrorMessages.partitionAssignors(), { code: error.ErrorCodes.ERR__INVALID_ARG }); } + + kjsConfig.partitionAssignors.forEach(assignor => { + if (typeof assignor !== 'string') + throw new error.KafkaJSError(CompatibilityErrorMessages.partitionAssignors(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + }); + + rdKafkaConfig['partition.assignment.strategy'] = kjsConfig.partitionAssignors.join(','); + } else { + rdKafkaConfig['partition.assignment.strategy'] = PartitionAssigners.roundRobin; + } + + if (Object.hasOwn(kjsConfig, 'sessionTimeout')) { + rdKafkaConfig['session.timeout.ms'] = kjsConfig.sessionTimeout; + } else { + rdKafkaConfig['session.timeout.ms'] = 30000; + } + + if (Object.hasOwn(kjsConfig, 'rebalanceTimeout')) { + /* In librdkafka, we use the max poll interval as the rebalance timeout as well. 
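
To make the mapping above concrete, here is an illustrative consumer kafkaJS block and, roughly, the librdkafka properties that #kafkaJSToConsumerConfig derives from it. Values are examples only, and PartitionAssigners refers to the frozen map defined earlier in this file.

const kjs = {
  groupId: 'example-group',
  sessionTimeout: 45000,
  partitionAssigners: [PartitionAssigners.cooperativeSticky],
  rebalanceTimeout: 300000,
};
// Produces approximately:
// {
//   'group.id': 'example-group',
//   'session.timeout.ms': 45000,
//   'partition.assignment.strategy': 'cooperative-sticky',
//   'max.poll.interval.ms': 300000,
// }
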
*/ + rdKafkaConfig['max.poll.interval.ms'] = +kjsConfig.rebalanceTimeout; + } else if (!rdKafkaConfig['max.poll.interval.ms']) { + rdKafkaConfig['max.poll.interval.ms'] = 300000; /* librdkafka default */ + } + + if (Object.hasOwn(kjsConfig, 'heartbeatInterval')) { + rdKafkaConfig['heartbeat.interval.ms'] = kjsConfig.heartbeatInterval; + } + + if (Object.hasOwn(kjsConfig, 'metadataMaxAge')) { + rdKafkaConfig['topic.metadata.refresh.interval.ms'] = kjsConfig.metadataMaxAge; + } + + if (Object.hasOwn(kjsConfig, 'allowAutoTopicCreation')) { + rdKafkaConfig['allow.auto.create.topics'] = kjsConfig.allowAutoTopicCreation; + } else { + rdKafkaConfig['allow.auto.create.topics'] = true; + } + + if (Object.hasOwn(kjsConfig, 'maxBytesPerPartition')) { + rdKafkaConfig['max.partition.fetch.bytes'] = kjsConfig.maxBytesPerPartition; + } else { + rdKafkaConfig['max.partition.fetch.bytes'] = 1048576; + } + + if (Object.hasOwn(kjsConfig, 'maxWaitTimeInMs')) { + rdKafkaConfig['fetch.wait.max.ms'] = kjsConfig.maxWaitTimeInMs; + } else { + rdKafkaConfig['fetch.wait.max.ms'] = 5000; + } + + if (Object.hasOwn(kjsConfig, 'minBytes')) { + rdKafkaConfig['fetch.min.bytes'] = kjsConfig.minBytes; + } + + if (Object.hasOwn(kjsConfig, 'maxBytes')) { + rdKafkaConfig['fetch.message.max.bytes'] = kjsConfig.maxBytes; + } else { + rdKafkaConfig['fetch.message.max.bytes'] = 10485760; + } + + if (Object.hasOwn(kjsConfig, 'readUncommitted')) { + rdKafkaConfig['isolation.level'] = kjsConfig.readUncommitted ? 'read_uncommitted' : 'read_committed'; + } + + if (Object.hasOwn(kjsConfig, 'maxInFlightRequests')) { + rdKafkaConfig['max.in.flight'] = kjsConfig.maxInFlightRequests; + } + + if (Object.hasOwn(kjsConfig, 'rackId')) { + rdKafkaConfig['client.rack'] = kjsConfig.rackId; + } + + if (Object.hasOwn(kjsConfig, 'fromBeginning')) { + rdKafkaConfig['auto.offset.reset'] = kjsConfig.fromBeginning ? 'earliest' : 'latest'; + } + + if (Object.hasOwn(kjsConfig, 'autoCommit')) { + rdKafkaConfig['enable.auto.commit'] = kjsConfig.autoCommit; + } else { + rdKafkaConfig['enable.auto.commit'] = true; + } + + if (Object.hasOwn(kjsConfig, 'autoCommitInterval')) { + rdKafkaConfig['auto.commit.interval.ms'] = kjsConfig.autoCommitInterval; + } + + if (Object.hasOwn(kjsConfig, 'autoCommitThreshold')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.runOptionsAutoCommitThreshold(), { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + /* Set the logger */ + if (Object.hasOwn(kjsConfig, 'logger')) { + this.#logger = kjsConfig.logger; + } + + /* Set the log level - INFO for compatibility with kafkaJS, or DEBUG if that is turned + * on using the logLevel property. rdKafkaConfig.log_level is guaranteed to be set if we're + * here, and containing the correct value. */ + this.#logger.setLogLevel(severityToLogLevel[rdKafkaConfig.log_level]); + + return rdKafkaConfig; + } + + #finalizedConfig() { + /* Creates an rdkafka config based off the kafkaJS block. Switches to compatibility mode if the block exists. */ + let compatibleConfig = this.#kafkaJSToConsumerConfig(this.#userConfig.kafkaJS); + + /* There can be multiple different and conflicting config directives for setting the log level: + * 1. If there's a kafkaJS block: + * a. If there's a logLevel directive in the kafkaJS block, set the logger level accordingly. + * b. If there's no logLevel directive, set the logger level to INFO. + * (both these are already handled in the conversion method above). + * 2. 
If there is a log_level or debug directive in the main config, set the logger level accordingly. + * !This overrides any different value provided in the kafkaJS block! + * a. If there's a log_level directive, set the logger level accordingly. + * b. If there's a debug directive, set the logger level to DEBUG regardless of anything else. This is because + * librdkafka ignores log_level if debug is set, and our behaviour should be identical. + * 3. There's nothing at all. Take no action in this case, let the logger use its default log level. + */ + if (Object.hasOwn(this.#userConfig, 'log_level')) { + this.#logger.setLogLevel(severityToLogLevel[this.#userConfig.log_level]); + } + + if (Object.hasOwn(this.#userConfig, 'debug')) { + this.#logger.setLogLevel(logLevel.DEBUG); + } + + let rdKafkaConfig = Object.assign(compatibleConfig, this.#userConfig); + + /* Delete properties which are already processed, or cannot be passed to node-rdkafka */ + delete rdKafkaConfig.kafkaJS; + + /* Certain properties that the user has set are overridden. We use trampolines to accommodate the user's callbacks. + * TODO: add trampoline method for offset commit callback. */ + rdKafkaConfig['offset_commit_cb'] = true; + rdKafkaConfig['rebalance_cb'] = (err, assignment) => this.#rebalanceCallback(err, assignment).catch(e => + { + if (this.#logger) + this.#logger.error(`Error from rebalance callback: ${e.stack}`); + }); + + /* We handle offset storage within the promisified API by ourselves. Thus we don't allow the user to change this + * setting and set it to false. */ + if (Object.hasOwn(this.#userConfig, 'enable.auto.offset.store')) { + throw new error.KafkaJSError( + "Changing 'enable.auto.offset.store' is unsupported while using the promisified API.", + { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + rdKafkaConfig['enable.auto.offset.store'] = false; + + if (!Object.hasOwn(rdKafkaConfig, 'enable.auto.commit')) { + this.#autoCommit = true; /* librdkafka default. */ + } else { + this.#autoCommit = rdKafkaConfig['enable.auto.commit']; + } + + /** + * Actual max poll interval is twice the configured max poll interval, + * because we want to ensure that when we ask for worker termination, + * and there is one last message to be processed, we can process it in + * the configured max poll interval time. + * This will cause the rebalance callback timeout to be double + * the value of the configured max poll interval. + * But it's expected otherwise we cannot have a cache and need to consider + * max poll interval reached on processing the very first message. + */ + this.#maxPollIntervalMs = rdKafkaConfig['max.poll.interval.ms'] ?? 300000; + this.#cacheExpirationTimeoutMs = this.#maxPollIntervalMs; + rdKafkaConfig['max.poll.interval.ms'] = this.#maxPollIntervalMs * 2; + + return rdKafkaConfig; } - #notImplemented() { - throw new Error('Not implemented'); + #readyCb() { + if (this.#state !== ConsumerState.CONNECTING) { + /* The connectPromiseFunc might not be set, so we throw such an error. It's a state error that we can't recover from. Probably a bug. */ + throw new error.KafkaJSError(`Ready callback called in invalid state ${this.#state}`, { code: error.ErrorCodes.ERR__STATE }); + } + this.#state = ConsumerState.CONNECTED; + + /* Slight optimization for cases where the size of messages in our subscription is less than the cache size. 
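
A worked example of the doubling described above, assuming the user keeps the librdkafka default of 300000 ms.

const configuredMaxPollIntervalMs = 300000;             // user value or librdkafka default
const maxPollIntervalMs = configuredMaxPollIntervalMs;  // kept for cache-expiry bookkeeping
const cacheExpirationTimeoutMs = maxPollIntervalMs;     // 300000
const effectiveLibrdkafkaValue = maxPollIntervalMs * 2; // 600000, what librdkafka actually enforces
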
*/ + this.#internalClient.setDefaultIsTimeoutOnlyForFirstMessage(true); + + this.#clientName = this.#internalClient.name; + this.#logger.info('Consumer connected', this.#createConsumerBindingMessageMetadata()); + + // Resolve the promise. + this.#connectPromiseFunc['resolve'](); } + /** + * Callback for the event.error event, either fails the initial connect(), or logs the error. + * @param {Error} err + */ + #errorCb(err) { + if (this.#state === ConsumerState.CONNECTING) { + this.#connectPromiseFunc['reject'](err); + } else { + this.#logger.error(err, this.#createConsumerBindingMessageMetadata()); + } + } + + /** + * Converts a message returned by node-rdkafka into a message that can be used by the eachMessage callback. + * @param {import("../..").Message} message + * @returns {import("../../types/kafkajs").EachMessagePayload} + */ #createPayload(message) { - var key = message.key == null ? null : message.key; + let key = message.key; if (typeof key === 'string') { key = Buffer.from(key); } - let timestamp = message.timestamp ? new Date(message.timestamp).toISOString() - : ''; + let timestamp = message.timestamp ? String(message.timestamp) : ''; - var headers = undefined; + let headers; if (message.headers) { - headers = {} - for (const [key, value] of Object.entries(message.headers)) { - if (!headers[key]) { - headers[key] = value; - } else if (headers[key].constructor === Array) { - headers[key].push(value); - } else { - headers[key] = [headers[key], value]; + headers = {}; + for (const header of message.headers) { + for (const [key, value] of Object.entries(header)) { + if (!Object.hasOwn(headers, key)) { + headers[key] = value; + } else if (headers[key].constructor === Array) { + headers[key].push(value); + } else { + headers[key] = [headers[key], value]; + } } } } @@ -135,169 +681,1228 @@ class Consumer { value: message.value, timestamp, attributes: 0, - offset: message.offset, + offset: String(message.offset), size: message.size, + leaderEpoch: message.leaderEpoch, headers }, - heartbeat: async () => {}, - pause: () => {} + heartbeat: async () => { /* no op */ }, + pause: this.pause.bind(this, [{ topic: message.topic, partitions: [message.partition] }]), + }; + } + + /** + * Method used by #createBatchPayload to resolve offsets. + * Resolution stores the offset into librdkafka if needed, and into the lastConsumedOffsets map + * that we use for seeking to the last consumed offset when forced to clear cache. + * + * @param {*} payload The payload we're creating. This is a method attached to said object. + * @param {*} offsetToResolve The offset to resolve. + * @param {*} leaderEpoch The leader epoch of the message (optional). We expect users to provide it, but for API-compatibility reasons, it's optional. + */ + #eachBatchPayload_resolveOffsets(payload, offsetToResolve, leaderEpoch = -1) { + const offset = +offsetToResolve; + + if (isNaN(offset)) { + /* Not much we can do but throw and log an error. */ + const e = new error.KafkaJSError(`Invalid offset to resolve: ${offsetToResolve}`, { code: error.ErrorCodes.ERR__INVALID_ARG }); + throw e; + } + + /* The user might resolve offset N (< M) after resolving offset M. Given that in librdkafka we can only + * store one offset, store the last possible one. 
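An illustration of the header conversion above: node-rdkafka delivers headers as an array of single-key objects, and repeated keys are folded into arrays (string values are used here for brevity, real values are typically Buffers):

// As received from node-rdkafka:
const rdkafkaHeaders = [{ 'retry-count': '1' }, { 'retry-count': '2' }, { source: 'svc-a' }];
// As exposed on the eachMessage payload after conversion:
// { 'retry-count': ['1', '2'], source: 'svc-a' }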
*/ + if (offset <= payload._lastResolvedOffset.offset) + return; + + const topic = payload.batch.topic; + const partition = payload.batch.partition; + + payload._lastResolvedOffset = { offset, leaderEpoch }; + + try { + this.#internalClient._offsetsStoreSingle( + topic, + partition, + offset + 1, + leaderEpoch); + } catch (e) { + /* Not much we can do, except log the error. */ + this.#logger.error(`Consumer encountered error while storing offset. Error details: ${e}:${e.stack}`, this.#createConsumerBindingMessageMetadata()); + } + } + + /** + * Method used by #createBatchPayload to commit offsets. + */ + async #eachBatchPayload_commitOffsetsIfNecessary() { + if (this.#autoCommit) { + /* librdkafka internally handles committing of whatever we store. + * We don't worry about it here. */ + return; + } + /* If the offsets are being resolved by the user, they've already called resolveOffset() at this point + * We just need to commit the offsets that we've stored. */ + await this.commitOffsets(); + } + + /** + * Request a size increase. + * It increases the size by 2x, but only if the size is less than 1024, + * only if the size has been requested to be increased twice in a row. + */ + #increaseMaxSize() { + if (this.#messageCacheMaxSize === 1024) + return; + this.#increaseCount++; + if (this.#increaseCount <= 1) + return; + this.#messageCacheMaxSize = Math.min(this.#messageCacheMaxSize << 1, 1024); + this.#increaseCount = 0; + } + + /** + * Request a size decrease. + * It decreases the size to 80% of the last received size, with a minimum of 1. + * @param {number} recvdSize - the number of messages received in the last poll. + */ + #decreaseMaxSize(recvdSize) { + this.#messageCacheMaxSize = Math.max(Math.floor((recvdSize * 8) / 10), 1); + this.#increaseCount = 0; + } + + /** + * Converts a list of messages returned by node-rdkafka into a message that can be used by the eachBatch callback. + * @param {import("../..").Message[]} messages - must not be empty. Must contain messages from the same topic and partition. + * @returns {import("../../types/kafkajs").EachBatchPayload} + */ + #createBatchPayload(messages) { + const topic = messages[0].topic; + const partition = messages[0].partition; + + const messagesConverted = []; + for (let i = 0; i < messages.length; i++) { + const message = messages[i]; + let key = message.key; + if (typeof key === 'string') { + key = Buffer.from(key); + } + + let timestamp = message.timestamp ? String(message.timestamp) : ''; + + let headers; + if (message.headers) { + headers = {}; + for (const [key, value] of Object.entries(message.headers)) { + if (!Object.hasOwn(headers, key)) { + headers[key] = value; + } else if (headers[key].constructor === Array) { + headers[key].push(value); + } else { + headers[key] = [headers[key], value]; + } + } + } + + const messageConverted = { + key, + value: message.value, + timestamp, + attributes: 0, + offset: String(message.offset), + size: message.size, + leaderEpoch: message.leaderEpoch, + headers + }; + + messagesConverted.push(messageConverted); + } + + const batch = { + topic, + partition, + highWatermark: '-1001', /* We don't fetch it yet. We can call committed() to fetch it but that might incur network calls. 
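A worked example of the cache sizing policy above, assuming the cache currently allows a maximum of 32 messages (the starting value is illustrative):

// Poll 1 returns 32 messages (a full fetch)  -> increaseCount = 1, max stays 32
// Poll 2 returns 32 messages again           -> increaseCount = 2, max doubles to 64
// Poll 3 returns 20 messages (< 64)          -> max shrinks to floor(20 * 8 / 10) = 16
// Growth is capped at 1024 and shrinking never goes below 1.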
*/ + messages: messagesConverted, + isEmpty: () => false, + firstOffset: () => (messagesConverted[0].offset).toString(), + lastOffset: () => (messagesConverted[messagesConverted.length - 1].offset).toString(), + offsetLag: () => notImplemented(), + offsetLagLow: () => notImplemented(), + }; + + const returnPayload = { + batch, + _stale: false, + _lastResolvedOffset: { offset: -1, leaderEpoch: -1 }, + heartbeat: async () => { /* no op */ }, + pause: this.pause.bind(this, [{ topic, partitions: [partition] }]), + commitOffsetsIfNecessary: this.#eachBatchPayload_commitOffsetsIfNecessary.bind(this), + isRunning: () => this.#running, + isStale: () => returnPayload._stale, + /* NOTE: Probably never to be implemented. Not sure exactly how we'd compute this + * inexpensively. */ + uncommittedOffsets: () => notImplemented(), + }; + + returnPayload.resolveOffset = this.#eachBatchPayload_resolveOffsets.bind(this, returnPayload); + + return returnPayload; + } + + async #fetchAndResolveWith(takeFromCache, size) { + if (this.#fetchInProgress) { + return null; + } + + try { + this.#fetchInProgress = new DeferredPromise(); + const fetchResult = new DeferredPromise(); + this.#logger.debug(`Attempting to fetch ${size} messages to the message cache`, + this.#createConsumerBindingMessageMetadata()); + this.#internalClient.consume(size, (err, messages) => + fetchResult.resolve([err, messages])); + + let [err, messages] = await fetchResult; + if (this.#rebalanceCbInProgress) { + await this.#rebalanceCbInProgress; + this.#rebalanceCbInProgress = null; + } + + if (err) { + throw createKafkaJsErrorFromLibRdKafkaError(err); + } + + this.#messageCache.addMessages(messages); + const res = takeFromCache(); + this.#lastFetchClockNs = hrtime.bigint(); + this.#maxPollIntervalRestart.resolve(); + if (messages.length === this.#messageCacheMaxSize) { + this.#increaseMaxSize(); + } else { + this.#decreaseMaxSize(messages.length); + } + return res; + } finally { + this.#fetchInProgress.resolve(); + this.#fetchInProgress = null; } } - async #consumeSingle() { + /** + * Consumes a single message from the internal consumer. + * @param {PerPartitionCache} ppc Per partition cache to use or null|undefined . + * @returns {Promise} a promise that resolves to a single message or null. + * @note this method caches messages as well, but returns only a single message. + */ + async #consumeSingleCached(ppc) { + const msg = this.#messageCache.next(ppc); + if (msg) { + return msg; + } + + /* It's possible that we get msg = null, but that's because partitionConcurrency + * exceeds the number of partitions containing messages. So in this case, + * we should not call for new fetches, rather, try to focus on what we have left. + */ + if (!msg && this.#messageCache.assignedSize !== 0) { + return null; + } + + return this.#fetchAndResolveWith(() => this.#messageCache.next(), + this.#messageCacheMaxSize); + } + + /** + * Consumes a single message from the internal consumer. + * @param {number} savedIndex - the index of the message in the cache to return. + * @param {number} size - the number of messages to fetch. + * @returns {Promise} a promise that resolves to a list of messages or null. + * @note this method caches messages as well. + * @sa #consumeSingleCached + */ + async #consumeCachedN(ppc, size) { + const msgs = this.#messageCache.nextN(ppc, size); + if (msgs) { + return msgs; + } + + /* It's possible that we get msgs = null, but that's because partitionConcurrency + * exceeds the number of partitions containing messages. 
So in this case, + * we should not call for new fetches, rather, try to focus on what we have left. + */ + if (!msgs && this.#messageCache.assignedSize !== 0) { + return null; + } + + return this.#fetchAndResolveWith(() => + this.#messageCache.nextN(null, size), + this.#messageCacheMaxSize); + } + + /** + * Consumes n messages from the internal consumer. + * @returns {Promise} a promise that resolves to a list of messages. + * The size of this list is guaranteed to be less + * than or equal to n. + * @note this method cannot be used in conjunction with #consumeSingleCached. + */ + async #consumeN(n) { return new Promise((resolve, reject) => { - this.#internalClient.consume(1, function(err, messages) { + this.#internalClient.consume(n, (err, messages) => { if (err) { - reject(`Consume error code ${err.code}`); + reject(createKafkaJsErrorFromLibRdKafkaError(err)); return; } - - const message = messages[0]; - resolve(message); + resolve(messages); }); }); } + /** + * Flattens a list of topics with partitions into a list of topic, partition. + * @param {({topic: string, partitions: number[]}|{topic: string, partition: number})[]} topics + * @returns {import("../../types/rdkafka").TopicPartition[]} a list of (topic, partition). + */ #flattenTopicPartitions(topics) { const ret = []; - for (let topic of topics) { - if (topic.partition !== null) + for (const topic of topics) { + if (typeof topic.partition === 'number') ret.push({ topic: topic.topic, partition: topic.partition }); else { - for (let partition of topic.partitions) { - ret.push({topic: topic.topic, partition}); + for (const partition of topic.partitions) { + ret.push({ topic: topic.topic, partition }); } } } return ret; } + /** + * @returns {import("../rdkafka").Consumer} the internal node-rdkafka client. + */ _getInternalConsumer() { return this.#internalClient; } + /** + * Set up the client and connect to the bootstrap brokers. + * @returns {Promise} a promise that resolves when the consumer is connected. + */ async connect() { - if (this.#state !== ConsumerState.INIT) { - return Promise.reject('Connect has already been called elsewhere.'); - } + if (this.#state !== ConsumerState.INIT) { + throw new error.KafkaJSError('Connect has already been called elsewhere.', { code: error.ErrorCodes.ERR__STATE }); + } - this.#state = ConsumerState.CONNECTING; - this.#internalClient = new RdKafka.KafkaConsumer(await this.#config()); - this.#internalClient.on('ready', this.#readyCb.bind(this)); - this.#internalClient.on('event.error', this.#errorCb.bind(this)); - this.#internalClient.on('event.log', console.log); - - return new Promise((resolve, reject) => { - this.#connectPromiseFunc = {resolve, reject}; - console.log('Connecting....'); - this.#internalClient.connect(); - console.log('connect() called'); + const rdKafkaConfig = this.#config(); + this.#state = ConsumerState.CONNECTING; + this.#internalClient = new RdKafka.KafkaConsumer(rdKafkaConfig); + this.#internalClient.on('ready', this.#readyCb.bind(this)); + this.#internalClient.on('error', this.#errorCb.bind(this)); + this.#internalClient.on('event.error', this.#errorCb.bind(this)); + this.#internalClient.on('event.log', (msg) => loggerTrampoline(msg, this.#logger)); + + return new Promise((resolve, reject) => { + this.#connectPromiseFunc = { resolve, reject }; + this.#internalClient.connect(null, (err) => { + if (err) + reject(createKafkaJsErrorFromLibRdKafkaError(err)); }); + }); } + /** + * Subscribes the consumer to the given topics. 
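An illustrative connect sequence; the import path and broker address are assumptions, the rest follows the connect() implementation above:

const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS;  // assumed entry point
const kafka = new Kafka({ kafkaJS: { brokers: ['localhost:9092'] } });
const consumer = kafka.consumer({ kafkaJS: { groupId: 'example-group' } });
await consumer.connect();   // resolves once the 'ready' callback fires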
+ * @param {import("../../types/kafkajs").ConsumerSubscribeTopics | import("../../types/kafkajs").ConsumerSubscribeTopic} subscription + */ async subscribe(subscription) { - this.#internalClient.subscribe(subscription.topics); + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Subscribe can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + if (typeof subscription.fromBeginning === 'boolean') { + throw new error.KafkaJSError( + CompatibilityErrorMessages.subscribeOptionsFromBeginning(), + { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + if (!Object.hasOwn(subscription, 'topics') && !Object.hasOwn(subscription, 'topic')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.subscribeOptionsMandatoryMissing(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + let topics = []; + if (subscription.topic) { + topics.push(subscription.topic); + } else if (Array.isArray(subscription.topics)) { + topics = subscription.topics; + } else { + throw new error.KafkaJSError(CompatibilityErrorMessages.subscribeOptionsMandatoryMissing(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + topics = topics.map(topic => { + if (typeof topic === 'string') { + return topic; + } else if (topic instanceof RegExp) { + // Flags are not supported, and librdkafka only considers a regex match if the first character of the regex is ^. + if (topic.flags) { + throw new error.KafkaJSError(CompatibilityErrorMessages.subscribeOptionsRegexFlag(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + const regexSource = topic.source; + if (regexSource.charAt(0) !== '^') + throw new error.KafkaJSError(CompatibilityErrorMessages.subscribeOptionsRegexStart(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + + return regexSource; + } else { + throw new error.KafkaJSError('Invalid topic ' + topic + ' (' + typeof topic + '), the topic name has to be a String or a RegExp', { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + }); + + this.#storedSubscriptions = subscription.replace ? topics : this.#storedSubscriptions.concat(topics); + this.#logger.debug(`${subscription.replace ? 'Replacing' : 'Adding'} topics [${topics.join(', ')}] to subscription`, this.#createConsumerBindingMessageMetadata()); + this.#internalClient.subscribe(this.#storedSubscriptions); } async stop() { - this.#notImplemented(); + notImplemented(); } + /** + * Starts consumer polling. This method returns immediately. 
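A subscribe() usage sketch matching the checks above; topic names are assumptions, and a RegExp must be anchored with '^' and carry no flags:

await consumer.subscribe({ topics: ['orders', 'payments'] });             // appended to the stored subscription
await consumer.subscribe({ topics: [/^telemetry\..*/], replace: true });  // replaces the stored subscription
// Passing fromBeginning here throws; set it in the consumer's kafkaJS config instead.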
+ * @param {import("../../types/kafkajs").ConsumerRunConfig} config + */ async run(config) { if (this.#state !== ConsumerState.CONNECTED) { - throw new Error('Run must be called in state CONNECTED.'); + throw new error.KafkaJSError('Run must be called after a successful connect().', { code: error.ErrorCodes.ERR__STATE }); + } + + if (Object.hasOwn(config, 'autoCommit')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.runOptionsAutoCommit(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + if (Object.hasOwn(config, 'autoCommitInterval')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.runOptionsAutoCommitInterval(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + if (Object.hasOwn(config, 'autoCommitThreshold')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.runOptionsAutoCommitThreshold(), { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + if (this.#running) { + throw new error.KafkaJSError('Consumer is already running.', { code: error.ErrorCodes.ERR__STATE }); + } + this.#running = true; + + /* We're going to add keys to the configuration, so make a copy */ + const configCopy = Object.assign({}, config); + + /* Batches are auto resolved by default. */ + if (!Object.hasOwn(config, 'eachBatchAutoResolve')) { + configCopy.eachBatchAutoResolve = true; + } + + if (!Object.hasOwn(config, 'partitionsConsumedConcurrently')) { + configCopy.partitionsConsumedConcurrently = 1; + } + + this.#messageCache = new MessageCache(this.#logger); + /* We deliberately don't await this because we want to return from this method immediately. */ + this.#runInternal(configCopy); + } + + /** + * Processes a single message. + * + * @param m Message as obtained from #consumeSingleCached. + * @param config Config as passed to run(). + * @returns {Promise} the cache index of the message that was processed. + */ + async #messageProcessor(m, config) { + let ppc; + [m, ppc] = m; + let key = partitionKey(m); + let eachMessageProcessed = false; + const payload = this.#createPayload(m); + + try { + this.#lastConsumedOffsets.set(key, m); + await config.eachMessage(payload); + eachMessageProcessed = true; + } catch (e) { + /* It's not only possible, but expected that an error will be thrown by eachMessage. + * This is especially true since the pattern of pause() followed by throwing an error + * is encouraged. To meet the API contract, we seek one offset backward (which + * means seeking to the message offset). + * However, we don't do this inside the catch, but just outside it. This is because throwing an + * error is not the only case where we might want to seek back. + * + * So - do nothing but a log, but at this point eachMessageProcessed is false. + * TODO: log error only if error type is not KafkaJSError and if no pause() has been called, else log debug. + */ + this.#logger.error( + `Consumer encountered error while processing message. Error details: ${e}: ${e.stack}. The same message may be reprocessed.`, + this.#createConsumerBindingMessageMetadata()); + } + + /* If the message is unprocessed, due to an error, or because the user has not resolved it, we seek back. */ + if (!eachMessageProcessed) { + this.seek({ + topic: m.topic, + partition: m.partition, + offset: m.offset, + leaderEpoch: m.leaderEpoch, + }); } - while (this.#state === ConsumerState.CONNECTED) { - let m = await this.#consumeSingle(); - if (m) { - await config.eachMessage( - this.#createPayload(m) - ) + /* Store the offsets we need to store, or at least record them for cache invalidation reasons. 
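A minimal eachMessage sketch against the run() contract above; the handler body and the backoff predicate are hypothetical, and pausing followed by throwing is the supported way to have the message redelivered:

await consumer.run({
  eachMessage: async ({ topic, partition, message, pause }) => {
    console.log(topic, partition, message.offset, message.value?.toString());
    if (shouldBackOff()) {            // hypothetical predicate
      const resume = pause();         // pauses this topic-partition only
      setTimeout(resume, 5000);
      throw new Error('backing off'); // message stays unprocessed and will be redelivered
    }
  },
});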
*/ + if (eachMessageProcessed) { + try { + this.#internalClient._offsetsStoreSingle(m.topic, m.partition, Number(m.offset) + 1, m.leaderEpoch); + } catch (e) { + /* Not much we can do, except log the error. */ + this.#logger.error(`Consumer encountered error while storing offset. Error details: ${JSON.stringify(e)}`, this.#createConsumerBindingMessageMetadata()); } } + + + return ppc; } - async commitOffsets(topicPartitions = null) { + /** + * Processes a batch of messages. + * + * @param {[[Message], PerPartitionCache]} ms Messages as obtained from #consumeCachedN (ms.length !== 0). + * @param config Config as passed to run(). + * @returns {Promise} the PPC corresponding to + * the passed batch. + */ + async #batchProcessor(ms, config) { + let ppc; + [ms, ppc] = ms; + const key = partitionKey(ms[0]); + const payload = this.#createBatchPayload(ms); + + this.#topicPartitionToBatchPayload.set(key, payload); + + let lastOffsetProcessed = { offset: -1, leaderEpoch: -1 }; + const firstMessage = ms[0]; + const lastMessage = ms[ms.length - 1]; + const lastOffset = +(lastMessage.offset); + const lastLeaderEpoch = lastMessage.leaderEpoch; try { - if (topicPartitions == null) { - this.#internalClient.commitSync(); - } else { - const topicPartitions = topicPartitions.map( - topicPartitionOffsetToRdKafka); - this.#internalClient.commitSync(topicPartitions); + await config.eachBatch(payload); + + /* If the user isn't resolving offsets, we resolve them here. It's significant here to call this method + * because besides updating `payload._lastResolvedOffset`, this method is also storing the offsets to + * librdkafka, and accounting for any cache invalidations. + * Don't bother resolving offsets if payload became stale at some point. We can't know when the payload + * became stale, so either the user has been nice enough to keep resolving messages, or we must seek to + * the first offset to ensure no message loss. */ + if (config.eachBatchAutoResolve && !payload._stale) { + payload.resolveOffset(lastOffset, lastLeaderEpoch); } + + lastOffsetProcessed = payload._lastResolvedOffset; } catch (e) { - if (!e.code || e.code != LibrdKafkaError.codes.ERR__NO_OFFSET) { - throw e; + /* It's not only possible, but expected that an error will be thrown by eachBatch. + * This is especially true since the pattern of pause() followed by throwing an error + * is encouraged. To meet the API contract, we seek one offset backward (which + * means seeking to the message offset). + * However, we don't do this inside the catch, but just outside it. This is because throwing an + * error is not the only case where we might want to seek back. We might want to seek back + * if the user has not called `resolveOffset` manually in case of using eachBatch without + * eachBatchAutoResolve being set. + * + * So - do nothing but a log, but at this point eachMessageProcessed needs to be false unless + * the user has explicitly marked it as true. + * TODO: log error only if error type is not KafkaJSError and if no pause() has been called, else log debug. + */ + this.#logger.error( + `Consumer encountered error while processing message. Error details: ${e}: ${e.stack}. The same message may be reprocessed.`, + this.#createConsumerBindingMessageMetadata()); + + /* The value of eachBatchAutoResolve is not important. The only place where a message is marked processed + * despite an error is if the user says so, and the user can use resolveOffset for both the possible + * values eachBatchAutoResolve can take. 
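A corresponding eachBatch sketch; with eachBatchAutoResolve disabled, resolveOffset() must be called for progress to be recorded, as the code above describes (the handle() function is hypothetical):

await consumer.run({
  eachBatchAutoResolve: false,
  eachBatch: async ({ batch, resolveOffset, commitOffsetsIfNecessary, isStale }) => {
    for (const message of batch.messages) {
      if (isStale()) break;                            // e.g. after a seek() or pause()
      await handle(message);                           // hypothetical per-message work
      resolveOffset(message.offset, message.leaderEpoch);
    }
    await commitOffsetsIfNecessary();                  // no-op when auto-commit is enabled
  },
});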
*/ + lastOffsetProcessed = payload._lastResolvedOffset; + } + + this.#topicPartitionToBatchPayload.delete(key); + + /* If any message is unprocessed, either due to an error or due to the user not marking it processed, we must seek + * back to get it so it can be reprocessed. */ + if (lastOffsetProcessed.offset !== lastOffset) { + const offsetToSeekTo = lastOffsetProcessed.offset === -1 ? firstMessage.offset : (lastOffsetProcessed.offset + 1); + const leaderEpoch = lastOffsetProcessed.offset === -1 ? firstMessage.leaderEpoch : lastOffsetProcessed.leaderEpoch; + this.seek({ + topic: firstMessage.topic, + partition: firstMessage.partition, + offset: offsetToSeekTo, + leaderEpoch: leaderEpoch, + }); + } + + return ppc; + } + + #discardMessages(ms, ppc) { + if (ms) { + let m = ms[0]; + if (m.constructor === Array) { + m = m[0]; + } + ppc = ms[1]; + if (m && !this.#lastConsumedOffsets.has(ppc.key)) { + this.#lastConsumedOffsets.set(ppc.key, { + topic: m.topic, + partition: m.partition, + offset: m.offset - 1, + }); } } + return ppc; } - seek(topicPartitionOffset) { + async #nextFetchRetry() { + if (this.#fetchInProgress) { + await this.#fetchInProgress; + } else { + /* Backoff a little. If m is null, we might be without messages + * or in available partition starvation, and calling consumeSingleCached + * in a tight loop will help no one. */ + await Timer.withTimeout(1); + } + } + + /** + * Starts a worker to fetch messages/batches from the internal consumer and process them. + * + * A worker runs until it's told to stop. + * Conditions where the worker is told to stop: + * 1. Cache globally stale + * 2. Disconnected initiated + * 3. Rebalance + * 4. Some other worker has started terminating. + * + * Worker termination acts as a async barrier. + */ + async #worker(config, perMessageProcessor, fetcher) { + let ppc = null; + + while (!this.#workerTerminationScheduled.resolved) { + + const ms = await fetcher(ppc).catch(e => { + /* Since this error cannot be exposed to the user in the current situation, just log and retry. + * This is due to restartOnFailure being set to always true. */ + if (this.#logger) + this.#logger.error(`Consumer encountered error while consuming. Retrying. Error details: ${e} : ${e.stack}`, this.#createConsumerBindingMessageMetadata()); + }); + + if (this.#pendingOperations.length) { + ppc = this.#discardMessages(ms, ppc); + break; + } + + if (!ms) { + await this.#nextFetchRetry(); + continue; + } + + ppc = await perMessageProcessor(ms, config); + } + + if (ppc) + this.#messageCache.return(ppc); + } + + async #checkMaxPollIntervalNotExceeded(now) { + const maxPollExpiration = this.#lastFetchClockNs + + BigInt((this.#cacheExpirationTimeoutMs + this.#maxPollIntervalMs) + * 1e6); + + let interval = Number(maxPollExpiration - now) / 1e6; + if (interval < 1) + interval = 1; + await Timer.withTimeout(interval, + this.#maxPollIntervalRestart); + now = hrtime.bigint(); + + if (now > (maxPollExpiration - 1000000n)) { + this.#markBatchPayloadsStale(this.assignment()); + } + } + + /** + * Clears the cache and resets the positions when + * the internal client hasn't been polled for more than + * max poll interval since the last fetch. + * After that it waits until barrier is reached or + * max poll interval is reached. In the latter case it + * marks the batch payloads as stale. 
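A timing sketch of the two expiration checks above, assuming the default of 300000 ms for both the max poll interval and the cache expiration timeout:

// lastFetch happens at t = 0
//   t > 300s  -> the cache is scheduled to be cleared and positions reset
//   t > 600s  -> in-flight batch payloads are marked stale
//               (lastFetch + cacheExpirationTimeoutMs + maxPollIntervalMs)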
+ */ + async #cacheExpirationLoop() { + while (!this.#workerTerminationScheduled.resolved) { + let now = hrtime.bigint(); + const cacheExpiration = this.#lastFetchClockNs + + BigInt(this.#cacheExpirationTimeoutMs * 1e6); + + if (now > cacheExpiration) { + this.#addPendingOperation(() => + this.#clearCacheAndResetPositions()); + await this.#checkMaxPollIntervalNotExceeded(now); + break; + } + + let interval = Number(cacheExpiration - now) / 1e6; + if (interval < 100) + interval = 100; + const promises = Promise.race([this.#workerTerminationScheduled, + this.#maxPollIntervalRestart]); + await Timer.withTimeout(interval, + promises); + if (this.#maxPollIntervalRestart.resolved) + this.#maxPollIntervalRestart = new DeferredPromise(); + } + if (this.#maxPollIntervalRestart.resolved) + this.#maxPollIntervalRestart = new DeferredPromise(); + } + + /** + * Executes all pending operations and clears the list. + */ + async #executePendingOperations() { + for (const op of this.#pendingOperations) { + await op(); + } + this.#pendingOperations = []; + } + + /** + * Internal polling loop. + * Spawns and awaits workers until disconnect is initiated. + */ + async #runInternal(config) { + this.#concurrency = config.partitionsConsumedConcurrently; + const perMessageProcessor = config.eachMessage ? this.#messageProcessor : this.#batchProcessor; + /* TODO: make this dynamic, based on max batch size / size of last message seen. */ + const maxBatchSize = 32; + const fetcher = config.eachMessage + ? (savedIdx) => this.#consumeSingleCached(savedIdx) + : (savedIdx) => this.#consumeCachedN(savedIdx, maxBatchSize); + this.#workers = []; + + await this.#lock.write(async () => { + + while (!this.#disconnectStarted) { + if (this.#maxPollIntervalRestart.resolved) + this.#maxPollIntervalRestart = new DeferredPromise(); + + this.#workerTerminationScheduled = new DeferredPromise(); + this.#lastFetchClockNs = hrtime.bigint(); + if (this.#pendingOperations.length === 0) { + const workersToSpawn = Math.max(1, Math.min(this.#concurrency, this.#partitionCount)); + const cacheExpirationLoop = this.#cacheExpirationLoop(); + this.#logger.debug(`Spawning ${workersToSpawn} workers`, this.#createConsumerBindingMessageMetadata()); + this.#workers = + Array(workersToSpawn) + .fill() + .map((_, i) => + this.#worker(config, perMessageProcessor.bind(this), fetcher.bind(this)) + .catch(e => { + if (this.#logger) + this.#logger.error(`Worker ${i} encountered an error: ${e}:${e.stack}`); + })); + + /* Best we can do is log errors on worker issues - handled by the catch block above. */ + await Promise.allSettled(this.#workers); + this.#maxPollIntervalRestart.resolve(); + await cacheExpirationLoop; + } + + await this.#executePendingOperations(); + } + + }); + this.#maxPollIntervalRestart.resolve(); + } + + /** + * Consumes a single message from the consumer within the given timeout. + * THIS METHOD IS NOT IMPLEMENTED. + * @note This method cannot be used with run(). Either that, or this must be used. + * + * @param {any} args + * @param {number} args.timeout - the timeout in milliseconds, defaults to 1000. + * @returns {import("../..").Message|null} a message, or null if the timeout was reached. 
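How partitionsConsumedConcurrently maps onto the workers spawned above; the handler is illustrative:

await consumer.run({
  partitionsConsumedConcurrently: 3,   // spawns max(1, min(3, assigned partition count)) workers
  eachMessage: async ({ message }) => { /* ... */ },
});
// Each worker drains one per-partition cache at a time, so a single slow
// partition does not stall the others.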
+ */ + async consume({ timeout } = { timeout: 1000 }) { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('consume can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + if (this.#running) { + throw new error.KafkaJSError('consume() and run() cannot be used together.', { code: error.ErrorCodes.ERR__CONFLICT }); + } + + this.#internalClient.setDefaultConsumeTimeout(timeout); + let m = null; + + try { + const ms = await this.#consumeN(1); + m = ms[0]; + } finally { + this.#internalClient.setDefaultConsumeTimeout(undefined); + } + + throw new error.KafkaJSError('consume() is not implemented.' + m, { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + + async #commitOffsetsUntilNoStateErr(offsetsToCommit) { + let err = { code: error.ErrorCodes.ERR_NO_ERROR }; + do { + try { + await this.commitOffsets(offsetsToCommit); + } catch (e) { + err = e; + } + } while (err.code && err.code === error.ErrorCodes.ERR__STATE); + } + + /** + * Commit offsets for the given topic partitions. If topic partitions are not specified, commits all offsets. + * @param {import("../../types/kafkajs").TopicPartitionOffset[]?} topicPartitions + * @returns {Promise} a promise that resolves when the offsets have been committed. + */ + async commitOffsets(topicPartitions = null) { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Commit can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + return new Promise((resolve, reject) => { - const rdKafkaTopicPartitionOffset = - topicPartitionOffsetToRdKafka(topicPartitionOffset); - this.#internalClient.seek(rdKafkaTopicPartitionOffset, 0, (err) => { - if (err) { - reject(new Error(`Seek error code ${err.code}`)); - } else { + try { + let cb = (e) => { + if (e) + reject(createKafkaJsErrorFromLibRdKafkaError(e)); + else + resolve(); + }; + + if (topicPartitions) + topicPartitions = topicPartitions.map(topicPartitionOffsetMetadataToRdKafka); + else + topicPartitions = null; + this.#internalClient.commitCb(topicPartitions, cb); + } catch (e) { + if (!e.code || e.code !== error.ErrorCodes.ERR__NO_OFFSET) + reject(createKafkaJsErrorFromLibRdKafkaError(e)); + else resolve(); + } + }); + } + + /** + * Fetch committed offsets for the given topic partitions. + * + * @param {import("../../types/kafkajs").TopicPartitionOffsetAndMetadata[]} topicPartitions - + * the topic partitions to check for committed offsets. Defaults to all assigned partitions. + * @param {number} timeout - timeout in ms. Defaults to infinite (-1). + * @returns {Promise} a promise that resolves to the committed offsets. + */ + async committed(topicPartitions = null, timeout = -1) { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Committed can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + if (!topicPartitions) { + topicPartitions = this.assignment(); + } + + const topicPartitionsRdKafka = topicPartitions.map( + topicPartitionOffsetToRdKafka); + + return new Promise((resolve, reject) => { + this.#internalClient.committed(topicPartitionsRdKafka, timeout, (err, offsets) => { + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + return; } + resolve(offsets.map(topicPartitionOffsetMetadataToKafkaJS)); }); - }).catch(console.error); // Default handler + }); } - async describeGroup() { - this.#notImplemented(); + /** + * Apply pending seeks to topic partitions we have just obtained as a result of a rebalance. 
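A manual commit sketch built on the two methods above, assuming auto-commit was disabled in the consumer's kafkaJS configuration; the topic and offset values are placeholders:

await consumer.commitOffsets([
  { topic: 'orders', partition: 0, offset: '42' },
]);

// Fetch what the group has committed for the current assignment (5 s timeout).
const committed = await consumer.committed(null, 5000);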
+ * @param {{topic: string, partition: number}[]} assignment The list of topic partitions to check for pending seeks. + * @returns {{topic: string, partition: number, offset: number}[]} the new assignment with the offsets seeked to, which can be passed to assign(). + */ + #assignAsPerSeekedOffsets(assignment) { + for (let i = 0; i < assignment.length; i++) { + const topicPartition = assignment[i]; + const key = partitionKey(topicPartition); + if (!this.#pendingSeeks.has(key)) + continue; + + const tpo = this.#pendingSeeks.get(key); + this.#pendingSeeks.delete(key); + + assignment[i].offset = tpo.offset; + assignment[i].leaderEpoch = tpo.leaderEpoch; + } + return assignment; } - pause(topics) { - topics = this.#flattenTopicPartitions(topics); - this.#internalClient.pause(topics); + #addPendingOperation(fun) { + if (this.#pendingOperations.length === 0) { + this.#workerTerminationScheduled.resolve(); + } + this.#pendingOperations.push(fun); } - paused() { - this.#notImplemented(); + async #seekInternal(topicPartitionOffsets) { + if (topicPartitionOffsets.length === 0) { + return; + } + + // Uncomment to test an additional delay in seek + // await Timer.withTimeout(1000); + + const seekedPartitions = []; + const pendingSeeks = new Map(); + const assignmentSet = new Set(); + for (const topicPartitionOffset of topicPartitionOffsets) { + const key = partitionKey(topicPartitionOffset); + pendingSeeks.set(key, topicPartitionOffset); + } + + const assignment = this.assignment(); + for (const topicPartition of assignment) { + const key = partitionKey(topicPartition); + assignmentSet.add(key); + if (!pendingSeeks.has(key)) + continue; + seekedPartitions.push([key, pendingSeeks.get(key)]); + } + + for (const topicPartitionOffset of topicPartitionOffsets) { + const key = partitionKey(topicPartitionOffset); + if (!assignmentSet.has(key)) + this.#pendingSeeks.set(key, topicPartitionOffset); + } + + const offsetsToCommit = []; + const librdkafkaSeekPromises = []; + for (const [key, topicPartitionOffset] of seekedPartitions) { + this.#lastConsumedOffsets.delete(key); + this.#messageCache.markStale([topicPartitionOffset]); + offsetsToCommit.push(topicPartitionOffset); + + const librdkafkaSeekPromise = new DeferredPromise(); + this.#internalClient.seek(topicPartitionOffset, 1000, + (err) => { + if (err) + this.#logger.error(`Error while calling seek from within seekInternal: ${err}`, this.#createConsumerBindingMessageMetadata()); + librdkafkaSeekPromise.resolve(); + }); + librdkafkaSeekPromises.push(librdkafkaSeekPromise); + } + await Promise.allSettled(librdkafkaSeekPromises); + await Promise.all(librdkafkaSeekPromises); + + for (const [key, ] of seekedPartitions) { + this.#pendingSeeks.delete(key); + } + + /* Offsets are committed on seek only when in compatibility mode. 
*/ + if (offsetsToCommit.length !== 0 && this.#internalConfig['enable.auto.commit']) { + await this.#commitOffsetsUntilNoStateErr(offsetsToCommit); + } } + #markBatchPayloadsStale(topicPartitions) { + for (const topicPartition of topicPartitions) { + const key = partitionKey(topicPartition); + if (this.#topicPartitionToBatchPayload.has(key)) + this.#topicPartitionToBatchPayload.get(key)._stale = true; + } + } + + async #pauseInternal(topicPartitions) { + // Uncomment to test future async pause + // await Timer.withTimeout(1000); + + this.#messageCache.markStale(topicPartitions); + this.#internalClient.pause(topicPartitions); + + const seekOffsets = []; + for (let topicPartition of topicPartitions) { + const key = partitionKey(topicPartition); + if (this.#lastConsumedOffsets.has(key)) { + const seekOffset = this.#lastConsumedOffsets.get(key); + const topicPartitionOffset = { + topic: topicPartition.topic, + partition: topicPartition.partition, + offset: seekOffset.offset + 1, + leaderEpoch: seekOffset.leaderEpoch, + }; + seekOffsets.push(topicPartitionOffset); + } + } + if (seekOffsets.length) { + await this.#seekInternal(seekOffsets, false); + } + } + + async #resumeInternal(topicPartitions) { + // Uncomment to test future async resume + // await Timer.withTimeout(1000); + this.#internalClient.resume(topicPartitions); + } + + /** + * Seek to the given offset for the topic partition. + * This method is completely asynchronous, and does not wait for the seek to complete. + * In case any partitions that are seeked to, are not a part of the current assignment, they are stored internally. + * If at any time, the consumer is assigned the partition, the seek will be performed. + * Depending on the value of the librdkafka property 'enable.auto.commit', the consumer will commit the offset seeked to. + * @param {import("../../types/kafkajs").TopicPartitionOffset} topicPartitionOffset + */ + seek(topicPartitionOffset) { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Seek can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + const rdKafkaTopicPartitionOffset = + topicPartitionOffsetToRdKafka(topicPartitionOffset); + + if (typeof rdKafkaTopicPartitionOffset.topic !== 'string') { + throw new error.KafkaJSError('Topic must be a string.', { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + if (isNaN(rdKafkaTopicPartitionOffset.offset) || (rdKafkaTopicPartitionOffset.offset < 0 && rdKafkaTopicPartitionOffset.offset !== -2 && rdKafkaTopicPartitionOffset.offset !== -3)) { + throw new error.KafkaJSError('Offset must be >= 0, or a special value.', { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + /* If anyone's using eachBatch, mark the batch as stale. */ + this.#markBatchPayloadsStale([rdKafkaTopicPartitionOffset]); + + this.#addPendingOperation(() => + this.#seekInternal([rdKafkaTopicPartitionOffset])); + } + + async describeGroup() { + notImplemented(); + } + + /** + * Find the assigned topic partitions for the consumer. + * @returns {import("../../types/kafkajs").TopicPartition[]} the current assignment. + */ assignment() { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Assignment can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + return this.#flattenTopicPartitions(this.#internalClient.assignments()); } + /** + * Get the type of rebalance protocol used in the consumer group. + * + * @returns "NONE" (if not in a group yet), "COOPERATIVE" or "EAGER". 
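A seek() sketch per the validation above; the values are placeholders, offsets must be non-negative (or one of the special values), and the call returns before the seek is applied:

consumer.seek({ topic: 'orders', partition: 0, offset: '100' });
// If partition 0 is not currently assigned, the seek is stored and applied once the
// partition is assigned; with 'enable.auto.commit' set, the seeked offset is also committed.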
+ */ + rebalanceProtocol() { + if (this.#state !== ConsumerState.CONNECTED) { + return "NONE"; + } + return this.#internalClient.rebalanceProtocol(); + } + + /** + * Fetches all partitions of topic that are assigned to this consumer. + * @param {string} topic + * @returns {number[]} a list of partitions. + */ + #getAllAssignedPartition(topic) { + return this.#internalClient.assignments() + .filter((partition) => partition.topic === topic) + .map((tpo) => tpo.partition); + } + + /** + * Pauses the given topic partitions. If partitions are not specified, pauses + * all partitions for the given topic. If topic partition(s) are already paused + * this method has no effect. + * @param {{topic: string, partitions?: number[]}[]} topics + * @returns {Function} a function that can be called to resume the given topic partitions. + */ + pause(topics) { + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Pause can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + this.#logger.debug(`Pausing ${topics.length} topics`, this.#createConsumerBindingMessageMetadata()); + + const toppars = []; + for (let topic of topics) { + if (typeof topic.topic !== 'string') { + throw new error.KafkaJSError('Topic must be a string.', { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + const toppar = { topic: topic.topic }; + + if (!topic.partitions) { + toppar.partitions = this.#getAllAssignedPartition(topic.topic); + } else { + /* TODO: add a check here to make sure we own each partition */ + toppar.partitions = [...topic.partitions]; + } + + toppars.push(toppar); + } + + const flattenedToppars = this.#flattenTopicPartitions(toppars); + if (flattenedToppars.length === 0) { + return; + } + + /* If anyone's using eachBatch, mark the batch as stale. */ + this.#markBatchPayloadsStale(flattenedToppars); + + flattenedToppars.forEach( + topicPartition => this.#pausedPartitions.set( + partitionKey(topicPartition), + topicPartition)); + + this.#addPendingOperation(() => + this.#pauseInternal(flattenedToppars)); + + /* Note: we don't use flattenedToppars here because resume flattens them again. */ + return () => this.resume(toppars); + } + + /** + * Returns the list of paused topic partitions. + * @returns {{topic: string, partitions: number[]}[]} a list of paused topic partitions. + */ + paused() { + const topicToPartitions = Array + .from(this.#pausedPartitions.values()) + .reduce( + (acc, { topic, partition }) => { + if (!acc[topic]) { + acc[topic] = []; + } + acc[topic].push(partition); + return acc; + }, + {}); + return Array.from(Object.entries(topicToPartitions), ([topic, partitions]) => ({ topic, partitions })); + } + + + /** + * Resumes the given topic partitions. If partitions are not specified, resumes + * all partitions for the given topic. If topic partition(s) are already resumed + * this method has no effect. 
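Pausing and resuming as implemented above; pause() hands back a convenience function that resumes the same topic partitions (topic name assumed):

const resumeOrders = consumer.pause([{ topic: 'orders', partitions: [0, 1] }]);
console.log(consumer.paused());   // [{ topic: 'orders', partitions: [0, 1] }]
resumeOrders();                   // same as consumer.resume([{ topic: 'orders', partitions: [0, 1] }])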
+ * @param {{topic: string, partitions?: number[]}[]} topics + */ resume(topics) { - topics = this.#flattenTopicPartitions(topics); - this.#internalClient.resume(topics); + if (this.#state !== ConsumerState.CONNECTED) { + throw new error.KafkaJSError('Resume can only be called while connected.', { code: error.ErrorCodes.ERR__STATE }); + } + + this.#logger.debug(`Resuming ${topics.length} topics`, this.#createConsumerBindingMessageMetadata()); + + const toppars = []; + for (let topic of topics) { + if (typeof topic.topic !== 'string') { + throw new error.KafkaJSError('Topic must be a string.', { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + const toppar = { topic: topic.topic }; + + if (!topic.partitions) { + toppar.partitions = this.#getAllAssignedPartition(topic.topic); + } else { + toppar.partitions = [...topic.partitions]; + } + + toppars.push(toppar); + } + + const flattenedToppars = this.#flattenTopicPartitions(toppars); + if (flattenedToppars.length === 0) { + return; + } + flattenedToppars.map(partitionKey). + forEach(key => this.#pausedPartitions.delete(key)); + + this.#addPendingOperation(() => + this.#resumeInternal(flattenedToppars)); } - on(eventName, listener) { - this.#notImplemented(); + on(/* eventName, listener */) { + notImplemented(); } + /** + * @returns {import("../../types/kafkajs").Logger} the logger associated to this consumer. + */ logger() { - this.#notImplemented(); + return this.#logger; } get events() { - this.#notImplemented(); + notImplemented(); + return null; } + /** + * Disconnects and cleans up the consumer. + * @note This cannot be called from within `eachMessage` callback of `Consumer.run`. + * @returns {Promise} a promise that resolves when the consumer has disconnected. + */ async disconnect() { + /* Not yet connected - no error. */ + if (this.#state === ConsumerState.INIT) { + return; + } + + /* TODO: We should handle a case where we are connecting, we should + * await the connection and then schedule a disconnect. */ + + /* Already disconnecting, or disconnected. */ + if (this.#state >= ConsumerState.DISCONNECTING) { + return; + } if (this.#state >= ConsumerState.DISCONNECTING) { return; } - this.#state = ConsumerState.DISCONNECTING; + + this.#disconnectStarted = true; + this.#workerTerminationScheduled.resolve(); + this.#logger.debug("Signalling disconnection attempt to workers", this.#createConsumerBindingMessageMetadata()); + await this.#lock.write(async () => { + + this.#state = ConsumerState.DISCONNECTING; + + }); + await new Promise((resolve, reject) => { const cb = (err) => { - err ? reject(err) : resolve(); + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + return; + } this.#state = ConsumerState.DISCONNECTED; - } + this.#logger.info("Consumer disconnected", this.#createConsumerBindingMessageMetadata()); + resolve(); + }; + this.#internalClient.unsubscribe(); this.#internalClient.disconnect(cb); }); } } -module.exports = { Consumer } +module.exports = { Consumer, PartitionAssigners, }; diff --git a/lib/kafkajs/_consumer_cache.js b/lib/kafkajs/_consumer_cache.js new file mode 100644 index 00000000..38329d4f --- /dev/null +++ b/lib/kafkajs/_consumer_cache.js @@ -0,0 +1,264 @@ +const { + partitionKey, +} = require('./_common'); +const { LinkedList } = require('./_linked-list'); + +/** + * A PerPartitionMessageCache is a cache for messages for a single partition. + */ +class PerPartitionMessageCache { + /* The cache is a list of messages. */ + #cache = new LinkedList(); + /* The key for the partition. 
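A shutdown sketch matching the disconnect() flow above; the signal handler is illustrative, and disconnect() must not be called from inside an eachMessage or eachBatch callback:

process.on('SIGTERM', async () => {
  await consumer.disconnect();   // signals workers, unsubscribes, then disconnects
  process.exit(0);
});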
*/ + #key = null; + /* Whether the cache is assigned to a consumer. */ + _assigned = false; + + constructor(key) { + this.#key = key; + } + + /** + * Returns the number of total elements in the cache. + */ + size() { + return this.#cache.length; + } + + /** + * Adds a message to the cache. + */ + _add(message) { + this.#cache.addLast(message); + } + + get key() { + return this.#key; + } + + /** + * @returns The next element in the cache or null if none exists. + */ + _next() { + return this.#cache.removeFirst(); + } + + /** + * @returns Upto `n` next elements in the cache or an empty array if none exists. + */ + _nextN(n) { + const len = this.#cache.length; + n = (n < 0 || len < n) ? len : n; + + const ret = new Array(n); + for (let i = 0; i < n; i++) { + ret[i] = this.#cache.removeFirst(); + } + return ret; + } +} + + +/** + * MessageCache defines a dynamically sized cache for messages. + * Internally, it uses PerPartitionMessageCache to store messages for each partition. + */ +class MessageCache { + #size; + /* Map of topic+partition to PerPartitionMessageCache. */ + #tpToPpc; + /* LinkedList of available partitions. */ + #availablePartitions; + /* LinkedList of assigned partitions. */ + #assignedPartitions; + + + constructor(logger) { + this.logger = logger ?? console; + this.#reinit(); + } + + /** + * Reinitializes the cache. + */ + #reinit() { + this.#tpToPpc = new Map(); + this.#availablePartitions = new LinkedList(); + this.#assignedPartitions = new LinkedList(); + this.#size = 0; + } + + /** + * Assign a new partition to the consumer, if available. + * + * @returns {PerPartitionMessageCache} - the partition assigned to the consumer, or null if none available. + */ + #assignNewPartition() { + let ppc = this.#availablePartitions.removeFirst(); + if (!ppc) + return null; + + ppc._node = this.#assignedPartitions.addLast(ppc); + ppc._assigned = true; + return ppc; + } + + /** + * Remove an empty partition from the cache. + * + * @param {PerPartitionMessageCache} ppc The partition to remove from the cache. + */ + #removeEmptyPartition(ppc) { + this.#assignedPartitions.remove(ppc._node); + ppc._assigned = false; + ppc._node = null; + this.#tpToPpc.delete(ppc.key); + } + + /** + * Add a single message to a PPC. + * In case the PPC does not exist, it is created. + * + * @param {Object} message - the message to add to the cache. + */ + #add(message) { + const key = partitionKey(message); + let cache = this.#tpToPpc.get(key); + if (!cache) { + cache = new PerPartitionMessageCache(key); + this.#tpToPpc.set(key, cache); + cache._node = this.#availablePartitions.addLast(cache); + } + cache._add(message); + } + + get availableSize() { + return this.#availablePartitions.length; + } + + get assignedSize() { + return this.#assignedPartitions.length; + } + + get size() { + return this.#size; + } + + /** + * Mark a set of topic partitions 'stale'. + * + * Post-conditions: PPCs are removed from their currently assigned list + * and deleted from the PPC map. Cache size is decremented accordingly. + * PPCs are marked as not assigned. 
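An illustrative walk-through of the MessageCache contract being defined here (an internal API, shown only to clarify the next()/return() hand-off; message fields are placeholders):

const MessageCache = require('./_consumer_cache');
const cache = new MessageCache(console);
cache.addMessages([
  { topic: 't', partition: 0, offset: 1, value: 'a' },
  { topic: 't', partition: 0, offset: 2, value: 'b' },
]);
let [msg, ppc] = cache.next();    // assigns partition t[0] to this caller
[msg, ppc] = cache.next(ppc);     // keeps draining the same partition
cache.return(ppc);                // hand the partition back so another worker can take it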
+ */ + markStale(topicPartitions) { + for (const topicPartition of topicPartitions) { + const key = partitionKey(topicPartition); + const ppc = this.#tpToPpc.get(key); + if (!ppc) + continue; + + this.#size -= ppc.size(); + if (ppc._assigned) { + this.#assignedPartitions.remove(ppc._node); + } else { + this.#availablePartitions.remove(ppc._node); + } + this.#tpToPpc.delete(key); + ppc._assigned = false; + } + } + + /** + * Adds many messages into the cache, partitioning them as per their toppar. + * Increases cache size by the number of messages added. + * + * @param {Array} messages - the messages to add to the cache. + */ + addMessages(messages) { + for (const message of messages) + this.#add(message); + this.#size += messages.length; + } + + /** + * Allows returning the PPC without asking for another message. + * + * @param {PerPartitionMessageCache} ppc - the partition to return. + * + * @note this is a no-op if the PPC is not assigned. + */ + return(ppc) { + if (!ppc._assigned) + return; + if (ppc._node) { + this.#assignedPartitions.remove(ppc._node); + ppc._node = this.#availablePartitions.addLast(ppc); + ppc._assigned = false; + } + } + + /** + * Returns the next element in the cache, or null if none exists. + * + * If the current PPC is exhausted, it moves to the next PPC. + * If all PPCs are exhausted, it returns null. + * + * @param {PerPartitionMessageCache} ppc - after a consumer has consumed a message, it must return the PPC back to us via this parameter. + * otherwise, no messages from that topic partition will be consumed. + * @returns {Array} - the next message in the cache, or null if none exists, and the corresponding PPC. + * @note Whenever making changes to this function, ensure that you benchmark perf. + */ + next(ppc = null) { + if (!ppc|| !ppc._assigned) + ppc = this.#assignNewPartition(); + if (!ppc) + return null; + + let next = ppc._next(); + + if (!next) { + this.#removeEmptyPartition(ppc); + return this.next(); + } + + this.#size--; + return [next, ppc]; + } + + /** + * Returns the next `size` elements in the cache as an array, or null if none exists. + * + * @sa next, the behaviour is similar in other aspects. + */ + nextN(ppc = null, size = -1) { + if (!ppc || !ppc._assigned) + ppc = this.#assignNewPartition(); + if (!ppc) + return null; + + let nextN = ppc._nextN(size); + + if (size === -1 || nextN.length < size) { + this.#removeEmptyPartition(ppc); + } + if (!nextN.length) + return this.nextN(null, size); + + this.#size -= nextN.length; + return [nextN, ppc]; + } + + /** + * Clears the cache completely. + * This resets it to a base state. + */ + clear() { + for (const ppc of this.#tpToPpc.values()) { + ppc._assigned = false; + } + this.#reinit(); + } +} + +module.exports = MessageCache; diff --git a/lib/kafkajs/_error.js b/lib/kafkajs/_error.js new file mode 100644 index 00000000..27584cbf --- /dev/null +++ b/lib/kafkajs/_error.js @@ -0,0 +1,195 @@ +const LibrdKafkaError = require('../error'); + +/** + * @typedef {Object} KafkaJSError represents an error when using the promisified interface. + */ +class KafkaJSError extends Error { + /** + * @param {Error | string} error an Error or a string describing the error. + * @param {object} properties a set of optional error properties. + * @param {boolean} [properties.retriable=false] whether the error is retriable. Applies only to the transactional producer + * @param {boolean} [properties.fatal=false] whether the error is fatal. Applies only to the transactional producer. 
+ * @param {boolean} [properties.abortable=false] whether the error is abortable. Applies only to the transactional producer. + * @param {string} [properties.stack] the stack trace of the error. + * @param {number} [properties.code=LibrdKafkaError.codes.ERR_UNKNOWN] the error code. + */ + constructor(e, { retriable = false, fatal = false, abortable = false, stack = null, code = LibrdKafkaError.codes.ERR_UNKNOWN } = {}) { + super(e, {}); + this.name = 'KafkaJSError'; + this.message = e.message || e; + this.retriable = retriable; + this.fatal = fatal; + this.abortable = abortable; + this.code = code; + + if (stack) { + this.stack = stack; + } else { + Error.captureStackTrace(this, this.constructor); + } + + const errTypes = Object + .keys(LibrdKafkaError.codes) + .filter(k => LibrdKafkaError.codes[k] === this.code); + + if (errTypes.length !== 1) { + this.type = LibrdKafkaError.codes.ERR_UNKNOWN; + } else { + this.type = errTypes[0]; + } + } +} + +/** + * @typedef {Object} KafkaJSProtocolError represents an error that is caused when a Kafka Protocol RPC has an embedded error. + */ +class KafkaJSProtocolError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSProtocolError'; + } +} + +/** + * @typedef {Object} KafkaJSOffsetOutOfRange represents the error raised when fetching from an offset out of range. + */ +class KafkaJSOffsetOutOfRange extends KafkaJSProtocolError { + constructor() { + super(...arguments); + this.name = 'KafkaJSOffsetOutOfRange'; + } +} + +/** + * @typedef {Object} KafkaJSConnectionError represents the error raised when a connection to a broker cannot be established or is broken unexpectedly. + */ +class KafkaJSConnectionError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSConnectionError'; + } +} + +/** + * @typedef {Object} KafkaJSRequestTimeoutError represents the error raised on a timeout for one request. + */ +class KafkaJSRequestTimeoutError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSRequestTimeoutError'; + } +} + +/** + * @typedef {Object} KafkaJSPartialMessageError represents the error raised when a response does not contain all expected information. + */ +class KafkaJSPartialMessageError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSPartialMessageError'; + } +} + +/** + * @typedef {Object} KafkaJSSASLAuthenticationError represents an error raised when authentication fails. + */ +class KafkaJSSASLAuthenticationError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSSASLAuthenticationError'; + } +} + +/** + * @typedef {Object} KafkaJSGroupCoordinatorNotFound represents an error raised when the group coordinator is not found. + */ +class KafkaJSGroupCoordinatorNotFound extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSGroupCoordinatorNotFound'; + } +} + +/** + * @typedef {Object} KafkaJSNotImplemented represents an error raised when a feature is not implemented for this particular client. + */ +class KafkaJSNotImplemented extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSNotImplemented'; + } +} + +/** + * @typedef {Object} KafkaJSTimeout represents an error raised when a timeout for an operation occurs (including retries). 
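An error handling sketch using the classes and codes defined in this file; the operation shown is arbitrary and assumes a consumer created via the promisified API:

const { ErrorCodes, isKafkaJSError } = require('./_error');

try {
  await consumer.commitOffsets();
} catch (e) {
  if (isKafkaJSError(e) && e.code === ErrorCodes.ERR__STATE) {
    // e.g. commit attempted while not connected; e.retriable and e.fatal are also available
  } else {
    throw e;
  }
}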
+ */ +class KafkaJSTimeout extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSTimeout'; + } +} + +class KafkaJSLockTimeout extends KafkaJSTimeout { + constructor() { + super(...arguments); + this.name = 'KafkaJSLockTimeout'; + } +} + +/** + * @typedef {Object} KafkaJSAggregateError represents an error raised when multiple errors occur at once. + */ +class KafkaJSAggregateError extends Error { + constructor(message, errors) { + super(message); + this.errors = errors; + this.name = 'KafkaJSAggregateError'; + } +} + +/** + * @typedef {Object} KafkaJSNoBrokerAvailableError represents an error raised when no broker is available for the operation. + */ +class KafkaJSNoBrokerAvailableError extends KafkaJSError { + constructor() { + super(...arguments); + this.name = 'KafkaJSNoBrokerAvailableError'; + } +} + +/** + * @function isRebalancing + * @param {KafkaJSError} e + * @returns boolean representing whether the error is a rebalancing error. + */ +const isRebalancing = e => + e.type === 'REBALANCE_IN_PROGRESS' || + e.type === 'NOT_COORDINATOR_FOR_GROUP' || + e.type === 'ILLEGAL_GENERATION'; + +/** + * @function isKafkaJSError + * @param {any} e + * @returns boolean representing whether the error is a KafkaJSError. + */ +const isKafkaJSError = e => e instanceof KafkaJSError; + +module.exports = { + KafkaJSError, + KafkaJSPartialMessageError, + KafkaJSProtocolError, + KafkaJSConnectionError, + KafkaJSRequestTimeoutError, + KafkaJSSASLAuthenticationError, + KafkaJSOffsetOutOfRange, + KafkaJSGroupCoordinatorNotFound, + KafkaJSNotImplemented, + KafkaJSTimeout, + KafkaJSLockTimeout, + KafkaJSAggregateError, + KafkaJSNoBrokerAvailableError, + isRebalancing, + isKafkaJSError, + ErrorCodes: LibrdKafkaError.codes, +}; diff --git a/lib/kafkajs/_kafka.js b/lib/kafkajs/_kafka.js index e8849138..49a97d17 100644 --- a/lib/kafkajs/_kafka.js +++ b/lib/kafkajs/_kafka.js @@ -1,36 +1,90 @@ -const { Producer } = require('./_producer'); -const { Consumer } = require('./_consumer'); - +const { Producer, CompressionTypes } = require('./_producer'); +const { Consumer, PartitionAssigners } = require('./_consumer'); +const { Admin, ConsumerGroupStates, AclOperationTypes } = require('./_admin'); +const error = require('./_error'); +const { logLevel, checkIfKafkaJsKeysPresent, CompatibilityErrorMessages } = require('./_common'); class Kafka { - #commonClientConfig = {}; + /* @type{import("../../types/kafkajs").CommonConstructorConfig} */ + #commonClientConfig = {}; + + /** + * + * @param {import("../../types/kafkajs").CommonConstructorConfig} config + */ + constructor(config) { + this.#commonClientConfig = config ?? {}; - constructor(config) { - this.#commonClientConfig = config; + const disallowedKey = checkIfKafkaJsKeysPresent('common', this.#commonClientConfig); + if (disallowedKey !== null) { + throw new error.KafkaJSError(CompatibilityErrorMessages.kafkaJSCommonKey(disallowedKey)); } + } + + /** + * Merge the producer/consumer specific configuration with the common configuration. + * @param {import("../../types/kafkajs").ProducerConstructorConfig|import("../../types/kafkajs").ConsumerConstructorConfig} config + * @returns {(import("../../types/kafkajs").ProducerConstructorConfig & import("../../types/kafkajs").CommonConstructorConfig) | (import("../../types/kafkajs").ConsumerConstructorConfig & import("../../types/kafkajs").CommonConstructorConfig)} + */ + #mergeConfiguration(config) { + config = Object.assign({}, config) ?? 
{};
+    const mergedConfig = Object.assign({}, this.#commonClientConfig);
 
-  #mergeConfiguration(config) {
-    let baseConfig = Object.assign({}, this.#commonClientConfig);
-    config = Object.assign({}, config);
-
-    let rdKafka = baseConfig.rdKafka;
-    Object.assign(baseConfig, config);
-    if (rdKafka && config.rdKafka) {
-      baseConfig.rdKafka = {
-        ...rdKafka,
-        ...config.rdKafka
-      }
-    }
-    return baseConfig;
+    mergedConfig.kafkaJS = Object.assign({}, mergedConfig.kafkaJS) ?? {};
+
+    if (typeof config.kafkaJS === 'object') {
+      mergedConfig.kafkaJS = Object.assign(mergedConfig.kafkaJS, config.kafkaJS);
+      delete config.kafkaJS;
     }
 
-    producer(config) {
-      return new Producer(this.#mergeConfiguration(config));
+    Object.assign(mergedConfig, config);
+
+    return mergedConfig;
+  }
+
+  /**
+   * Creates a new producer.
+   * @param {import("../../types/kafkajs").ProducerConstructorConfig} config
+   * @returns {Producer}
+   */
+  producer(config) {
+    const disallowedKey = checkIfKafkaJsKeysPresent('producer', config ?? {});
+    if (disallowedKey !== null) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.kafkaJSClientKey(disallowedKey, 'producer'));
     }
 
-    consumer(config) {
-      return new Consumer(this.#mergeConfiguration(config));
+    return new Producer(this.#mergeConfiguration(config));
+  }
+
+  /**
+   * Creates a new consumer.
+   * @param {import("../../types/kafkajs").ConsumerConstructorConfig} config
+   * @returns {Consumer}
+   */
+  consumer(config) {
+    const disallowedKey = checkIfKafkaJsKeysPresent('consumer', config ?? {});
+    if (disallowedKey !== null) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.kafkaJSClientKey(disallowedKey, 'consumer'));
     }
+
+    return new Consumer(this.#mergeConfiguration(config));
+  }
+
+  admin(config) {
+    const disallowedKey = checkIfKafkaJsKeysPresent('admin', config ?? {});
+    if (disallowedKey !== null) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.kafkaJSClientKey(disallowedKey, 'admin'));
+    }
+
+    return new Admin(this.#mergeConfiguration(config));
+  }
 }
 
-module.exports = { Kafka }
+module.exports = {
+  Kafka,
+  ...error, logLevel,
+  PartitionAssigners,
+  PartitionAssignors: PartitionAssigners,
+  CompressionTypes,
+  ConsumerGroupStates,
+  AclOperationTypes };
diff --git a/lib/kafkajs/_linked-list.js b/lib/kafkajs/_linked-list.js
new file mode 100644
index 00000000..b23d219e
--- /dev/null
+++ b/lib/kafkajs/_linked-list.js
@@ -0,0 +1,219 @@
+/**
+ * Node class for the linked list. Once removed,
+ * a node cannot be used again.
+ */
+class LinkedListNode {
+  // Value contained by the node.
+  #value;
+  // Node was removed from the list.
+  _removed = false;
+  // Previous node in the list.
+  _prev = null;
+  // Next node in the list.
+  _next = null;
+
+  constructor(value) {
+    this.#value = value;
+  }
+
+  get value() {
+    return this.#value;
+  }
+
+  get prev() {
+    return this._prev;
+  }
+
+  get next() {
+    return this._next;
+  }
+}
+
+class LinkedList {
+  _head = null;
+  _tail = null;
+  #count = 0;
+
+  *#iterator() {
+    let node = this._head;
+    while (node) {
+      yield node.value;
+      node = node._next;
+    }
+  }
+
+  #insertInBetween(node, prev, next) {
+    node._next = next;
+    node._prev = prev;
+    if (prev)
+      prev._next = node;
+    else
+      this._head = node;
+
+    if (next)
+      next._prev = node;
+    else
+      this._tail = node;
+
+    this.#count++;
+    return node;
+  }
+
+  /**
+   * Removes the given node from the list,
+   * if it has not already been removed.
+   *
+   * @param {LinkedListNode} node
+   */
+  remove(node) {
+    if (node._removed) {
+      return;
+    }
+
+    if (node._prev)
+      node._prev._next = node._next;
+    else
+      this._head = node._next;
+
+    if (node._next)
+      node._next._prev = node._prev;
+    else
+      this._tail = node._prev;
+
+    node._next = null;
+    node._prev = null;
+    node._removed = true;
+    this.#count--;
+  }
+
+  /**
+   * Removes the first node from the list and returns its value,
+   * or null if the list is empty.
+   *
+   * @returns {any} The value of the first node in the list or null.
+   */
+  removeFirst() {
+    if (this._head === null) {
+      return null;
+    }
+
+    const node = this._head;
+    this.remove(node);
+    return node.value;
+  }
+
+  /**
+   * Removes the last node from the list and returns its value,
+   * or null if the list is empty.
+   *
+   * @returns {any} The value of the last node in the list or null.
+   */
+  removeLast() {
+    if (this._tail === null) {
+      return null;
+    }
+
+    const node = this._tail;
+    this.remove(node);
+    return node.value;
+  }
+
+  /**
+   * Adds a new node at the beginning of the list and returns it.
+   *
+   * @param {any} value
+   * @returns {LinkedListNode} The new node.
+   */
+  addFirst(value) {
+    const node = new LinkedListNode(value);
+    return this.#insertInBetween(node, null,
+      this._head);
+  }
+
+  /**
+   * Adds a new node at the end of the list and returns it.
+   *
+   * @param {any} value Node value.
+   * @returns {LinkedListNode} The new node.
+   */
+  addLast(value) {
+    const node = new LinkedListNode(value);
+    return this.#insertInBetween(node, this._tail, null);
+  }
+
+  /**
+   * Adds a new node before the given node and returns it.
+   * The given node must not have been removed.
+   *
+   * @param {LinkedListNode} node Reference node.
+   * @param {any} value New node value.
+   * @returns {LinkedListNode} The new node.
+   */
+  addBefore(node, value) {
+    if (node._removed)
+      throw new Error('Node was removed');
+    const newNode = new LinkedListNode(value);
+    return this.#insertInBetween(newNode, node._prev, node);
+  }
+
+  /**
+   * Adds a new node after the given node and returns it.
+   * The given node must not have been removed.
+   *
+   * @param {LinkedListNode} node Reference node.
+   * @param {any} value New node value.
+   * @returns {LinkedListNode} The new node.
+   */
+  addAfter(node, value) {
+    if (node._removed)
+      throw new Error('Node was removed');
+    const newNode = new LinkedListNode(value);
+    return this.#insertInBetween(newNode, node, node._next);
+  }
+
+  /**
+   * Concatenates the given list to the end of this list.
+   *
+   * @param {LinkedList} list List to concatenate.
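+   *
+   * @example
+   * // Illustrative sketch: after concat, `other` is emptied into `list`.
+   * const list = new LinkedList();
+   * list.addLast(1);
+   * const other = new LinkedList();
+   * other.addLast(2);
+   * list.concat(other); // iterating list yields 1, 2; other.length === 0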
+ */ + concat(list) { + if (list.length === 0) { + return; + } + + if (this._tail) { + this._tail._next = list._head; + } + + if (list._head) { + list._head._prev = this._tail; + } + + this._tail = list._tail; + this.#count += list.length; + list.#count = 0; + list._head = null; + list._tail = null; + } + + get first() { + return this._head; + } + + get last() { + return this._tail; + } + + get length() { + return this.#count; + } + + [Symbol.iterator]() { + return this.#iterator(); + } +} + +module.exports = { + LinkedList, + LinkedListNode +}; diff --git a/lib/kafkajs/_producer.js b/lib/kafkajs/_producer.js index acd31932..f2542f39 100644 --- a/lib/kafkajs/_producer.js +++ b/lib/kafkajs/_producer.js @@ -1,10 +1,22 @@ const RdKafka = require('../rdkafka'); -const { kafkaJSToRdKafkaConfig, topicPartitionOffsetToRdKafka } = require('./_common'); -const { Consumer } = require('./_consumer'); +const { kafkaJSToRdKafkaConfig, + topicPartitionOffsetToRdKafka, + createKafkaJsErrorFromLibRdKafkaError, + convertToRdKafkaHeaders, + createBindingMessageMetadata, + DefaultLogger, + loggerTrampoline, + severityToLogLevel, + checkAllowedKeys, + CompatibilityErrorMessages, + logLevel, +} = require('./_common'); +const error = require('./_error'); +const { Buffer } = require('buffer'); const ProducerState = Object.freeze({ - INIT: 0, - CONNECTING: 1, + INIT: 0, + CONNECTING: 1, INITIALIZING_TRANSACTIONS: 2, INITIALIZED_TRANSACTIONS: 3, CONNECTED: 4, @@ -12,168 +24,377 @@ const ProducerState = Object.freeze({ DISCONNECTED: 6, }); +const CompressionTypes = Object.freeze({ + None: 'none', + GZIP: 'gzip', + SNAPPY: 'snappy', + LZ4: 'lz4', + ZSTD: 'zstd', +}); + class Producer { - #kJSConfig = null - #rdKafkaConfig = null; + /** + * The config supplied by the user. + * @type {import("../../types/kafkajs").ProducerConstructorConfig|null} + */ + #userConfig = null; + + /** + * The config realized after processing any compatibility options. + * @type {import("../../types/config").ProducerGlobalConfig|null} + */ + #internalConfig = null; + + /** + * internalClient is the node-rdkafka client used by the API. + * @type {import("../rdkafka").Producer|null} + */ #internalClient = null; + + /** + * connectPromiseFunc is the set of promise functions used to resolve/reject the connect() promise. + * @type {{resolve: Function, reject: Function}|{}} + */ #connectPromiseFunc = {}; + + /** + * state is the current state of the producer. + * @type {ProducerState} + */ #state = ProducerState.INIT; + + /** + * ongoingTransaction is true if there is an ongoing transaction. + * @type {boolean} + */ #ongoingTransaction = false; + /** + * A logger for the producer. + * @type {import("../../types/kafkajs").Logger} + */ + #logger = new DefaultLogger(); + + /** + * @constructor + * @param {import("../../types/kafkajs").ProducerConfig} kJSConfig + */ constructor(kJSConfig) { - this.#kJSConfig = kJSConfig; + this.#userConfig = kJSConfig; + } + + /** + * The client name used by the producer for logging - determined by librdkafka + * using a combination of clientId and an integer. + * @type {string|undefined} + */ + #clientName = undefined; + + /** + * Convenience function to create the metadata object needed for logging. 
+ */ + #createProducerBindingMessageMetadata() { + return createBindingMessageMetadata(this.#clientName); } #config() { - if (!this.#rdKafkaConfig) - this.#rdKafkaConfig = this.#finalizedConfig(); - return this.#rdKafkaConfig; + if (!this.#internalConfig) + this.#internalConfig = this.#finalizedConfig(); + return this.#internalConfig; } - async #finalizedConfig() { - const config = await kafkaJSToRdKafkaConfig(this.#kJSConfig); - config.dr_cb = 'true'; + #kafkaJSToProducerConfig(kjsConfig) { + if (!kjsConfig || Object.keys(kjsConfig).length === 0) { + return {}; + } + + const disallowedKey = checkAllowedKeys('producer', kjsConfig); + if (disallowedKey) { + throw new error.KafkaJSError(CompatibilityErrorMessages.unsupportedKey(disallowedKey), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + const rdKafkaConfig = kafkaJSToRdKafkaConfig(kjsConfig); + + /* Producer specific configuration. */ + if (Object.hasOwn(kjsConfig, 'createPartitioner')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.createPartitioner(), { code: error.ErrorCodes.ERR__NOT_IMPLEMENTED }); + } + rdKafkaConfig['partitioner'] = 'murmur2_random'; + + if (Object.hasOwn(kjsConfig, 'metadataMaxAge')) { + rdKafkaConfig['topic.metadata.refresh.interval.ms'] = kjsConfig.metadataMaxAge; + } + + if (Object.hasOwn(kjsConfig, 'allowAutoTopicCreation')) { + rdKafkaConfig['allow.auto.create.topics'] = kjsConfig.allowAutoTopicCreation; + } + + if (Object.hasOwn(kjsConfig, 'transactionTimeout')) { + rdKafkaConfig['transaction.timeout.ms'] = kjsConfig.transactionTimeout; + } else { + rdKafkaConfig['transaction.timeout.ms'] = 60000; + } + + // `socket.timeout.ms` must be set <= `transaction.timeout.ms` + 100 + if (rdKafkaConfig['socket.timeout.ms'] > rdKafkaConfig['transaction.timeout.ms'] + 100) { + rdKafkaConfig['socket.timeout.ms'] = rdKafkaConfig['transaction.timeout.ms'] + 100; + } - if (this.#kJSConfig.hasOwnProperty('transactionalId')) { - config['transactional.id'] = this.#kJSConfig.transactionalId; + if (Object.hasOwn(kjsConfig, 'idempotent')) { + rdKafkaConfig['enable.idempotence'] = kjsConfig.idempotent; } - return config; + if (Object.hasOwn(kjsConfig, 'maxInFlightRequests')) { + rdKafkaConfig['max.in.flight'] = kjsConfig.maxInFlightRequests; + } + + if (Object.hasOwn(kjsConfig, 'transactionalId')) { + rdKafkaConfig['transactional.id'] = kjsConfig.transactionalId; + } + + if (Object.hasOwn(kjsConfig, 'compression')) { + rdKafkaConfig['compression.codec'] = kjsConfig.compression; + } + + if (Object.hasOwn(kjsConfig, 'acks')) { + rdKafkaConfig['acks'] = kjsConfig.acks; + } + + if (Object.hasOwn(kjsConfig, 'timeout')) { + rdKafkaConfig['request.timeout.ms'] = kjsConfig.timeout; + } + + const retry = kjsConfig.retry ?? {}; + const { retries } = retry; + rdKafkaConfig["retries"] = retries ?? 5; + + /* Set the logger */ + if (Object.hasOwn(kjsConfig, 'logger')) { + this.#logger = kjsConfig.logger; + } + + /* Set the log level - INFO for compatibility with kafkaJS, or DEBUG if that is turned + * on using the logLevel property. rdKafkaConfig.log_level is guaranteed to be set if we're + * here, and containing the correct value. */ + this.#logger.setLogLevel(severityToLogLevel[rdKafkaConfig.log_level]); + + return rdKafkaConfig; } + #finalizedConfig() { + /* Creates an rdkafka config based off the kafkaJS block. Switches to compatibility mode if the block exists. 
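+     * For example (illustrative values), a user config such as
+     *   { kafkaJS: { acks: 1, compression: 'gzip', transactionalId: 'txn-id' }, 'client.id': 'my-app' }
+     * has its kafkaJS keys translated to the corresponding librdkafka properties, while
+     * top-level keys such as 'client.id' are passed through to librdkafka as-is.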
*/ + let compatibleConfig = this.#kafkaJSToProducerConfig(this.#userConfig.kafkaJS); + + /* There can be multiple different and conflicting config directives for setting the log level: + * 1. If there's a kafkaJS block: + * a. If there's a logLevel directive in the kafkaJS block, set the logger level accordingly. + * b. If there's no logLevel directive, set the logger level to INFO. + * (both these are already handled in the conversion method above). + * 2. If there is a log_level or debug directive in the main config, set the logger level accordingly. + * !This overrides any different value provided in the kafkaJS block! + * a. If there's a log_level directive, set the logger level accordingly. + * b. If there's a debug directive, set the logger level to DEBUG regardless of anything else. This is because + * librdkafka ignores log_level if debug is set, and our behaviour should be identical. + * 3. There's nothing at all. Take no action in this case, let the logger use its default log level. + */ + if (Object.hasOwn(this.#userConfig, 'log_level')) { + this.#logger.setLogLevel(severityToLogLevel[this.#userConfig.log_level]); + } + + if (Object.hasOwn(this.#userConfig, 'debug')) { + this.#logger.setLogLevel(logLevel.DEBUG); + } + + let rdKafkaConfig = Object.assign(compatibleConfig, this.#userConfig); + + /* Delete properties which are already processed, or cannot be passed to node-rdkafka */ + delete rdKafkaConfig.kafkaJS; + + /* Certain properties that the user has set are overridden. There is + * no longer a delivery report, rather, results are made available on + * awaiting. */ + /* TODO: Add a warning if dr_cb is set? Or else, create a trampoline for it. */ + rdKafkaConfig.dr_cb = true; + + return rdKafkaConfig; + } + + /** + * Flattens a list of topics with partitions into a list of topic, partition, offset. + * @param {import("../../types/kafkajs").TopicOffsets[]} topics + * @returns {import("../../types/kafkajs").TopicPartitionOffset} + */ #flattenTopicPartitionOffsets(topics) { return topics.flatMap(topic => { return topic.partitions.map(partition => { - return { partition: partition.partition, offset: partition.offset, topic: topic.topic }; - }) - }) + return { partition: Number(partition.partition), offset: String(partition.offset), topic: String(topic.topic) }; + }); + }); } #readyTransactions(err) { - if (err) { - this.#connectPromiseFunc["reject"](err); - return; - } + if (err) { + this.#connectPromiseFunc["reject"](err); + return; + } - if (this.#state !== ProducerState.INITIALIZING_TRANSACTIONS) { - // FSM impossible state. We should add error handling for - // this later. - return; - } + if (this.#state !== ProducerState.INITIALIZING_TRANSACTIONS) { + // FSM impossible state. We should add error handling for + // this later. + return; + } - this.#state = ProducerState.INITIALIZED_TRANSACTIONS; - this.#readyCb(null); + this.#state = ProducerState.INITIALIZED_TRANSACTIONS; + this.#readyCb(); } - async #readyCb(arg) { - if (this.#state !== ProducerState.CONNECTING && this.#state !== ProducerState.INITIALIZED_TRANSACTIONS) { - // I really don't know how to handle this now. - return; - } + /** + * Processes a delivery report, converting it to the type that the promisified API uses. 
+ * @param {import('../..').LibrdKafkaError} err + * @param {import('../..').DeliveryReport} report + */ + #deliveryCallback(err, report) { + const opaque = report.opaque; + if (!opaque || (typeof opaque.resolve !== 'function' && typeof opaque.reject !== 'function')) { + // not sure how to handle this. + throw new error.KafkaJSError("Internal error: deliveryCallback called without opaque set properly", { code: error.ErrorCodes.ERR__STATE }); + } - let config = await this.#config(); - if (config.hasOwnProperty('transactional.id') && this.#state !== ProducerState.INITIALIZED_TRANSACTIONS) { - this.#state = ProducerState.INITIALIZING_TRANSACTIONS; - this.#internalClient.initTransactions(5000 /* default: 5s */, this.#readyTransactions.bind(this)); - return; - } + if (err) { + opaque.reject(createKafkaJsErrorFromLibRdKafkaError(err)); + return; + } - this.#state = ProducerState.CONNECTED; + delete report['opaque']; - // Start a loop to poll. - let pollInterval = setInterval(() => { - if (this.#state >= ProducerState.DISCONNECTING) { - clearInterval(pollInterval); - return; - } - this.#internalClient.poll(); - }, 500); - - this.#internalClient.on('delivery-report', function(err, report) { - //console.log('got delivery report', report, err); - const opaque = report.opaque; - if (!opaque) { - // not sure how to handle this. - return; - } - if (err) { - opaque.reject('err out'); - return; - } - //console.log('delivery-report: ' + JSON.stringify(report)); - delete report['opaque']; - - const recordMetadata = { - topicName: report.topic, - partition: report.partition, - errorCode: 0, - baseOffset: report.offset, - logAppendTime: null, - logStartOffset: null, - } + const recordMetadata = { + topicName: report.topic, + partition: report.partition, + errorCode: 0, + baseOffset: report.offset, + logAppendTime: '-1', + logStartOffset: '0', + }; - opaque.resolve(recordMetadata); - }); + opaque.resolve(recordMetadata); + } + + async #readyCb() { + if (this.#state !== ProducerState.CONNECTING && this.#state !== ProducerState.INITIALIZED_TRANSACTIONS) { + /* The connectPromiseFunc might not be set, so we throw such an error. It's a state error that we can't recover from. Probably a bug. */ + throw new error.KafkaJSError(`Ready callback called in invalid state ${this.#state}`, { code: error.ErrorCodes.ERR__STATE }); + } + + const rdKafkaConfig = this.#config(); + this.#clientName = this.#internalClient.name; - // Resolve the promise. - this.#connectPromiseFunc["resolve"](); + if (Object.hasOwn(rdKafkaConfig, 'transactional.id') && this.#state !== ProducerState.INITIALIZED_TRANSACTIONS) { + this.#state = ProducerState.INITIALIZING_TRANSACTIONS; + this.#logger.debug("Attempting to initialize transactions", this.#createProducerBindingMessageMetadata()); + this.#internalClient.initTransactions(5000 /* default: 5s */, this.#readyTransactions.bind(this)); + return; + } + + this.#state = ProducerState.CONNECTED; + this.#internalClient.setPollInBackground(true); + this.#internalClient.on('delivery-report', this.#deliveryCallback.bind(this)); + this.#logger.info("Producer connected", this.#createProducerBindingMessageMetadata()); + + // Resolve the promise. + this.#connectPromiseFunc["resolve"](); } - #errorCb(args) { - console.log('error', args); - if (this.#state === ProducerState.CONNECTING) { - this.#connectPromiseFunc["reject"](args); - } else { - // do nothing for now. - } + /** + * Callback for the event.error event, either fails the initial connect(), or logs the error. 
+ * @param {Error} err + */ + #errorCb(err) { + if (this.#state === ProducerState.CONNECTING) { + this.#connectPromiseFunc["reject"](err); + } else { + this.#logger.error(err, this.#createProducerBindingMessageMetadata()); + } } + /** + * Set up the client and connect to the bootstrap brokers. + * @returns {Promise} Resolves when connection is complete, rejects on error. + */ async connect() { - if (this.#state !== ProducerState.INIT) { - return Promise.reject("Connect has already been called elsewhere."); - } + if (this.#state !== ProducerState.INIT) { + throw new error.KafkaJSError("Connect has already been called elsewhere.", { code: error.ErrorCodes.ERR__STATE }); + } + + this.#state = ProducerState.CONNECTING; - this.#state = ProducerState.CONNECTING; - this.#internalClient = new RdKafka.Producer(await this.#config()); - this.#internalClient.on('ready', this.#readyCb.bind(this)); - this.#internalClient.on('event.error', this.#errorCb.bind(this)); - this.#internalClient.on('event.log', console.log); - - return new Promise((resolve, reject) => { - this.#connectPromiseFunc = {resolve, reject}; - console.log("Connecting...."); - this.#internalClient.connect(); - console.log("connect() called"); + const rdKafkaConfig = this.#config(); + + this.#internalClient = new RdKafka.Producer(rdKafkaConfig); + this.#internalClient.on('ready', this.#readyCb.bind(this)); + this.#internalClient.on('event.error', this.#errorCb.bind(this)); + this.#internalClient.on('error', this.#errorCb.bind(this)); + this.#internalClient.on('event.log', (msg) => loggerTrampoline(msg, this.#logger)); + + return new Promise((resolve, reject) => { + this.#connectPromiseFunc = { resolve, reject }; + this.#internalClient.connect(null, (err) => { + if (err) + reject(createKafkaJsErrorFromLibRdKafkaError(err)); }); + }); } + /** + * Disconnect from the brokers, clean-up and tear down the client. + * @returns {Promise} Resolves when disconnect is complete, rejects on error. + */ async disconnect() { + /* Not yet connected - no error. */ + if (this.#state === ProducerState.INIT) { + return; + } + + /* TODO: We should handle a case where we are connecting, we should + * await the connection and then schedule a disconnect. */ + + /* Already disconnecting, or disconnected. */ if (this.#state >= ProducerState.DISCONNECTING) { return; } + this.#state = ProducerState.DISCONNECTING; await new Promise((resolve, reject) => { const cb = (err) => { - err ? reject(err) : resolve(); + if (err) { + reject(createKafkaJsErrorFromLibRdKafkaError(err)); + return; + } this.#state = ProducerState.DISCONNECTED; - } - this.#internalClient.disconnect(5000, cb); + this.#logger.info("Producer disconnected", this.#createProducerBindingMessageMetadata()); + resolve(); + }; + this.#internalClient.disconnect(5000 /* default timeout, 5000ms */, cb); }); } + /** + * Start a transaction - can only be used with a transactional producer. + * @returns {Promise} Resolves with the producer when the transaction is started. 
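+   *
+   * @example
+   * // Illustrative sketch for a producer configured with a transactionalId;
+   * // the topic name and message are placeholders.
+   * const txn = await producer.transaction();
+   * await txn.send({ topic: 'my-topic', messages: [{ value: 'v1' }] });
+   * await txn.commit(); // or await txn.abort() on failure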
+ */ async transaction() { if (this.#state !== ProducerState.CONNECTED) { - return Promise.reject("Cannot start transaction without awaiting connect()"); + throw new error.KafkaJSError("Cannot start transaction without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); } if (this.#ongoingTransaction) { - return Promise.reject("Can only start one transaction at a time."); + throw new error.KafkaJSError("Can only start one transaction at a time.", { code: error.ErrorCodes.ERR__STATE }); } + this.#logger.debug("Attempting to begin transaction", this.#createProducerBindingMessageMetadata()); return new Promise((resolve, reject) => { this.#internalClient.beginTransaction((err) => { if (err) { - reject(err); + reject(createKafkaJsErrorFromLibRdKafkaError(err)); return; } this.#ongoingTransaction = true; @@ -186,20 +407,25 @@ class Producer { }); } + /** + * Commit the current transaction. + * @returns {Promise} Resolves when the transaction is committed. + */ async commit() { if (this.#state !== ProducerState.CONNECTED) { - return Promise.reject("Cannot commit without awaiting connect()"); + throw new error.KafkaJSError("Cannot commit without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); } if (!this.#ongoingTransaction) { - return Promise.reject("Cannot commit, no transaction ongoing."); + throw new error.KafkaJSError("Cannot commit, no transaction ongoing.", { code: error.ErrorCodes.ERR__STATE }); } + this.#logger.debug("Attempting to commit transaction", this.#createProducerBindingMessageMetadata()); return new Promise((resolve, reject) => { this.#internalClient.commitTransaction(5000 /* default: 5000ms */, err => { if (err) { // TODO: Do we reset ongoingTransaction here? - reject(err); + reject(createKafkaJsErrorFromLibRdKafkaError(err)); return; } this.#ongoingTransaction = false; @@ -208,21 +434,25 @@ class Producer { }); } - + /** + * Abort the current transaction. + * @returns {Promise} Resolves when the transaction is aborted. + */ async abort() { if (this.#state !== ProducerState.CONNECTED) { - return Promise.reject("Cannot abort without awaiting connect()"); + throw new error.KafkaJSError("Cannot abort without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); } if (!this.#ongoingTransaction) { - return Promise.reject("Cannot abort, no transaction ongoing."); + throw new error.KafkaJSError("Cannot abort, no transaction ongoing.", { code: error.ErrorCodes.ERR__STATE }); } + this.#logger.debug("Attempting to abort transaction", this.#createProducerBindingMessageMetadata()); return new Promise((resolve, reject) => { this.#internalClient.abortTransaction(5000 /* default: 5000ms */, err => { if (err) { // TODO: Do we reset ongoingTransaction here? - reject(err); + reject(createKafkaJsErrorFromLibRdKafkaError(err)); return; } this.#ongoingTransaction = false; @@ -231,33 +461,32 @@ class Producer { }); } + /** + * Send offsets for the transaction. + * @param {object} arg - The arguments to sendOffsets + * @param {Consumer} arg.consumer - The consumer to send offsets for. + * @param {import("../../types/kafkajs").TopicOffsets[]} arg.topics - The topics, partitions and the offsets to send. + * + * @returns {Promise} Resolves when the offsets are sent. 
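+   *
+   * @example
+   * // Illustrative sketch, inside an ongoing transaction; `consumer` is a connected
+   * // consumer and the topic/partition/offset values are placeholders.
+   * await producer.sendOffsets({
+   *   consumer,
+   *   topics: [{ topic: 'my-topic', partitions: [{ partition: 0, offset: '10' }] }],
+   * });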
+ */ async sendOffsets(arg) { let { consumerGroupId, topics, consumer } = arg; - if ((!consumerGroupId && !consumer) || !Array.isArray(topics) || topics.length === 0) { - return Promise.reject("sendOffsets must have the arguments {consumerGroupId: string or consumer: Consumer, topics: non-empty array"); + /* If the user has not supplied a consumer, or supplied a consumerGroupId, throw immediately. */ + if (consumerGroupId || !consumer) { + throw new error.KafkaJSError(CompatibilityErrorMessages.sendOffsetsMustProvideConsumer(), { code: error.ErrorCodes.ERR__INVALID_ARG }); } - if (this.#state !== ProducerState.CONNECTED) { - return Promise.reject("Cannot sendOffsets without awaiting connect()"); + if (!Array.isArray(topics) || topics.length === 0) { + throw new error.KafkaJSError("sendOffsets arguments are invalid", { code: error.ErrorCodes.ERR__INVALID_ARG }); } - if (!this.#ongoingTransaction) { - return Promise.reject("Cannot sendOffsets, no transaction ongoing."); + if (this.#state !== ProducerState.CONNECTED) { + throw new error.KafkaJSError("Cannot sendOffsets without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); } - // If we don't have a consumer, we must create a consumer at this point internally. - // This isn't exactly efficient, but we expect people to use either a consumer, - // or we will need to change the C/C++ code to facilitate using the consumerGroupId - // directly. - // TODO: Change the C/C++ code to facilitate this if we go to release with this. - - let consumerCreated = false; - if (!consumer) { - const config = Object.assign({ groupId: consumerGroupId }, this.#kJSConfig); - consumer = new Consumer(config); - consumerCreated = true; - await consumer.connect(); + if (!this.#ongoingTransaction) { + throw new error.KafkaJSError("Cannot sendOffsets, no transaction ongoing.", { code: error.ErrorCodes.ERR__STATE }); } return new Promise((resolve, reject) => { @@ -265,79 +494,253 @@ class Producer { this.#flattenTopicPartitionOffsets(topics).map(topicPartitionOffsetToRdKafka), consumer._getInternalConsumer(), async err => { - if (consumerCreated) - await consumer.disconnect(); if (err) - reject(err); + reject(createKafkaJsErrorFromLibRdKafkaError(err)); else resolve(); - }) + }); }); } + /** + * Check if there is an ongoing transaction. + * + * NOTE: Since Producer itself represents a transaction, and there is no distinct + * type for a transaction, this method exists on the producer. + * @returns {boolean} true if there is an ongoing transaction, false otherwise. + */ + isActive() { + return this.#ongoingTransaction; + } + + /** + * Sends a record of messages to a specific topic. + * + * @param {import('../../types/kafkajs').ProducerRecord} sendOptions - The record to send. The keys `acks`, `timeout`, and `compression` are not used, and should not be set, rather, they should be set in the global config. + * @returns {Promise} Resolves with the record metadata for the messages. 
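+   *
+   * @example
+   * // Illustrative sketch; the topic name, key and value are placeholders.
+   * const deliveries = await producer.send({
+   *   topic: 'my-topic',
+   *   messages: [{ key: 'k1', value: 'v1' }],
+   * });
+   * // `deliveries` holds one record metadata entry per topic-partition written to.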
+ */ async send(sendOptions) { - if (this.#state !== ProducerState.CONNECTED) { - return Promise.reject("Cannot send message without awaiting connect()"); + if (this.#state !== ProducerState.CONNECTED) { + throw new error.KafkaJSError("Cannot send without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); + } + + if (sendOptions === null || !(sendOptions instanceof Object)) { + throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsMandatoryMissing(), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + if (Object.hasOwn(sendOptions, 'acks')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsAcks('send'), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + if (Object.hasOwn(sendOptions, 'timeout')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsTimeout('send'), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + if (Object.hasOwn(sendOptions, 'compression')) { + throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsCompression('send'), { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + const msgPromises = []; + for (let i = 0; i < sendOptions.messages.length; i++) { + const msg = sendOptions.messages[i]; + + if (!Object.hasOwn(msg, "partition") || msg.partition === null) { + msg.partition = -1; } - if (sendOptions === null || !(sendOptions instanceof Object)) { - return Promise.reject("sendOptions must be set correctly"); + if (typeof msg.value === 'string') { + msg.value = Buffer.from(msg.value); } - // Ignore all properties except topic and messages. - // TODO: log a warning instead of ignoring. - if (!sendOptions.hasOwnProperty("topic") || !sendOptions.hasOwnProperty("messages") || !Array.isArray(sendOptions["messages"])) { - // TODO: add further validations. - return Promise.reject("sendOptions must be of the form {topic: string, messages: Message[]}"); + if (Object.hasOwn(msg, "timestamp") && msg.timestamp) { + msg.timestamp = Number(msg.timestamp); + } else { + msg.timestamp = 0; } - const msgPromises = []; - for (let i = 0; i < sendOptions.messages.length; i++) { - const msg = sendOptions.messages[i]; + msg.headers = convertToRdKafkaHeaders(msg.headers); - if (!msg.hasOwnProperty("partition") || msg.partition === null) { - msg.partition = -1; - } + msgPromises.push(new Promise((resolve, reject) => { + const opaque = { resolve, reject }; + try { + this.#internalClient.produce(sendOptions.topic, msg.partition, msg.value, msg.key, msg.timestamp, opaque, msg.headers); + } catch (err) { + reject(err); + } + })); + } - if (typeof msg.value === 'string') { - msg.value = Buffer.from(msg.value); - } + /* The delivery report will be handled by the delivery-report event handler, and we can simply wait for it here. */ - msgPromises.push(new Promise((resolve, reject) => { - const opaque = {resolve, reject}; - this.#internalClient.produce(sendOptions.topic, msg.partition, msg.value, msg.key, msg.timestamp ?? 
Date.now(), opaque, msg.headers); - })); + const recordMetadataArr = await Promise.all(msgPromises); + const topicPartitionRecordMetadata = new Map(); + for (const recordMetadata of recordMetadataArr) { + const key = `${recordMetadata.topicName},${recordMetadata.partition}`; + if (recordMetadata.baseOffset === null || !topicPartitionRecordMetadata.has(key)) { + topicPartitionRecordMetadata.set(key, recordMetadata); + continue; } - const recordMetadataArr = await Promise.all(msgPromises); - - const topicPartitionRecordMetadata = new Map(); - for (const recordMetadata of recordMetadataArr) { - const key = `${recordMetadata.topicName},${recordMetadata.partition}`; - if (recordMetadata.baseOffset == null || !topicPartitionRecordMetadata.has(key)) { - topicPartitionRecordMetadata.set(key, recordMetadata); - continue; - } - const currentRecordMetadata = topicPartitionRecordMetadata.get(key); + const currentRecordMetadata = topicPartitionRecordMetadata.get(key); - // Don't overwrite a null baseOffset - if (currentRecordMetadata.baseOffset == null) { - continue; - } + // Don't overwrite a null baseOffset + if (currentRecordMetadata.baseOffset === null) { + continue; + } - if (currentRecordMetadata.baseOffset > recordMetadata.baseOffset) { - topicPartitionRecordMetadata.set(key, recordMetadata); - } + if (currentRecordMetadata.baseOffset > recordMetadata.baseOffset) { + topicPartitionRecordMetadata.set(key, recordMetadata); } + } - const ret = []; - for (const [key, value] of topicPartitionRecordMetadata.entries()) { - value.baseOffset = value.baseOffset?.toString(); - ret.push(value); + const ret = []; + for (const value of topicPartitionRecordMetadata.values()) { + value.baseOffset = value.baseOffset?.toString(); + ret.push(value); + } + return ret; + } + + /** + * Sends a record of messages to various topics. + * + * NOTE: This method is identical to calling send() repeatedly and waiting on all the return values together. + * @param {import('../../types/kafkajs').ProducerBatch} sendOptions - The record to send. The keys `acks`, `timeout`, and `compression` are not used, and should not be set, rather, they should be set in the global config. + * @returns {Promise} Resolves with the record metadata for the messages. 
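+   *
+   * @example
+   * // Illustrative sketch; topic names and values are placeholders.
+   * await producer.sendBatch({
+   *   topicMessages: [
+   *     { topic: 'topic-a', messages: [{ value: 'a1' }] },
+   *     { topic: 'topic-b', messages: [{ value: 'b1' }] },
+   *   ],
+   * });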
+   */
+  async sendBatch(sendOptions) {
+    if (this.#state !== ProducerState.CONNECTED) {
+      throw new error.KafkaJSError("Cannot sendBatch without awaiting connect()", { code: error.ErrorCodes.ERR__STATE });
+    }
+
+    if (sendOptions === null || !(sendOptions instanceof Object)) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.sendBatchMandatoryMissing(), { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+
+    if (Object.hasOwn(sendOptions, 'acks')) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsAcks('sendBatch'), { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+    if (Object.hasOwn(sendOptions, 'timeout')) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsTimeout('sendBatch'), { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+    if (Object.hasOwn(sendOptions, 'compression')) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.sendOptionsCompression('sendBatch'), { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+
+    if (sendOptions.topicMessages !== null && !Array.isArray(sendOptions.topicMessages)) {
+      throw new error.KafkaJSError(CompatibilityErrorMessages.sendBatchMandatoryMissing(), { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+
+    if (!sendOptions.topicMessages || sendOptions.topicMessages.length === 0) {
+      return Promise.resolve([]);
+    }
+
+    // Internally, we just use send() because the batching is handled by librdkafka.
+    const sentPromises = [];
+
+    for (const topicMessage of sendOptions.topicMessages) {
+      sentPromises.push(this.send(topicMessage));
+    }
+
+    const records = await Promise.all(sentPromises);
+    return records.flat();
+  }
+
+  /**
+   * @returns {import("../../types/kafkajs").Logger} the logger associated with this producer.
+   */
+  logger() {
+    return this.#logger;
+  }
+
+  /**
+   * Change SASL credentials to be sent on the next authentication attempt.
+   *
+   * @param {string} args.username
+   * @param {string} args.password
+   * @note Only applicable if SASL authentication is being used.
+   */
+  setSaslCredentials(args = {}) {
+    if (!Object.hasOwn(args, 'username')) {
+      throw new error.KafkaJSError("username must be set for setSaslCredentials", { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+
+    if (!Object.hasOwn(args, 'password')) {
+      throw new error.KafkaJSError("password must be set for setSaslCredentials", { code: error.ErrorCodes.ERR__INVALID_ARG });
+    }
+
+    /**
+     * In case we've not started connecting yet, just modify the configuration for
+     * the first connection attempt.
+     */
+    if (this.#state < ProducerState.CONNECTING) {
+      this.#userConfig['sasl.username'] = args.username;
+      this.#userConfig['sasl.password'] = args.password;
+      if (Object.hasOwn(this.#userConfig, 'kafkaJS') && Object.hasOwn(this.#userConfig.kafkaJS, 'sasl')) {
+        this.#userConfig.kafkaJS.sasl.username = args.username;
+        this.#userConfig.kafkaJS.sasl.password = args.password;
+      }
+      return;
+    }
+
+    this.#logger.info("Setting SASL credentials", this.#createProducerBindingMessageMetadata());
+    this.#internalClient.setSaslCredentials(args.username, args.password);
+  }
+
+  /**
+   * Flushes any pending messages.
+   *
+   * Messages are batched internally by librdkafka for performance reasons.
+   * Continuously sent messages are batched up to a timeout, or up to a maximum
+   * size. Calling flush sends any pending messages immediately without
+   * waiting for this size or timeout.
+   *
+   * @param {number} args.timeout Time to try flushing for in milliseconds.
+   * @returns {Promise} Resolves on successful flush.
+ * @throws {KafkaJSTimeout} if the flush times out. + * + * @note This is only useful when using asynchronous sends. + * For example, the following code does not get any benefit from flushing, + * since `await`ing the send waits for the delivery report, and the message + * has already been sent by the time we start flushing: + * for (let i = 0; i < 100; i++) await send(...); + * await flush(...) // Not useful. + * + * However, using the following code may put these 5 messages into a batch + * and then the subsequent `flush` will send the batch altogether (as long as + * batch size, etc. are conducive to batching): + * for (let i = 0; i < 5; i++) send(...); + * await flush({timeout: 5000}); + */ + async flush(args = { timeout: 500 }) { + if (this.#state !== ProducerState.CONNECTED) { + throw new error.KafkaJSError("Cannot flush without awaiting connect()", { code: error.ErrorCodes.ERR__STATE }); + } + + if (!Object.hasOwn(args, 'timeout')) { + throw new error.KafkaJSError("timeout must be set for flushing", { code: error.ErrorCodes.ERR__INVALID_ARG }); + } + + this.#logger.debug(`Attempting to flush messages for ${args.timeout}ms`, this.#createProducerBindingMessageMetadata()); + return new Promise((resolve, reject) => { + this.#internalClient.flush(args.timeout, (err) => { + if (err) { + const kjsErr = createKafkaJsErrorFromLibRdKafkaError(err); + if (err.code === error.ErrorCodes.ERR__TIMED_OUT) { + /* See reason below for yield. Same here - but for partially processed delivery reports. */ + setTimeout(() => reject(kjsErr), 0); + } else { + reject(kjsErr); + } + return; + } + /* Yielding here allows any 'then's and 'awaits' on associated sends to be scheduled + * before flush completes, which means that the user doesn't have to yield themselves. + * It's not necessary that all the 'then's and 'awaits' will be able to run, but + * it's better than nothing. 
*/ + setTimeout(resolve, 0); + }); + }); } } -module.exports = { Producer } +module.exports = { Producer, CompressionTypes }; diff --git a/lib/kafkajs/index.js b/lib/kafkajs/index.js index a41822d9..5181979b 100644 --- a/lib/kafkajs/index.js +++ b/lib/kafkajs/index.js @@ -1,3 +1 @@ -const { Kafka } = require('./_kafka'); - -module.exports = { Kafka } +module.exports = require("./_kafka"); diff --git a/lib/producer-stream.js b/lib/producer-stream.js index bb2d28ff..57051445 100644 --- a/lib/producer-stream.js +++ b/lib/producer-stream.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -14,6 +14,7 @@ module.exports = ProducerStream; var Writable = require('stream').Writable; var util = require('util'); var ErrorCode = require('./error').codes; +var { Buffer } = require('buffer'); util.inherits(ProducerStream, Writable); @@ -78,8 +79,6 @@ function ProducerStream(producer, options) { this.connect(this.connectOptions); } - var self = this; - this.once('finish', function() { if (this.autoClose) { this.close(); @@ -89,7 +88,7 @@ function ProducerStream(producer, options) { } ProducerStream.prototype.connect = function(options) { - this.producer.connect(options, function(err, data) { + this.producer.connect(options, function(err) { if (err) { this.emit('error', err); return; @@ -258,13 +257,11 @@ ProducerStream.prototype._writev = function(data, cb) { var self = this; var len = data.length; var chunks = new Array(len); - var size = 0; for (var i = 0; i < len; i++) { var chunk = data[i].chunk; chunks[i] = chunk; - size += chunk.length; } writev(this.producer, this.topicName, chunks, function(err) { @@ -289,7 +286,7 @@ ProducerStream.prototype.close = function(cb) { self.producer.disconnect(function() { // Previously this set the producer to null. I'm not sure there is any benefit // to that other than I guess helping flag it for GC? - // https://github.com/confluentinc/confluent-kafka-js/issues/344 + // https://github.com/confluentinc/confluent-kafka-javascript/issues/344 close(); }); } else if (self.producer._isConnecting){ diff --git a/lib/producer.js b/lib/producer.js index 65b4c936..b89248fa 100644 --- a/lib/producer.js +++ b/lib/producer.js @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -70,7 +71,7 @@ function Producer(conf, topicConf) { delete conf.dr_cb; delete conf.dr_msg_cb; - // client is an initialized consumer object + // client is an initialized producer object // @see NodeKafka::Producer::Init Client.call(this, conf, Kafka.Producer, topicConf); @@ -78,7 +79,7 @@ function Producer(conf, topicConf) { this.globalConfig = conf; this.topicConfig = topicConf; this.defaultTopic = gTopic || null; - this.defaultPartition = gPart == null ? -1 : gPart; + this.defaultPartition = gPart === null ? -1 : gPart; this.sentMessages = 0; @@ -133,7 +134,7 @@ Producer.prototype.produce = function(topic, partition, message, key, timestamp, this.sentMessages++; - partition = partition == null ? this.defaultPartition : partition; + partition = partition === null ? 
this.defaultPartition : partition; return this._errorWrap( this._client.produce(topic, partition, message, key, timestamp, opaque, headers)); @@ -213,7 +214,7 @@ Producer.prototype.setPollInterval = function(interval) { this.pollInterval = setInterval(function() { try { self.poll(); - } catch (e) { + } catch { // We can probably ignore errors here as far as broadcasting. // Disconnection issues will get handled below } @@ -229,6 +230,23 @@ Producer.prototype.setPollInterval = function(interval) { return this; }; +/** + * Set automatic polling for events on the librdkafka background thread. + * + * This provides several advantages over `setPollInterval`, as the polling + * does not happen on the event loop, but on the C thread spawned by librdkafka, + * and can be more efficient for high-throughput producers. + * + * @param {boolean} set Whether to poll in the background or not. + * @note If set = true, this will disable any polling interval set by `setPollInterval`. + */ +Producer.prototype.setPollInBackground = function(set) { + if (set) { + this.setPollInterval(0); // Clear poll interval from JS. + } + this._client.setPollInBackground(set); +}; + /** * Flush the producer * diff --git a/lib/producer/high-level-producer.js b/lib/producer/high-level-producer.js index 506c2acd..cb7b8f27 100644 --- a/lib/producer/high-level-producer.js +++ b/lib/producer/high-level-producer.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -11,11 +11,9 @@ module.exports = HighLevelProducer; var util = require('util'); var Producer = require('../producer'); -var LibrdKafkaError = require('../error'); var EventEmitter = require('events').EventEmitter; var RefCounter = require('../tools/ref-counter'); var shallowCopy = require('../util').shallowCopy; -var isObject = require('../util').isObject; util.inherits(HighLevelProducer, Producer); @@ -45,7 +43,37 @@ function createSerializer(serializer) { // operation if the number of parameters the function accepts is > 1 return { apply: applyFn, - async: serializer.length > 1 + async: serializer.length > 1, + needsTopic: false + }; +} + +/** + * Create a serializer that additionally takes the topic name + * + * Method simply wraps a serializer provided by a user + * so it adds context to the error + * + * @returns {function} Serialization function + */ +function createTopicSerializer(serializer) { + var applyFn = function serializationWrapper(t, v, cb) { + try { + return cb ? 
serializer(t, v, cb) : serializer(t, v); + } catch (e) { + var modifiedError = new Error('Could not serialize value: ' + e.message); + modifiedError.value = v; + modifiedError.serializer = serializer; + throw modifiedError; + } + }; + + // We can check how many parameters the function has and activate the asynchronous + // operation if the number of parameters the function accepts is > 2 + return { + apply: applyFn, + async: serializer.length > 2, + needsTopic: true }; } @@ -109,7 +137,7 @@ function HighLevelProducer(conf, topicConf) { self._hl.pollingRefTimeout = setInterval(function() { try { self.poll(); - } catch (e) { + } catch { if (!self._isConnected) { // If we got disconnected for some reason there is no point // in polling anymore @@ -258,10 +286,20 @@ HighLevelProducer.prototype._modifiedProduce = function(topic, partition, messag try { if (this.valueSerializer.async) { - // If this is async we need to give it a callback - this.valueSerializer.apply(message, valueSerializerCallback); + if (this.valueSerializer.needsTopic) { + // If this is async we need to give it a callback + this.valueSerializer.apply(topic, message, valueSerializerCallback); + } else { + // If this is async we need to give it a callback + this.valueSerializer.apply(message, valueSerializerCallback); + } } else { - var serializedValue = this.valueSerializer.apply(message); + var serializedValue; + if (this.valueSerializer.needsTopic) { + serializedValue = this.valueSerializer.apply(topic, message); + } else { + serializedValue = this.valueSerializer.apply(message); + } // Check if we were returned a promise in order to support promise behavior if (serializedValue && typeof serializedValue.then === 'function' && @@ -274,10 +312,20 @@ HighLevelProducer.prototype._modifiedProduce = function(topic, partition, messag } if (this.keySerializer.async) { - // If this is async we need to give it a callback - this.keySerializer.apply(key, keySerializerCallback); + if (this.valueSerializer.needsTopic) { + // If this is async we need to give it a callback + this.keySerializer.apply(topic, key, keySerializerCallback); + } else { + // If this is async we need to give it a callback + this.keySerializer.apply(key, keySerializerCallback); + } } else { - var serializedKey = this.keySerializer.apply(key); + var serializedKey; + if (this.valueSerializer.needsTopic) { + serializedKey = this.keySerializer.apply(topic, key); + } else { + serializedKey = this.keySerializer.apply(key); + } // Check if we were returned a promise in order to support promise behavior if (serializedKey && typeof serializedKey.then === 'function' && @@ -321,3 +369,21 @@ HighLevelProducer.prototype.setKeySerializer = function(serializer) { HighLevelProducer.prototype.setValueSerializer = function(serializer) { this.valueSerializer = createSerializer(serializer); }; + +/** + * Set the topic-key serializer + * + * A serializer that takes the topic name in addition to the key. + */ +HighLevelProducer.prototype.setTopicKeySerializer = function(serializer) { + this.keySerializer = createTopicSerializer(serializer); +}; + +/** + * Set the topic-value serializer + * + * A serializer that takes the topic name in addition to the value. 
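+ *
+ * @example
+ * // Illustrative sketch of a synchronous, topic-aware value serializer.
+ * producer.setTopicValueSerializer(function (topic, value) {
+ *   return Buffer.from(JSON.stringify(value));
+ * });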
+ */ +HighLevelProducer.prototype.setTopicValueSerializer = function(serializer) { + this.valueSerializer = createTopicSerializer(serializer); +}; diff --git a/lib/rdkafka.js b/lib/rdkafka.js index c5e2e088..58d79b5d 100644 --- a/lib/rdkafka.js +++ b/lib/rdkafka.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -31,4 +31,4 @@ module.exports = { Topic: Topic, features: features, librdkafkaVersion: lib.librdkafkaVersion, -} +}; diff --git a/lib/tools/ref-counter.js b/lib/tools/ref-counter.js index a4800466..6347070a 100644 --- a/lib/tools/ref-counter.js +++ b/lib/tools/ref-counter.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/lib/topic-partition.js b/lib/topic-partition.js index dc22375e..fd709068 100644 --- a/lib/topic-partition.js +++ b/lib/topic-partition.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/lib/topic.js b/lib/topic.js index b591ceb7..df8d952f 100644 --- a/lib/topic.js +++ b/lib/topic.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -17,7 +17,7 @@ var topicKeyLength = topicKey.length; // Take all of the topic special codes from librdkafka and add them // to the object // You can find this list in the C++ code at -// https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L1250 +// https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L1250 for (var key in librdkafka.topic) { // Skip it if it doesn't start with ErrorCode if (key.indexOf('RdKafka::Topic::') !== 0) { diff --git a/lib/util.js b/lib/util.js index 1b0187c0..2d4ca508 100644 --- a/lib/util.js +++ b/lib/util.js @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -16,7 +17,7 @@ util.shallowCopy = function (obj) { var copy = {}; for (var k in obj) { - if (obj.hasOwnProperty(k)) { + if (Object.hasOwn(obj, k)) { copy[k] = obj[k]; } } @@ -27,3 +28,28 @@ util.shallowCopy = function (obj) { util.isObject = function (obj) { return obj && typeof obj === 'object'; }; + +// Convert Map or object to a list of [key, value, key, value...]. 
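+// e.g. (illustrative): dictToStringList({ a: 1, b: 2 }) -> ['a', '1', 'b', '2'],
+// dictToStringList(new Map([['a', 1]])) -> ['a', '1']; returns null for non-object input.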
+util.dictToStringList = function (mapOrObject) { + let list = null; + if (mapOrObject && (mapOrObject instanceof Map)) { + list = + Array + .from(mapOrObject).reduce((acc, [key, value]) => { + acc.push(key, value); + return acc; + }, []) + .map(v => String(v)); + } else if (util.isObject(mapOrObject)) { + list = + Object + .entries(mapOrObject).reduce((acc, [key, value]) => { + acc.push(key, value); + return acc; + }, []) + .map(v => String(v)); + } + return list; +}; + +util.bindingVersion = 'v0.2.1'; diff --git a/librdkafka.js b/librdkafka.js index 35e4e149..87ccb864 100644 --- a/librdkafka.js +++ b/librdkafka.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -7,6 +7,6 @@ * of the MIT license. See the LICENSE.txt file for details. */ -var kafka = require('bindings')('confluent-kafka-js'); +var kafka = require('bindings')('confluent-kafka-javascript'); module.exports = kafka; diff --git a/make_docs.sh b/make_docs.sh index 4ab333e8..bdb4d6d2 100755 --- a/make_docs.sh +++ b/make_docs.sh @@ -6,8 +6,8 @@ if [[ `git status --porcelain` ]]; then exit 1 fi -# REPO=git@github.com:confluentinc/confluent-kafka-js.git -REPO=https://github.com/confluentinc/confluent-kafka-js.git +# REPO=git@github.com:confluentinc/confluent-kafka-javascript.git +REPO=https://github.com/confluentinc/confluent-kafka-javascript.git git remote add deploy $REPO diff --git a/package-lock.json b/package-lock.json index 8b9ef7fd..c69d17e0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,2576 +1,9171 @@ { - "name": "confluent-kafka-js", - "version": "v2.18.0", + "name": "@confluentinc/kafka-javascript", + "version": "v0.2.1", "lockfileVersion": 3, "requires": true, "packages": { "": { - "name": "confluent-kafka-js", - "version": "v2.18.0", + "name": "@confluentinc/kafka-javascript", + "version": "v0.2.1", "hasInstallScript": true, "license": "MIT", + "workspaces": [ + "schemaregistry", + "schemaregistry-examples" + ], "dependencies": { + "@aws-sdk/client-kms": "^3.637.0", + "@azure/identity": "^4.4.1", + "@azure/keyvault-keys": "^4.8.0", + "@bufbuild/protobuf": "^2.0.0", + "@criteria/json-schema": "^0.10.0", + "@criteria/json-schema-validation": "^0.10.0", + "@google-cloud/kms": "^4.5.0", + "@hackbg/miscreant-esm": "^0.3.2-patch.3", "@mapbox/node-pre-gyp": "^1.0.11", + "@smithy/types": "^3.3.0", + "@types/simple-oauth2": "^5.0.7", + "@types/validator": "^13.12.0", + "ajv": "^8.17.1", + "async-mutex": "^0.5.0", + "avsc": "^5.7.7", + "axios": "^1.7.3", "bindings": "^1.3.1", - "nan": "^2.17.0" + "json-stringify-deterministic": "^1.0.12", + "lru-cache": "^11.0.0", + "nan": "^2.17.0", + "node-vault": "^0.10.2", + "simple-oauth2": "^5.1.0", + "validator": "^13.12.0" }, "devDependencies": { - "@types/node": "^20.4.5", + "@bufbuild/buf": "^1.37.0", + "@bufbuild/protoc-gen-es": "^2.0.0", + "@eslint/js": "^9.9.0", + "@types/eslint__js": "^8.42.3", + "@types/jest": "^29.5.13", + "@types/node": "^20.16.1", "bluebird": "^3.5.3", - "jsdoc": "^3.4.0", - "jshint": "^2.10.1", - "mocha": "^10.2.0", + "eslint": "^8.57.0", + "eslint-plugin-jest": "^28.6.0", + "jest": "^29.7.0", + "jsdoc": "^4.0.2", + "mocha": "^10.7.0", "node-gyp": "^9.3.1", - "typescript": "^5.1.6" + "ts-jest": "^29.2.5", + "typescript": "^5.5.4", + "typescript-eslint": "^8.2.0" }, "engines": { - "node": ">=14.0.0" + "node": ">=18.0.0" } }, - "node_modules/@babel/parser": { - "version": "7.23.0", - 
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "node_modules/@ampproject/remapping": { + "version": "2.3.0", "dev": true, - "bin": { - "parser": "bin/babel-parser.js" + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "dev": true, + "node_modules/@aws-crypto/sha256-browser": { + "version": "5.2.0", + "license": "Apache-2.0", "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" + "@aws-crypto/sha256-js": "^5.2.0", + "@aws-crypto/supports-web-crypto": "^5.2.0", + "@aws-crypto/util": "^5.2.0", + "@aws-sdk/types": "^3.222.0", + "@aws-sdk/util-locate-window": "^3.0.0", + "@smithy/util-utf8": "^2.0.0", + "tslib": "^2.6.2" } }, - "node_modules/@isaacs/cliui/node_modules/ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true, - "engines": { - "node": ">=12" + "node_modules/@aws-crypto/sha256-browser/node_modules/@smithy/is-array-buffer": { + "version": "2.2.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/@isaacs/cliui/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "node": ">=14.0.0" } }, - "node_modules/@isaacs/cliui/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "node_modules/@isaacs/cliui/node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, + "node_modules/@aws-crypto/sha256-browser/node_modules/@smithy/util-buffer-from": { + "version": "2.2.0", + "license": "Apache-2.0", "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" + "@smithy/is-array-buffer": "^2.2.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=14.0.0" } }, - "node_modules/@isaacs/cliui/node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": 
"sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "dev": true, + "node_modules/@aws-crypto/sha256-browser/node_modules/@smithy/util-utf8": { + "version": "2.3.0", + "license": "Apache-2.0", "dependencies": { - "ansi-regex": "^6.0.1" + "@smithy/util-buffer-from": "^2.2.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" + "node": ">=14.0.0" } }, - "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "dev": true, + "node_modules/@aws-crypto/sha256-js": { + "version": "5.2.0", + "license": "Apache-2.0", "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" + "@aws-crypto/util": "^5.2.0", + "@aws-sdk/types": "^3.222.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "node": ">=16.0.0" } }, - "node_modules/@mapbox/node-pre-gyp": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz", - "integrity": "sha512-Yhlar6v9WQgUp/He7BdgzOz8lqMQ8sU+jkCq7Wx8Myc5YFJLbEe7lgui/V7G1qB1DJykHSGwreceSaD60Y0PUQ==", + "node_modules/@aws-crypto/supports-web-crypto": { + "version": "5.2.0", + "license": "Apache-2.0", "dependencies": { - "detect-libc": "^2.0.0", - "https-proxy-agent": "^5.0.0", - "make-dir": "^3.1.0", - "node-fetch": "^2.6.7", - "nopt": "^5.0.0", - "npmlog": "^5.0.1", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.11" - }, - "bin": { - "node-pre-gyp": "bin/node-pre-gyp" + "tslib": "^2.6.2" } }, - "node_modules/@npmcli/fs": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@npmcli/fs/-/fs-3.1.0.tgz", - "integrity": "sha512-7kZUAaLscfgbwBQRbvdMYaZOWyMEcPTH/tJjnyAWJ/dvvs9Ef+CERx/qJb9GExJpl1qipaDGn7KqHnFGGixd0w==", - "dev": true, + "node_modules/@aws-crypto/util": { + "version": "5.2.0", + "license": "Apache-2.0", "dependencies": { - "semver": "^7.3.5" - }, - "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "@aws-sdk/types": "^3.222.0", + "@smithy/util-utf8": "^2.0.0", + "tslib": "^2.6.2" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "optional": true, + "node_modules/@aws-crypto/util/node_modules/@smithy/is-array-buffer": { + "version": "2.2.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, "engines": { - "node": ">=14" + "node": ">=14.0.0" } }, - "node_modules/@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true, + "node_modules/@aws-crypto/util/node_modules/@smithy/util-buffer-from": { + "version": "2.2.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/is-array-buffer": "^2.2.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">= 10" + "node": ">=14.0.0" } }, - "node_modules/@types/linkify-it": { - "version": "3.0.4", - "resolved": 
"https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.4.tgz", - "integrity": "sha512-hPpIeeHb/2UuCw06kSNAOVWgehBLXEo0/fUs0mw3W2qhqX89PI2yvok83MnuctYGCPrabGIoi0fFso4DQ+sNUQ==", - "dev": true - }, - "node_modules/@types/markdown-it": { - "version": "12.2.3", - "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-12.2.3.tgz", - "integrity": "sha512-GKMHFfv3458yYy+v/N8gjufHO6MSZKCOXpZc5GXIWWy8uldwfmPn98vp81gZ5f9SVw8YYBctgfJ22a2d7AOMeQ==", - "dev": true, + "node_modules/@aws-crypto/util/node_modules/@smithy/util-utf8": { + "version": "2.3.0", + "license": "Apache-2.0", "dependencies": { - "@types/linkify-it": "*", - "@types/mdurl": "*" + "@smithy/util-buffer-from": "^2.2.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=14.0.0" } }, - "node_modules/@types/mdurl": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.4.tgz", - "integrity": "sha512-ARVxjAEX5TARFRzpDRVC6cEk0hUIXCCwaMhz8y7S1/PxU6zZS1UMjyobz7q4w/D/R552r4++EhwmXK1N2rAy0A==", - "dev": true + "node_modules/@aws-sdk/client-kms": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/client-sso-oidc": "3.637.0", + "@aws-sdk/client-sts": "3.637.0", + "@aws-sdk/core": "3.635.0", + "@aws-sdk/credential-provider-node": "3.637.0", + "@aws-sdk/middleware-host-header": "3.620.0", + "@aws-sdk/middleware-logger": "3.609.0", + "@aws-sdk/middleware-recursion-detection": "3.620.0", + "@aws-sdk/middleware-user-agent": "3.637.0", + "@aws-sdk/region-config-resolver": "3.614.0", + "@aws-sdk/types": "3.609.0", + "@aws-sdk/util-endpoints": "3.637.0", + "@aws-sdk/util-user-agent-browser": "3.609.0", + "@aws-sdk/util-user-agent-node": "3.614.0", + "@smithy/config-resolver": "^3.0.5", + "@smithy/core": "^2.4.0", + "@smithy/fetch-http-handler": "^3.2.4", + "@smithy/hash-node": "^3.0.3", + "@smithy/invalid-dependency": "^3.0.3", + "@smithy/middleware-content-length": "^3.0.5", + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-retry": "^3.0.15", + "@smithy/middleware-serde": "^3.0.3", + "@smithy/middleware-stack": "^3.0.3", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.15", + "@smithy/util-defaults-mode-node": "^3.0.15", + "@smithy/util-endpoints": "^2.0.5", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-retry": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/client-sso": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.635.0", + "@aws-sdk/middleware-host-header": "3.620.0", + "@aws-sdk/middleware-logger": "3.609.0", + "@aws-sdk/middleware-recursion-detection": "3.620.0", + "@aws-sdk/middleware-user-agent": "3.637.0", + "@aws-sdk/region-config-resolver": "3.614.0", + "@aws-sdk/types": "3.609.0", + "@aws-sdk/util-endpoints": "3.637.0", + "@aws-sdk/util-user-agent-browser": "3.609.0", + "@aws-sdk/util-user-agent-node": "3.614.0", + "@smithy/config-resolver": "^3.0.5", + "@smithy/core": "^2.4.0", + 
"@smithy/fetch-http-handler": "^3.2.4", + "@smithy/hash-node": "^3.0.3", + "@smithy/invalid-dependency": "^3.0.3", + "@smithy/middleware-content-length": "^3.0.5", + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-retry": "^3.0.15", + "@smithy/middleware-serde": "^3.0.3", + "@smithy/middleware-stack": "^3.0.3", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.15", + "@smithy/util-defaults-mode-node": "^3.0.15", + "@smithy/util-endpoints": "^2.0.5", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-retry": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/client-sso-oidc": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/core": "3.635.0", + "@aws-sdk/credential-provider-node": "3.637.0", + "@aws-sdk/middleware-host-header": "3.620.0", + "@aws-sdk/middleware-logger": "3.609.0", + "@aws-sdk/middleware-recursion-detection": "3.620.0", + "@aws-sdk/middleware-user-agent": "3.637.0", + "@aws-sdk/region-config-resolver": "3.614.0", + "@aws-sdk/types": "3.609.0", + "@aws-sdk/util-endpoints": "3.637.0", + "@aws-sdk/util-user-agent-browser": "3.609.0", + "@aws-sdk/util-user-agent-node": "3.614.0", + "@smithy/config-resolver": "^3.0.5", + "@smithy/core": "^2.4.0", + "@smithy/fetch-http-handler": "^3.2.4", + "@smithy/hash-node": "^3.0.3", + "@smithy/invalid-dependency": "^3.0.3", + "@smithy/middleware-content-length": "^3.0.5", + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-retry": "^3.0.15", + "@smithy/middleware-serde": "^3.0.3", + "@smithy/middleware-stack": "^3.0.3", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.15", + "@smithy/util-defaults-mode-node": "^3.0.15", + "@smithy/util-endpoints": "^2.0.5", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-retry": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sts": "^3.637.0" + } + }, + "node_modules/@aws-sdk/client-sts": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-crypto/sha256-browser": "5.2.0", + "@aws-crypto/sha256-js": "5.2.0", + "@aws-sdk/client-sso-oidc": "3.637.0", + "@aws-sdk/core": "3.635.0", + "@aws-sdk/credential-provider-node": "3.637.0", + "@aws-sdk/middleware-host-header": "3.620.0", + "@aws-sdk/middleware-logger": "3.609.0", + "@aws-sdk/middleware-recursion-detection": "3.620.0", + "@aws-sdk/middleware-user-agent": "3.637.0", + "@aws-sdk/region-config-resolver": "3.614.0", + "@aws-sdk/types": "3.609.0", + "@aws-sdk/util-endpoints": "3.637.0", + "@aws-sdk/util-user-agent-browser": "3.609.0", + "@aws-sdk/util-user-agent-node": "3.614.0", + "@smithy/config-resolver": "^3.0.5", + 
"@smithy/core": "^2.4.0", + "@smithy/fetch-http-handler": "^3.2.4", + "@smithy/hash-node": "^3.0.3", + "@smithy/invalid-dependency": "^3.0.3", + "@smithy/middleware-content-length": "^3.0.5", + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-retry": "^3.0.15", + "@smithy/middleware-serde": "^3.0.3", + "@smithy/middleware-stack": "^3.0.3", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-body-length-node": "^3.0.0", + "@smithy/util-defaults-mode-browser": "^3.0.15", + "@smithy/util-defaults-mode-node": "^3.0.15", + "@smithy/util-endpoints": "^2.0.5", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-retry": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/core": { + "version": "3.635.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/core": "^2.4.0", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/property-provider": "^3.1.3", + "@smithy/protocol-http": "^4.1.0", + "@smithy/signature-v4": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/util-middleware": "^3.0.3", + "fast-xml-parser": "4.4.1", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/credential-provider-env": { + "version": "3.620.1", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/types": "3.609.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/credential-provider-http": { + "version": "3.635.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/types": "3.609.0", + "@smithy/fetch-http-handler": "^3.2.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/property-provider": "^3.1.3", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/util-stream": "^3.1.3", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@aws-sdk/credential-provider-ini": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/credential-provider-env": "3.620.1", + "@aws-sdk/credential-provider-http": "3.635.0", + "@aws-sdk/credential-provider-process": "3.620.1", + "@aws-sdk/credential-provider-sso": "3.637.0", + "@aws-sdk/credential-provider-web-identity": "3.621.0", + "@aws-sdk/types": "3.609.0", + "@smithy/credential-provider-imds": "^3.2.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sts": "^3.637.0" + } }, - "node_modules/@types/node": { - "version": "20.8.10", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.10.tgz", - "integrity": "sha512-TlgT8JntpcbmKUFzjhsyhGfP2fsiz1Mv56im6enJ905xG1DAYesxJaeSbGqQmAw8OWPdhyJGhGSQGKRNJ45u9w==", - "dev": true, + "node_modules/@aws-sdk/credential-provider-node": { + "version": "3.637.0", + "license": "Apache-2.0", "dependencies": { - "undici-types": "~5.26.4" + "@aws-sdk/credential-provider-env": "3.620.1", + "@aws-sdk/credential-provider-http": "3.635.0", + 
"@aws-sdk/credential-provider-ini": "3.637.0", + "@aws-sdk/credential-provider-process": "3.620.1", + "@aws-sdk/credential-provider-sso": "3.637.0", + "@aws-sdk/credential-provider-web-identity": "3.621.0", + "@aws-sdk/types": "3.609.0", + "@smithy/credential-provider-imds": "^3.2.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, - "node_modules/abbrev": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz", - "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==" - }, - "node_modules/agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "node_modules/@aws-sdk/credential-provider-process": { + "version": "3.620.1", + "license": "Apache-2.0", "dependencies": { - "debug": "4" + "@aws-sdk/types": "3.609.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 6.0.0" + "node": ">=16.0.0" } }, - "node_modules/agentkeepalive": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", - "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", - "dev": true, + "node_modules/@aws-sdk/credential-provider-sso": { + "version": "3.637.0", + "license": "Apache-2.0", "dependencies": { - "humanize-ms": "^1.2.1" + "@aws-sdk/client-sso": "3.637.0", + "@aws-sdk/token-providers": "3.614.0", + "@aws-sdk/types": "3.609.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 8.0.0" + "node": ">=16.0.0" } }, - "node_modules/aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dev": true, + "node_modules/@aws-sdk/credential-provider-web-identity": { + "version": "3.621.0", + "license": "Apache-2.0", "dependencies": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" + "@aws-sdk/types": "3.609.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sts": "^3.621.0" } }, - "node_modules/ansi-colors": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.1.tgz", - "integrity": "sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA==", - "dev": true, + "node_modules/@aws-sdk/middleware-host-header": { + "version": "3.620.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/types": "3.609.0", + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=6" + "node": ">=16.0.0" } }, - "node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "node_modules/@aws-sdk/middleware-logger": { + "version": "3.609.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/types": "3.609.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=8" + "node": ">=16.0.0" } }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, + "node_modules/@aws-sdk/middleware-recursion-detection": { + "version": "3.620.0", + "license": "Apache-2.0", "dependencies": { - "color-convert": "^2.0.1" + "@aws-sdk/types": "3.609.0", + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "node": ">=16.0.0" } }, - "node_modules/anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "dev": true, + "node_modules/@aws-sdk/middleware-user-agent": { + "version": "3.637.0", + "license": "Apache-2.0", "dependencies": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" + "@aws-sdk/types": "3.609.0", + "@aws-sdk/util-endpoints": "3.637.0", + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 8" + "node": ">=16.0.0" } }, - "node_modules/aproba": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/aproba/-/aproba-2.0.0.tgz", - "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==" - }, - "node_modules/are-we-there-yet": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz", - "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==", + "node_modules/@aws-sdk/region-config-resolver": { + "version": "3.614.0", + "license": "Apache-2.0", "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" + "@aws-sdk/types": "3.609.0", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/types": "^3.3.0", + "@smithy/util-config-provider": "^3.0.0", + "@smithy/util-middleware": "^3.0.3", + "tslib": "^2.6.2" }, "engines": { - "node": ">=10" + "node": ">=16.0.0" } }, - "node_modules/are-we-there-yet/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "node_modules/@aws-sdk/token-providers": { + "version": "3.614.0", + "license": "Apache-2.0", "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" + "@aws-sdk/types": "3.609.0", + "@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 6" + "node": ">=16.0.0" + }, + "peerDependencies": { + "@aws-sdk/client-sso-oidc": "^3.614.0" } }, - "node_modules/are-we-there-yet/node_modules/string_decoder": { - "version": "1.3.0", - "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "node_modules/@aws-sdk/types": { + "version": "3.609.0", + "license": "Apache-2.0", "dependencies": { - "safe-buffer": "~5.2.0" + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "dev": true, + "node_modules/@aws-sdk/util-endpoints": { + "version": "3.637.0", + "license": "Apache-2.0", + "dependencies": { + "@aws-sdk/types": "3.609.0", + "@smithy/types": "^3.3.0", + "@smithy/util-endpoints": "^2.0.5", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=8" + "node": ">=16.0.0" } }, - "node_modules/bindings": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", - "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "node_modules/@aws-sdk/util-locate-window": { + "version": "3.568.0", + "license": "Apache-2.0", "dependencies": { - "file-uri-to-path": "1.0.0" + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "node_modules/@aws-sdk/util-user-agent-browser": { + "version": "3.609.0", + "license": "Apache-2.0", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "@aws-sdk/types": "3.609.0", + "@smithy/types": "^3.3.0", + "bowser": "^2.11.0", + "tslib": "^2.6.2" } }, - "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, + "node_modules/@aws-sdk/util-user-agent-node": { + "version": "3.614.0", + "license": "Apache-2.0", "dependencies": { - "fill-range": "^7.0.1" + "@aws-sdk/types": "3.609.0", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=16.0.0" + }, + "peerDependencies": { + "aws-crt": ">=1.0.0" + }, + "peerDependenciesMeta": { + "aws-crt": { + "optional": true + } } }, - "node_modules/browser-stdout": { - "version": "1.3.1", - "resolved": 
"https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", - "dev": true - }, - "node_modules/cacache": { - "version": "17.1.4", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-17.1.4.tgz", - "integrity": "sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A==", - "dev": true, + "node_modules/@azure/abort-controller": { + "version": "1.1.0", + "license": "MIT", "dependencies": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^7.7.1", - "minipass": "^7.0.3", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" + "tslib": "^2.2.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">=12.0.0" } }, - "node_modules/cacache/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, + "node_modules/@azure/core-auth": { + "version": "1.7.2", + "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" + "@azure/abort-controller": "^2.0.0", + "@azure/core-util": "^1.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" } }, - "node_modules/cacache/node_modules/glob": { - "version": "10.3.10", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", - "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", - "dev": true, + "node_modules/@azure/core-auth/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^2.3.5", - "minimatch": "^9.0.1", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", - "path-scurry": "^1.10.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" + "tslib": "^2.6.2" }, "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">=18.0.0" } }, - "node_modules/cacache/node_modules/minimatch": { - "version": "9.0.3", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", - "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", - "dev": true, + "node_modules/@azure/core-client": { + "version": "1.9.2", + "license": "MIT", "dependencies": { - "brace-expansion": "^2.0.1" + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.4.0", + "@azure/core-rest-pipeline": "^1.9.1", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.6.1", + "@azure/logger": "^1.0.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">=18.0.0" } }, - "node_modules/cacache/node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", - "dev": true, + "node_modules/@azure/core-client/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, "engines": { - "node": ">=16 || 14 >=14.17" + 
"node": ">=18.0.0" } }, - "node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "dev": true, - "engines": { - "node": ">=10" + "node_modules/@azure/core-http-compat": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-client": "^1.3.0", + "@azure/core-rest-pipeline": "^1.3.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=18.0.0" } }, - "node_modules/catharsis": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.9.0.tgz", - "integrity": "sha512-prMTQVpcns/tzFgFVkVp6ak6RykZyWb3gu8ckUpd6YkTlacOd3DXGJjIpD4Q6zJirizvaiAjSSHlOsA+6sNh2A==", - "dev": true, + "node_modules/@azure/core-http-compat/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", "dependencies": { - "lodash": "^4.17.15" + "tslib": "^2.6.2" }, "engines": { - "node": ">= 10" + "node": ">=18.0.0" } }, - "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, + "node_modules/@azure/core-lro": { + "version": "2.7.2", + "license": "MIT", "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" + "@azure/abort-controller": "^2.0.0", + "@azure/core-util": "^1.2.0", + "@azure/logger": "^1.0.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "node": ">=18.0.0" } }, - "node_modules/chalk/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, + "node_modules/@azure/core-lro/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", "dependencies": { - "has-flag": "^4.0.0" + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=18.0.0" } }, - "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "dev": true, - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], + "node_modules/@azure/core-paging": { + "version": "1.6.2", + "license": "MIT", "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" + "tslib": "^2.6.2" }, "engines": { - "node": ">= 8.10.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" + "node": ">=18.0.0" } }, - "node_modules/chownr": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", - "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "node_modules/@azure/core-rest-pipeline": { + "version": "1.16.3", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^2.0.0", + "@azure/core-auth": "^1.4.0", + "@azure/core-tracing": "^1.0.1", + "@azure/core-util": 
"^1.9.0", + "@azure/logger": "^1.0.0", + "http-proxy-agent": "^7.0.0", + "https-proxy-agent": "^7.0.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=10" + "node": ">=18.0.0" } }, - "node_modules/clean-stack": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true, + "node_modules/@azure/core-rest-pipeline/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, "engines": { - "node": ">=6" + "node": ">=18.0.0" } }, - "node_modules/cli": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/cli/-/cli-1.0.1.tgz", - "integrity": "sha512-41U72MB56TfUMGndAKK8vJ78eooOD4Z5NOL4xEfjc0c23s+6EYKXlXsmACBVclLP1yOfWCgEganVzddVrSNoTg==", - "dev": true, + "node_modules/@azure/core-rest-pipeline/node_modules/agent-base": { + "version": "7.1.1", + "license": "MIT", "dependencies": { - "exit": "0.1.2", - "glob": "^7.1.1" + "debug": "^4.3.4" }, "engines": { - "node": ">=0.2.5" + "node": ">= 14" } }, - "node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dev": true, + "node_modules/@azure/core-rest-pipeline/node_modules/http-proxy-agent": { + "version": "7.0.2", + "license": "MIT", "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" } }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, + "node_modules/@azure/core-rest-pipeline/node_modules/https-proxy-agent": { + "version": "7.0.5", + "license": "MIT", "dependencies": { - "color-name": "~1.1.4" + "agent-base": "^7.0.2", + "debug": "4" }, "engines": { - "node": ">=7.0.0" + "node": ">= 14" } }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "node_modules/color-support": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-support/-/color-support-1.1.3.tgz", - "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", - "bin": { - "color-support": "bin.js" + "node_modules/@azure/core-tracing": { + "version": "1.1.2", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" } }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" - }, - "node_modules/console-browserify": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-browserify/-/console-browserify-1.1.0.tgz", - "integrity": "sha512-duS7VP5pvfsNLDvL1O4VOEbw37AI3A4ZUQYemvDlnpGrNu9tprR7BYWpDYwC0Xia0Zxz5ZupdiIrUp0GH1aXfg==", - "dev": true, + "node_modules/@azure/core-util": { + "version": "1.9.2", 
+ "license": "MIT", "dependencies": { - "date-now": "^0.1.4" + "@azure/abort-controller": "^2.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=18.0.0" } }, - "node_modules/console-control-strings": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/console-control-strings/-/console-control-strings-1.1.0.tgz", - "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==" - }, - "node_modules/core-util-is": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", - "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", - "dev": true - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, + "node_modules/@azure/core-util/node_modules/@azure/abort-controller": { + "version": "2.1.2", + "license": "MIT", "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" + "tslib": "^2.6.2" }, "engines": { - "node": ">= 8" + "node": ">=18.0.0" } }, - "node_modules/date-now": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/date-now/-/date-now-0.1.4.tgz", - "integrity": "sha512-AsElvov3LoNB7tf5k37H2jYSB+ZZPMT5sG2QjJCcdlV5chIv6htBUBUui2IKRjgtKAKtCBN7Zbwa+MtwLjSeNw==", - "dev": true + "node_modules/@azure/identity": { + "version": "4.4.1", + "license": "MIT", + "dependencies": { + "@azure/abort-controller": "^1.0.0", + "@azure/core-auth": "^1.5.0", + "@azure/core-client": "^1.9.2", + "@azure/core-rest-pipeline": "^1.1.0", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.3.0", + "@azure/logger": "^1.0.0", + "@azure/msal-browser": "^3.14.0", + "@azure/msal-node": "^2.9.2", + "events": "^3.0.0", + "jws": "^4.0.0", + "open": "^8.0.0", + "stoppable": "^1.1.0", + "tslib": "^2.2.0" + }, + "engines": { + "node": ">=18.0.0" + } }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "node_modules/@azure/keyvault-keys": { + "version": "4.8.0", + "license": "MIT", "dependencies": { - "ms": "2.1.2" + "@azure/abort-controller": "^1.0.0", + "@azure/core-auth": "^1.3.0", + "@azure/core-client": "^1.5.0", + "@azure/core-http-compat": "^2.0.1", + "@azure/core-lro": "^2.2.0", + "@azure/core-paging": "^1.1.1", + "@azure/core-rest-pipeline": "^1.8.1", + "@azure/core-tracing": "^1.0.0", + "@azure/core-util": "^1.0.0", + "@azure/logger": "^1.0.0", + "tslib": "^2.2.0" }, "engines": { - "node": ">=6.0" + "node": ">=18.0.0" + } + }, + "node_modules/@azure/logger": { + "version": "1.1.4", + "license": "MIT", + "dependencies": { + "tslib": "^2.6.2" }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "engines": { + "node": ">=18.0.0" } }, - "node_modules/debug/node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + "node_modules/@azure/msal-browser": { + "version": "3.21.0", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "14.14.1" + }, + "engines": { + "node": ">=0.8.0" + } }, - "node_modules/decamelize": { 
- "version": "4.0.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", - "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", - "dev": true, + "node_modules/@azure/msal-common": { + "version": "14.14.1", + "license": "MIT", "engines": { - "node": ">=10" + "node": ">=0.8.0" + } + }, + "node_modules/@azure/msal-node": { + "version": "2.13.0", + "license": "MIT", + "dependencies": { + "@azure/msal-common": "14.14.1", + "jsonwebtoken": "^9.0.0", + "uuid": "^8.3.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=16" } }, - "node_modules/delegates": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", - "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==" + "node_modules/@azure/msal-node/node_modules/uuid": { + "version": "8.3.2", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } }, - "node_modules/detect-libc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", - "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "node_modules/@babel/code-frame": { + "version": "7.24.7", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/highlight": "^7.24.7", + "picocolors": "^1.0.0" + }, "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/diff": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.0.0.tgz", - "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", + "node_modules/@babel/compat-data": { + "version": "7.25.2", "dev": true, + "license": "MIT", "engines": { - "node": ">=0.3.1" + "node": ">=6.9.0" } }, - "node_modules/dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", + "node_modules/@babel/core": { + "version": "7.25.2", "dev": true, + "license": "MIT", "dependencies": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.0", + "@babel/helper-compilation-targets": "^7.25.2", + "@babel/helper-module-transforms": "^7.25.2", + "@babel/helpers": "^7.25.0", + "@babel/parser": "^7.25.0", + "@babel/template": "^7.25.0", + "@babel/traverse": "^7.25.2", + "@babel/types": "^7.25.2", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" } }, - "node_modules/dom-serializer/node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } }, - "node_modules/dom-serializer/node_modules/entities": { - "version": "2.2.0", - 
"resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz", - "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==", + "node_modules/@babel/generator": { + "version": "7.25.0", "dev": true, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" + "license": "MIT", + "dependencies": { + "@babel/types": "^7.25.0", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "node_modules/domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", - "dev": true - }, - "node_modules/domhandler": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.3.0.tgz", - "integrity": "sha512-q9bUwjfp7Eif8jWxxxPSykdRZAb6GkguBGSgvvCrhI9wB71W2K/Kvv4E61CF/mcCfnVJDeDWx/Vb/uAqbDj6UQ==", + "node_modules/@babel/helper-compilation-targets": { + "version": "7.25.2", "dev": true, + "license": "MIT", "dependencies": { - "domelementtype": "1" + "@babel/compat-data": "^7.25.2", + "@babel/helper-validator-option": "^7.24.8", + "browserslist": "^4.23.1", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" } }, - "node_modules/domutils": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.5.1.tgz", - "integrity": "sha512-gSu5Oi/I+3wDENBsOWBiRK1eoGxcywYSqg3rR960/+EfY0CF4EX1VPkgHOZ3WiS/Jg2DtliF6BhWcHlfpYUcGw==", + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", "dev": true, + "license": "ISC", "dependencies": { - "dom-serializer": "0", - "domelementtype": "1" + "yallist": "^3.0.2" } }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "dev": true - }, - "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } }, - "node_modules/encoding": { - "version": "0.1.13", - "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", - "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "optional": true, + "node_modules/@babel/helper-module-imports": { + "version": "7.24.7", + "dev": true, + "license": "MIT", "dependencies": { - "iconv-lite": "^0.6.2" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, + "engines": { + "node": ">=6.9.0" } }, - "node_modules/entities": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.0.0.tgz", - "integrity": "sha512-LbLqfXgJMmy81t+7c14mnulFHJ170cM6E+0vMXR9k/ZiZwgX8i5pNgjTCX3SO4VeUsFLV+8InixoretwU+MjBQ==", - "dev": true - }, - "node_modules/env-paths": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", - "integrity": 
"sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "node_modules/@babel/helper-module-transforms": { + "version": "7.25.2", "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "@babel/traverse": "^7.25.2" + }, "engines": { - "node": ">=6" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, - "node_modules/err-code": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz", - "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", - "dev": true - }, - "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.8", "dev": true, + "license": "MIT", "engines": { - "node": ">=6" + "node": ">=6.9.0" } }, - "node_modules/escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "node_modules/@babel/helper-simple-access": { + "version": "7.24.7", "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" + }, "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/exit": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", - "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "node_modules/@babel/helper-string-parser": { + "version": "7.24.8", "dev": true, + "license": "MIT", "engines": { - "node": ">= 0.8.0" + "node": ">=6.9.0" } }, - "node_modules/exponential-backoff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.1.tgz", - "integrity": "sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==", - "dev": true - }, - "node_modules/file-uri-to-path": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", - "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==" - }, - "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "node_modules/@babel/helper-validator-identifier": { + "version": "7.24.7", "dev": true, - "dependencies": { - "to-regex-range": "^5.0.1" - }, + "license": "MIT", "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "node_modules/@babel/helper-validator-option": { + "version": "7.24.8", "dev": true, - "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - }, + "license": "MIT", "engines": { - "node": ">=10" - }, - 
"funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=6.9.0" } }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "node_modules/@babel/helpers": { + "version": "7.25.0", "dev": true, - "bin": { - "flat": "cli.js" + "license": "MIT", + "dependencies": { + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.0" + }, + "engines": { + "node": ">=6.9.0" } }, - "node_modules/foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "node_modules/@babel/highlight": { + "version": "7.24.7", "dev": true, + "license": "MIT", "dependencies": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" + "@babel/helper-validator-identifier": "^7.24.7", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": ">=6.9.0" } }, - "node_modules/foreground-child/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", "dev": true, - "engines": { - "node": ">=14" + "license": "MIT", + "dependencies": { + "color-convert": "^1.9.0" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "engines": { + "node": ">=4" } }, - "node_modules/fs-minipass": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-3.0.3.tgz", - "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", "dev": true, + "license": "MIT", "dependencies": { - "minipass": "^7.0.3" + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">=4" } }, - "node_modules/fs-minipass/node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", "dev": true, - "engines": { - "node": ">=16 || 14 >=14.17" + "license": "MIT", + "dependencies": { + "color-name": "1.1.3" } }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "dev": true, + "license": "MIT" }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": 
"1.0.5", "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], + "license": "MIT", "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + "node": ">=0.8.0" } }, - "node_modules/gauge": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-3.0.2.tgz", - "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", - "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.2", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.1", - "object-assign": "^4.1.1", - "signal-exit": "^3.0.0", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.2" - }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "dev": true, + "license": "MIT", "engines": { - "node": ">=10" + "node": ">=4" } }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^3.0.0" + }, "engines": { - "node": "6.* || 8.* || >= 10.*" + "node": ">=4" } }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "node_modules/@babel/parser": { + "version": "7.25.3", + "dev": true, + "license": "MIT", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "@babel/types": "^7.25.2" }, - "engines": { - "node": "*" + "bin": { + "parser": "bin/babel-parser.js" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "engines": { + "node": ">=6.0.0" } }, - "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", "dev": true, + "license": "MIT", "dependencies": { - "is-glob": "^4.0.1" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": ">= 6" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "dev": true, + "license": "MIT", "dependencies": { - "brace-expansion": "^1.1.7" + "@babel/helper-plugin-utils": "^7.8.0" }, - "engines": { - "node": "*" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", - "dev": true - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", "dev": true, - "engines": { - "node": ">=8" + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/has-unicode": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/has-unicode/-/has-unicode-2.0.1.tgz", - "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==" - }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", "dev": true, - "bin": { - "he": "bin/he" + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/htmlparser2": { - "version": "3.8.3", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.8.3.tgz", - "integrity": "sha512-hBxEg3CYXe+rPIua8ETe7tmG3XDn9B0edOE/e9wH2nLczxzgdu0m0aNHY+5wFZiviLWLdANPJTssa92dMcXQ5Q==", + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", "dev": true, + "license": "MIT", "dependencies": { - "domelementtype": "1", - "domhandler": "2.3", - "domutils": "1.5", - "entities": "1.0", - "readable-stream": "1.1" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/http-cache-semantics": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", - "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", - "dev": true - }, - "node_modules/http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.24.7", "dev": true, + "license": "MIT", "dependencies": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { - "node": ">= 6" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/https-proxy-agent": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "dev": true, + "license": "MIT", "dependencies": { - "agent-base": "6", - "debug": "4" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">= 6" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/humanize-ms": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", "dev": true, + "license": "MIT", 
"dependencies": { - "ms": "^2.0.0" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "optional": true, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "dev": true, + "license": "MIT", "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" + "@babel/helper-plugin-utils": "^7.10.4" }, - "engines": { - "node": ">=0.10.0" + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", "dev": true, - "engines": { - "node": ">=0.8.19" + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", "dev": true, - "engines": { - "node": ">=8" + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "dev": true, + "license": "MIT", "dependencies": { - "once": "^1.3.0", - "wrappy": "1" + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "node_modules/ip": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ip/-/ip-2.0.0.tgz", - "integrity": "sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==", - "dev": true - }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", "dev": true, + "license": "MIT", "dependencies": { - "binary-extensions": "^2.0.0" + "@babel/helper-plugin-utils": "^7.14.5" }, "engines": { - "node": ">=8" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": 
"sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.24.7", "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.7" + }, "engines": { - "node": ">=0.10.0" + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "node_modules/@babel/template": { + "version": "7.25.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.25.0", + "@babel/types": "^7.25.0" + }, "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "node_modules/@babel/traverse": { + "version": "7.25.3", "dev": true, + "license": "MIT", "dependencies": { - "is-extglob": "^2.1.1" + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.0", + "@babel/parser": "^7.25.3", + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.2", + "debug": "^4.3.1", + "globals": "^11.1.0" }, "engines": { - "node": ">=0.10.0" + "node": ">=6.9.0" } }, - "node_modules/is-lambda": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-lambda/-/is-lambda-1.0.1.tgz", - "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", - "dev": true - }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "node_modules/@babel/traverse/node_modules/globals": { + "version": "11.12.0", "dev": true, + "license": "MIT", "engines": { - "node": ">=0.12.0" + "node": ">=4" } }, - "node_modules/is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "node_modules/@babel/types": { + "version": "7.25.2", "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.24.8", + "@babel/helper-validator-identifier": "^7.24.7", + "to-fast-properties": "^2.0.0" + }, "engines": { - "node": ">=8" + "node": ">=6.9.0" } }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@bufbuild/buf": { + "version": "1.37.0", "dev": true, + "hasInstallScript": true, + "license": "Apache-2.0", + "bin": { + "buf": "bin/buf", + "protoc-gen-buf-breaking": "bin/protoc-gen-buf-breaking", + "protoc-gen-buf-lint": "bin/protoc-gen-buf-lint" + }, "engines": { - "node": ">=10" + "node": ">=12" }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" + "optionalDependencies": { + "@bufbuild/buf-darwin-arm64": "1.37.0", + "@bufbuild/buf-darwin-x64": "1.37.0", + "@bufbuild/buf-linux-aarch64": "1.37.0", + "@bufbuild/buf-linux-x64": "1.37.0", + "@bufbuild/buf-win32-arm64": "1.37.0", + "@bufbuild/buf-win32-x64": "1.37.0" + } + }, + "node_modules/@bufbuild/buf-darwin-arm64": { + "version": "1.37.0", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" } }, - "node_modules/isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==", - "dev": true - }, - "node_modules/isexe": { + "node_modules/@bufbuild/protobuf": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true + "license": "(Apache-2.0 AND BSD-3-Clause)" }, - "node_modules/jackspeak": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "node_modules/@bufbuild/protoc-gen-es": { + "version": "2.0.0", "dev": true, + "license": "Apache-2.0", "dependencies": { - "@isaacs/cliui": "^8.0.2" + "@bufbuild/protobuf": "^2.0.0", + "@bufbuild/protoplugin": "2.0.0" + }, + "bin": { + "protoc-gen-es": "bin/protoc-gen-es" }, "engines": { "node": ">=14" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "peerDependencies": { + "@bufbuild/protobuf": "2.0.0" }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" + "peerDependenciesMeta": { + "@bufbuild/protobuf": { + "optional": true + } } }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "node_modules/@bufbuild/protoplugin": { + "version": "2.0.0", "dev": true, + "license": "Apache-2.0", "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" + "@bufbuild/protobuf": "2.0.0", + "@typescript/vfs": "^1.5.2", + "typescript": "5.4.5" } }, - "node_modules/js2xmlparser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.2.tgz", - "integrity": "sha512-6n4D8gLlLf1n5mNLQPRfViYzu9RATblzPEtm1SthMX1Pjao0r9YI9nw7ZIfRxQMERS87mcswrg+r/OYrPRX6jA==", + "node_modules/@bufbuild/protoplugin/node_modules/typescript": { + "version": "5.4.5", "dev": true, - "dependencies": { - "xmlcreate": "^2.0.4" + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" } }, - "node_modules/jsdoc": { - "version": "3.6.11", - "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-3.6.11.tgz", - "integrity": "sha512-8UCU0TYeIYD9KeLzEcAu2q8N/mx9O3phAGl32nmHlE0LpaJL71mMkP4d+QE5zWfNt50qheHtOZ0qoxVrsX5TUg==", + "node_modules/@confluentinc/kafka-javascript": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@confluentinc/kafka-javascript/-/kafka-javascript-0.2.0.tgz", + "integrity": "sha512-IWjyGRqeDBcWmYcEQHu1XlZQ6am5qzzIEb18rdxFZkFeVQ6piG28bQ6BmlqvHn3zd+XoAO+e8bRlpgdUuTLC9Q==", "dev": true, + "hasInstallScript": true, 
+ "workspaces": [ + "schemaregistry" + ], "dependencies": { - "@babel/parser": "^7.9.4", - "@types/markdown-it": "^12.2.3", - "bluebird": "^3.7.2", - "catharsis": "^0.9.0", - "escape-string-regexp": "^2.0.0", - "js2xmlparser": "^4.0.2", - "klaw": "^3.0.0", - "markdown-it": "^12.3.2", - "markdown-it-anchor": "^8.4.1", - "marked": "^4.0.10", - "mkdirp": "^1.0.4", - "requizzle": "^0.2.3", - "strip-json-comments": "^3.1.0", - "taffydb": "2.6.2", - "underscore": "~1.13.2" - }, - "bin": { - "jsdoc": "jsdoc.js" + "@aws-sdk/client-kms": "^3.637.0", + "@azure/identity": "^4.4.1", + "@azure/keyvault-keys": "^4.8.0", + "@bufbuild/protobuf": "^2.0.0", + "@criteria/json-schema": "^0.10.0", + "@criteria/json-schema-validation": "^0.10.0", + "@google-cloud/kms": "^4.5.0", + "@hackbg/miscreant-esm": "^0.3.2-patch.3", + "@mapbox/node-pre-gyp": "^1.0.11", + "@smithy/types": "^3.3.0", + "@types/simple-oauth2": "^5.0.7", + "@types/validator": "^13.12.0", + "ajv": "^8.17.1", + "async-mutex": "^0.5.0", + "avsc": "^5.7.7", + "axios": "^1.7.3", + "bindings": "^1.3.1", + "json-stringify-deterministic": "^1.0.12", + "lru-cache": "^11.0.0", + "nan": "^2.17.0", + "node-vault": "^0.10.2", + "simple-oauth2": "^5.1.0", + "validator": "^13.12.0" }, "engines": { - "node": ">=12.0.0" + "node": ">=18.0.0" } }, - "node_modules/jshint": { - "version": "2.13.6", - "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.13.6.tgz", - "integrity": "sha512-IVdB4G0NTTeQZrBoM8C5JFVLjV2KtZ9APgybDA1MK73xb09qFs0jCXyQLnCOp1cSZZZbvhq/6mfXHUTaDkffuQ==", - "dev": true, + "node_modules/@confluentinc/schemaregistry": { + "resolved": "schemaregistry", + "link": true + }, + "node_modules/@criteria/json-pointer": { + "version": "0.2.1", + "license": "MIT", + "engines": { + "node": ">=18.12.1" + } + }, + "node_modules/@criteria/json-schema": { + "version": "0.10.0", + "license": "MIT", "dependencies": { - "cli": "~1.0.0", - "console-browserify": "1.1.x", - "exit": "0.1.x", - "htmlparser2": "3.8.x", - "lodash": "~4.17.21", - "minimatch": "~3.0.2", - "strip-json-comments": "1.0.x" + "@criteria/json-pointer": "^0.2.1", + "toad-uri-js": "^5.0.1" }, - "bin": { - "jshint": "bin/jshint" + "engines": { + "node": ">=18.12.1" } }, - "node_modules/jshint/node_modules/strip-json-comments": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-1.0.4.tgz", - "integrity": "sha512-AOPG8EBc5wAikaG1/7uFCNFJwnKOuQwFTpYBdTW6OvWHeZBQBrAA/amefHGrEiOnCPcLFZK6FUPtWVKpQVIRgg==", - "dev": true, - "bin": { - "strip-json-comments": "cli.js" + "node_modules/@criteria/json-schema-validation": { + "version": "0.10.0", + "license": "MIT", + "dependencies": { + "@criteria/json-pointer": "^0.2.1", + "@criteria/json-schema": "^0.10.0", + "fast-deep-equal": "^3.1.3", + "punycode": "^2.3.1", + "smtp-address-parser": "^1.0.10", + "toad-uri-js": "^5.0.1" }, "engines": { - "node": ">=0.8.0" + "node": ">=18.12.1" } }, - "node_modules/klaw": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/klaw/-/klaw-3.0.0.tgz", - "integrity": "sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==", + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", "dev": true, + "license": "MIT", "dependencies": { - "graceful-fs": "^4.1.9" + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, - "node_modules/linkify-it": { - "version": "3.0.3", - "resolved": 
"https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", - "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", + "node_modules/@eslint-community/regexpp": { + "version": "4.11.0", "dev": true, - "dependencies": { - "uc.micro": "^1.0.1" + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, - "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", "dev": true, + "license": "MIT", "dependencies": { - "p-locate": "^5.0.0" + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" }, "engines": { - "node": ">=10" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://opencollective.com/eslint" } }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.12.6", "dev": true, + "license": "MIT", "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/lru-cache": { - "version": "7.18.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", - "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "dev": true, + "license": "MIT" + }, + "node_modules/@eslint/js": { + "version": "9.9.0", "dev": true, + "license": "MIT", "engines": { - "node": ">=12" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", + "node_modules/@gar/promisify": { + "version": "1.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@google-cloud/kms": { + "version": "4.5.0", + "license": "Apache-2.0", "dependencies": { - "semver": "^6.0.0" + "google-gax": "^4.0.3" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/make-dir/node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "bin": { - "semver": "bin/semver.js" + "node": ">=14.0.0" } }, - "node_modules/make-fetch-happen": { - "version": "11.1.1", - "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz", - "integrity": "sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==", - "dev": true, + "node_modules/@grpc/grpc-js": { + "version": "1.11.1", + "license": "Apache-2.0", "dependencies": { - "agentkeepalive": "^4.2.1", - "cacache": "^17.0.0", - "http-cache-semantics": "^4.1.1", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^7.7.1", - "minipass": "^5.0.0", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^7.0.0", - "ssri": "^10.0.0" + "@grpc/proto-loader": "^0.7.13", + "@js-sdsl/ordered-map": "^4.4.2" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">=12.10.0" } }, - "node_modules/markdown-it": { - "version": "12.3.2", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", - "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", - "dev": true, + "node_modules/@grpc/proto-loader": { + "version": "0.7.13", + "license": "Apache-2.0", "dependencies": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" + "lodash.camelcase": "^4.3.0", + "long": "^5.0.0", + "protobufjs": "^7.2.5", + "yargs": "^17.7.2" }, "bin": { - "markdown-it": "bin/markdown-it.js" + "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" + }, + "engines": { + "node": ">=6" } }, - "node_modules/markdown-it-anchor": { - "version": "8.6.7", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-8.6.7.tgz", - "integrity": "sha512-FlCHFwNnutLgVTflOYHPW2pPcl2AACqVzExlkGQNsi4CJgqOHN7YTgDd4LuhgN1BFO3TS0vLAruV1Td6dwWPJA==", - "dev": true, - "peerDependencies": { - "@types/markdown-it": "*", - "markdown-it": "*" - } + "node_modules/@hackbg/miscreant-esm": { + "version": "0.3.2-patch.3", + "license": "MIT" }, - "node_modules/markdown-it/node_modules/entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", - "dev": true, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" + "node_modules/@hapi/boom": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/@hapi/boom/-/boom-10.0.1.tgz", + "integrity": "sha512-ERcCZaEjdH3OgSJlyjVk8pHIFeus91CjKP3v+MpgBNp5IvGzP2l/bRiD78nqYcKPaZdbKkK5vDBVPd2ohHBlsA==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^11.0.2" } }, - "node_modules/marked": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz", - "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==", - "dev": true, - "bin": { - "marked": "bin/marked.js" - }, - "engines": { - "node": ">= 12" - } + "node_modules/@hapi/bourne": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@hapi/bourne/-/bourne-3.0.0.tgz", + "integrity": 
"sha512-Waj1cwPXJDucOib4a3bAISsKJVb15MKi9IvmTI/7ssVEm6sywXGjVJDhl6/umt1pK1ZS7PacXU3A1PmFKHEZ2w==", + "license": "BSD-3-Clause" }, - "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==", - "dev": true + "node_modules/@hapi/hoek": { + "version": "11.0.4", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-11.0.4.tgz", + "integrity": "sha512-PnsP5d4q7289pS2T2EgGz147BFJ2Jpb4yrEdkpz2IhgEUzos1S7HTl7ezWh1yfYzYlj89KzLdCRkqsP6SIryeQ==", + "license": "BSD-3-Clause" }, - "node_modules/minimatch": { - "version": "3.0.8", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.8.tgz", - "integrity": "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==", - "dev": true, + "node_modules/@hapi/topo": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz", + "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==", + "license": "BSD-3-Clause", "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" + "@hapi/hoek": "^9.0.0" } }, - "node_modules/minipass": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-5.0.0.tgz", - "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", - "engines": { - "node": ">=8" - } + "node_modules/@hapi/topo/node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" }, - "node_modules/minipass-collect": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/minipass-collect/-/minipass-collect-1.0.2.tgz", - "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", - "dev": true, + "node_modules/@hapi/wreck": { + "version": "18.1.0", + "resolved": "https://registry.npmjs.org/@hapi/wreck/-/wreck-18.1.0.tgz", + "integrity": "sha512-0z6ZRCmFEfV/MQqkQomJ7sl/hyxvcZM7LtuVqN3vdAO4vM9eBbowl0kaqQj9EJJQab+3Uuh1GxbGIBFy4NfJ4w==", + "license": "BSD-3-Clause", "dependencies": { - "minipass": "^3.0.0" - }, - "engines": { - "node": ">= 8" + "@hapi/boom": "^10.0.1", + "@hapi/bourne": "^3.0.0", + "@hapi/hoek": "^11.0.2" } }, - "node_modules/minipass-collect/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", "dev": true, + "license": "Apache-2.0", "dependencies": { - "yallist": "^4.0.0" + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" }, "engines": { - "node": ">=8" + "node": ">=10.10.0" } }, - "node_modules/minipass-fetch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minipass-fetch/-/minipass-fetch-3.0.4.tgz", - "integrity": "sha512-jHAqnA728uUpIaFm7NWsCnqKT6UqZz7GcI/bDpPATuwYyKwJwW0remxSCxUlKiEty+eopHGa3oc8WxgQ1FFJqg==", + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", "dev": true, - "dependencies": { - "minipass": "^7.0.3", - "minipass-sized": "^1.0.3", - 
"minizlib": "^2.1.2" - }, + "license": "Apache-2.0", "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">=12.22" }, - "optionalDependencies": { - "encoding": "^0.1.13" + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" } }, - "node_modules/minipass-fetch/node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", "dev": true, - "engines": { - "node": ">=16 || 14 >=14.17" - } + "license": "BSD-3-Clause" }, - "node_modules/minipass-flush": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/minipass-flush/-/minipass-flush-1.0.5.tgz", - "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", "dev": true, + "license": "ISC", "dependencies": { - "minipass": "^3.0.0" + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" }, "engines": { - "node": ">= 8" + "node": ">=8" } }, - "node_modules/minipass-flush/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": { + "version": "1.0.10", "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=8" + "sprintf-js": "~1.0.2" } }, - "node_modules/minipass-pipeline": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", - "integrity": "sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/find-up": { + "version": "4.1.0", "dev": true, + "license": "MIT", "dependencies": { - "minipass": "^3.0.0" + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" }, "engines": { "node": ">=8" } }, - "node_modules/minipass-pipeline/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": { + "version": "3.14.1", "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" + "argparse": "^1.0.7", + "esprima": "^4.0.0" }, - "engines": { - "node": ">=8" + "bin": { + "js-yaml": "bin/js-yaml.js" } }, - "node_modules/minipass-sized": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/minipass-sized/-/minipass-sized-1.0.3.tgz", - "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/locate-path": { + "version": "5.0.0", "dev": true, + "license": "MIT", "dependencies": { - "minipass": "^3.0.0" + "p-locate": "^4.1.0" }, "engines": { "node": ">=8" } }, - "node_modules/minipass-sized/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": 
"sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-limit": { + "version": "2.3.0", "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" + "p-try": "^2.0.0" }, "engines": { - "node": ">=8" - } - }, - "node_modules/minizlib": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", - "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", - "dependencies": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" + "node": ">=6" }, - "engines": { - "node": ">= 8" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/minizlib/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/@istanbuljs/load-nyc-config/node_modules/p-locate": { + "version": "4.1.0", + "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" + "p-limit": "^2.2.0" }, "engines": { "node": ">=8" } }, - "node_modules/mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "bin": { - "mkdirp": "bin/cmd.js" - }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/resolve-from": { + "version": "5.0.0", + "dev": true, + "license": "MIT", "engines": { - "node": ">=10" + "node": ">=8" } }, - "node_modules/mocha": { - "version": "10.2.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.2.0.tgz", - "integrity": "sha512-IDY7fl/BecMwFHzoqF2sg/SHHANeBoMMXFlS9r0OXKDssYE1M5O43wUY/9BVPeIvfH2zmEbBfseqN9gBQZzXkg==", - "dev": true, - "dependencies": { - "ansi-colors": "4.1.1", - "browser-stdout": "1.3.1", - "chokidar": "3.5.3", - "debug": "4.3.4", - "diff": "5.0.0", - "escape-string-regexp": "4.0.0", - "find-up": "5.0.0", - "glob": "7.2.0", - "he": "1.2.0", - "js-yaml": "4.1.0", - "log-symbols": "4.1.0", - "minimatch": "5.0.1", - "ms": "2.1.3", - "nanoid": "3.3.3", - "serialize-javascript": "6.0.0", - "strip-json-comments": "3.1.1", - "supports-color": "8.1.1", - "workerpool": "6.2.1", - "yargs": "16.2.0", - "yargs-parser": "20.2.4", - "yargs-unparser": "2.0.0" - }, - "bin": { - "_mocha": "bin/_mocha", - "mocha": "bin/mocha.js" - }, + "node_modules/@istanbuljs/load-nyc-config/node_modules/sprintf-js": { + "version": "1.0.3", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "dev": true, + "license": "MIT", "engines": { - "node": ">= 14.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/mochajs" + "node": ">=8" } }, - "node_modules/mocha/node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "node_modules/@jest/console": { + "version": "29.7.0", "dev": true, - "engines": { - "node": ">=10" + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/mocha/node_modules/glob": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz", - "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==", + "node_modules/@jest/core": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": "*" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } } }, - "node_modules/mocha/node_modules/glob/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "node_modules/@jest/environment": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "brace-expansion": "^1.1.7" + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" }, "engines": { - "node": "*" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/mocha/node_modules/minimatch": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.0.1.tgz", - "integrity": "sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g==", + "node_modules/@jest/expect": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "brace-expansion": "^2.0.1" + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" }, "engines": { - "node": ">=10" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/mocha/node_modules/minimatch/node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "node_modules/@jest/expect-utils": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0" + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": 
"sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "dev": true - }, - "node_modules/nan": { - "version": "2.18.0", - "resolved": "https://registry.npmjs.org/nan/-/nan-2.18.0.tgz", - "integrity": "sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==" - }, - "node_modules/nanoid": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.3.tgz", - "integrity": "sha512-p1sjXuopFs0xg+fPASzQ28agW1oHD7xDsd9Xkf3T15H3c/cifrFHVwrh74PdoklAPi+i7MdRsE47vm2r6JoB+w==", + "node_modules/@jest/fake-timers": { + "version": "29.7.0", "dev": true, - "bin": { - "nanoid": "bin/nanoid.cjs" + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "node_modules/@jest/globals": { + "version": "29.7.0", "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, "engines": { - "node": ">= 0.6" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-fetch": { - "version": "2.6.12", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.12.tgz", - "integrity": "sha512-C/fGU2E8ToujUivIO0H+tpQ6HWo4eEmchoPIoXtxCrVghxdKq+QOHqEZW7tuP3KlV3bC8FRMO5nMCC7Zm1VP6g==", + "node_modules/@jest/reporters": { + "version": "29.7.0", + "dev": true, + "license": "MIT", "dependencies": { - "whatwg-url": "^5.0.0" + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" }, "engines": { - "node": "4.x || >=6.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" }, "peerDependencies": { - "encoding": "^0.1.0" + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" }, "peerDependenciesMeta": { - "encoding": { + "node-notifier": { "optional": true } } }, - "node_modules/node-gyp": { - "version": "9.4.0", - "resolved": "https://registry.npmjs.org/node-gyp/-/node-gyp-9.4.0.tgz", - "integrity": "sha512-dMXsYP6gc9rRbejLXmTbVRYjAHw7ppswsKyMxuxJxxOHzluIO1rGp9TOQgjFJ+2MCqcOcQTOPB/8Xwhr+7s4Eg==", + "node_modules/@jest/schemas": { + "version": "29.6.3", "dev": true, + "license": "MIT", "dependencies": { - "env-paths": "^2.2.0", - "exponential-backoff": "^3.1.1", - "glob": "^7.1.4", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^11.0.3", - "nopt": "^6.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.2", 
- "which": "^2.0.2" - }, - "bin": { - "node-gyp": "bin/node-gyp.js" + "@sinclair/typebox": "^0.27.8" }, "engines": { - "node": "^12.13 || ^14.13 || >=16" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/are-we-there-yet": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", - "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", + "node_modules/@jest/source-map": { + "version": "29.6.3", "dev": true, + "license": "MIT", "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/gauge": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/gauge/-/gauge-4.0.4.tgz", - "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + "node_modules/@jest/test-result": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/nopt": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-6.0.0.tgz", - "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "abbrev": "^1.0.0" - }, - "bin": { - "nopt": "bin/nopt.js" + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/npmlog": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-6.0.2.tgz", - "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "node_modules/@jest/transform": { + "version": "29.7.0", "dev": true, + "license": "MIT", "dependencies": { - "are-we-there-yet": "^3.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^4.0.3", - "set-blocking": "^2.0.0" + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "node_modules/@jest/types": { + "version": "29.6.3", "dev": true, + "license": "MIT", "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" }, "engines": { - "node": ">= 6" + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/node-gyp/node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", "dev": true, + "license": "MIT", "dependencies": { - "safe-buffer": "~5.2.0" + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" } }, - "node_modules/nopt": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/nopt/-/nopt-5.0.0.tgz", - "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", - "dependencies": { - "abbrev": "1" - }, - "bin": { - "nopt": "bin/nopt.js" - }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "dev": true, + "license": "MIT", "engines": { - "node": ">=6" + "node": ">=6.0.0" } }, - "node_modules/normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", "dev": true, + "license": "MIT", "engines": { - "node": ">=0.10.0" + "node": ">=6.0.0" } }, - "node_modules/npmlog": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-5.0.1.tgz", - "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "dev": true, + "license": "MIT", "dependencies": { - "are-we-there-yet": "^2.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^3.0.0", - "set-blocking": "^2.0.0" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "node_modules/@js-sdsl/ordered-map": { + "version": "4.4.2", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/js-sdsl" + } + }, + "node_modules/@jsdoc/salty": { + "version": "0.2.8", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "lodash": "^4.17.21" + }, "engines": { - "node": ">=0.10.0" + "node": ">=v12.0.0" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "node_modules/@mapbox/node-pre-gyp": { + "version": "1.0.11", + "license": "BSD-3-Clause", "dependencies": { - "wrappy": "1" + "detect-libc": "^2.0.0", + "https-proxy-agent": "^5.0.0", + "make-dir": "^3.1.0", + "node-fetch": "^2.6.7", + "nopt": "^5.0.0", + "npmlog": "^5.0.1", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.11" + }, + "bin": { + "node-pre-gyp": "bin/node-pre-gyp" } }, - "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "node_modules/@microsoft/tsdoc": { + "version": "0.15.0", + "resolved": "https://registry.npmjs.org/@microsoft/tsdoc/-/tsdoc-0.15.0.tgz", + "integrity": "sha512-HZpPoABogPvjeJOdzCOSJsXeL/SMCBgBZMVC3X3d7YYp2gf31MfxhUoYUNwf1ERPJOnQc0wkFn9trqI6ZEdZuA==", + "dev": true + }, + "node_modules/@microsoft/tsdoc-config": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@microsoft/tsdoc-config/-/tsdoc-config-0.17.0.tgz", + "integrity": "sha512-v/EYRXnCAIHxOHW+Plb6OWuUoMotxTN0GLatnpOb1xq0KuTNw/WI3pamJx/UbsoJP5k9MCw1QxvvhPcF9pH3Zg==", "dev": true, "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "@microsoft/tsdoc": "0.15.0", + "ajv": "~8.12.0", + "jju": "~1.4.0", + "resolve": "~1.22.2" } }, - "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "node_modules/@microsoft/tsdoc-config/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", "dev": true, "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" } }, - "node_modules/p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", "dev": true, + "license": "MIT", "dependencies": { - "aggregate-error": "^3.0.0" + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 8" } }, - "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", "dev": true, + "license": "MIT", "engines": { - "node": ">=8" + "node": ">= 8" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": 
"sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, "engines": { - "node": ">=0.10.0" + "node": ">= 8" } }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "node_modules/@npmcli/fs": { + "version": "2.1.2", "dev": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, "engines": { - "node": ">=8" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/path-scurry": { - "version": "1.10.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz", - "integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==", + "node_modules/@npmcli/move-file": { + "version": "2.0.1", "dev": true, + "license": "MIT", "dependencies": { - "lru-cache": "^9.1.1 || ^10.0.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" }, "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/path-scurry/node_modules/lru-cache": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.0.1.tgz", - "integrity": "sha512-IJ4uwUTi2qCccrioU6g9g/5rvvVl13bsdczUUcqbciD9iLr095yj8DQKdObriEvuNSx325N1rV1O0sJFszx75g==", - "dev": true, + "node_modules/@postman/form-data": { + "version": "3.1.1", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, "engines": { - "node": "14 || >=16.14" + "node": ">= 6" } }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "engines": { - "node": ">=8.6" + "node_modules/@postman/tough-cookie": { + "version": "4.1.3-postman.1", + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" + "engines": { + "node": ">=6" } }, - "node_modules/promise-retry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz", - "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", - "dev": true, + "node_modules/@postman/tunnel-agent": { + "version": "0.6.4", + "license": "Apache-2.0", "dependencies": { - "err-code": "^2.0.2", - "retry": "^0.12.0" + "safe-buffer": "^5.0.1" }, "engines": { - "node": ">=10" + "node": "*" } }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "license": "BSD-3-Clause" + 
}, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "license": "BSD-3-Clause", "dependencies": { - "safe-buffer": "^5.1.0" + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" } }, - "node_modules/readable-stream": { - "version": "1.1.14", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", - "integrity": "sha512-+MeVjFf4L44XUkhM1eYbD8fyEsxcV81pqMSR5gblfcLCHfZvbrqy4/qYHE+/R5HoBUT11WV5O08Cr1n3YXkWVQ==", + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/address": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.5.tgz", + "integrity": "sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.0.0" + } + }, + "node_modules/@sideway/address/node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/formula": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz", + "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg==", + "license": "BSD-3-Clause" + }, + "node_modules/@sideway/pinpoint": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz", + "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ==", + "license": "BSD-3-Clause" + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "dev": true, + "license": "MIT" + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.1", "dev": true, + "license": "BSD-3-Clause", "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.1", - "isarray": "0.0.1", - "string_decoder": "~0.10.x" + "type-detect": "4.0.8" } }, - "node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", "dev": true, + "license": "BSD-3-Clause", "dependencies": { - "picomatch": "^2.2.1" + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@smithy/abort-controller": { + "version": "3.1.1", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8.10.0" + "node": ">=16.0.0" } }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": 
"sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true, + "node_modules/@smithy/config-resolver": { + "version": "3.0.5", + "license": "Apache-2.0", + "dependencies": { + "@smithy/node-config-provider": "^3.1.4", + "@smithy/types": "^3.3.0", + "@smithy/util-config-provider": "^3.0.0", + "@smithy/util-middleware": "^3.0.3", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=0.10.0" + "node": ">=16.0.0" } }, - "node_modules/requizzle": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.4.tgz", - "integrity": "sha512-JRrFk1D4OQ4SqovXOgdav+K8EAhSB/LJZqCz8tbX0KObcdeM15Ss59ozWMBWmmINMagCwmqn4ZNryUGpBsl6Jw==", - "dev": true, + "node_modules/@smithy/core": { + "version": "2.4.0", + "license": "Apache-2.0", "dependencies": { - "lodash": "^4.17.21" + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-retry": "^3.0.15", + "@smithy/middleware-serde": "^3.0.3", + "@smithy/protocol-http": "^4.1.0", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/util-body-length-browser": "^3.0.0", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, - "node_modules/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", - "dev": true, + "node_modules/@smithy/credential-provider-imds": { + "version": "3.2.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/node-config-provider": "^3.1.4", + "@smithy/property-provider": "^3.1.3", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "tslib": "^2.6.2" + }, "engines": { - "node": ">= 4" + "node": ">=16.0.0" } }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "node_modules/@smithy/fetch-http-handler": { + "version": "3.2.4", + "license": "Apache-2.0", "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" + "@smithy/protocol-http": "^4.1.0", + "@smithy/querystring-builder": "^3.0.3", + "@smithy/types": "^3.3.0", + "@smithy/util-base64": "^3.0.0", + "tslib": "^2.6.2" + } + }, + "node_modules/@smithy/hash-node": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" }, - "funding": { - "url": "https://github.com/sponsors/isaacs" + "engines": { + "node": ">=16.0.0" } }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] + "node_modules/@smithy/invalid-dependency": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + } }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": 
"https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "optional": true + "node_modules/@smithy/is-array-buffer": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } }, - "node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "node_modules/@smithy/middleware-content-length": { + "version": "3.0.5", + "license": "Apache-2.0", "dependencies": { - "lru-cache": "^6.0.0" + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, - "bin": { - "semver": "bin/semver.js" + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/middleware-endpoint": { + "version": "3.1.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/middleware-serde": "^3.0.3", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "@smithy/url-parser": "^3.0.3", + "@smithy/util-middleware": "^3.0.3", + "tslib": "^2.6.2" }, "engines": { - "node": ">=10" + "node": ">=16.0.0" } }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "node_modules/@smithy/middleware-retry": { + "version": "3.0.15", + "license": "Apache-2.0", "dependencies": { - "yallist": "^4.0.0" + "@smithy/node-config-provider": "^3.1.4", + "@smithy/protocol-http": "^4.1.0", + "@smithy/service-error-classification": "^3.0.3", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-retry": "^3.0.3", + "tslib": "^2.6.2", + "uuid": "^9.0.1" }, "engines": { - "node": ">=10" + "node": ">=16.0.0" } }, - "node_modules/serialize-javascript": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.0.tgz", - "integrity": "sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag==", - "dev": true, + "node_modules/@smithy/middleware-serde": { + "version": "3.0.3", + "license": "Apache-2.0", "dependencies": { - "randombytes": "^2.1.0" + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" } }, - "node_modules/set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==" + "node_modules/@smithy/middleware-stack": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, + "node_modules/@smithy/node-config-provider": { + "version": "3.1.4", + "license": "Apache-2.0", "dependencies": { - "shebang-regex": "^3.0.0" + 
"@smithy/property-provider": "^3.1.3", + "@smithy/shared-ini-file-loader": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=16.0.0" } }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, + "node_modules/@smithy/node-http-handler": { + "version": "3.1.4", + "license": "Apache-2.0", + "dependencies": { + "@smithy/abort-controller": "^3.1.1", + "@smithy/protocol-http": "^4.1.0", + "@smithy/querystring-builder": "^3.0.3", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=8" + "node": ">=16.0.0" } }, - "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==" + "node_modules/@smithy/property-provider": { + "version": "3.1.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } }, - "node_modules/smart-buffer": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", - "integrity": "sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", - "dev": true, + "node_modules/@smithy/protocol-http": { + "version": "4.1.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">= 6.0.0", - "npm": ">= 3.0.0" + "node": ">=16.0.0" } }, - "node_modules/socks": { - "version": "2.7.1", - "resolved": "https://registry.npmjs.org/socks/-/socks-2.7.1.tgz", - "integrity": "sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==", - "dev": true, + "node_modules/@smithy/querystring-builder": { + "version": "3.0.3", + "license": "Apache-2.0", "dependencies": { - "ip": "^2.0.0", - "smart-buffer": "^4.2.0" + "@smithy/types": "^3.3.0", + "@smithy/util-uri-escape": "^3.0.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 10.13.0", - "npm": ">= 3.0.0" + "node": ">=16.0.0" } }, - "node_modules/socks-proxy-agent": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", - "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", - "dev": true, + "node_modules/@smithy/querystring-parser": { + "version": "3.0.3", + "license": "Apache-2.0", "dependencies": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" }, "engines": { - "node": ">= 10" + "node": ">=16.0.0" } }, - "node_modules/ssri": { - "version": "10.0.5", - "resolved": "https://registry.npmjs.org/ssri/-/ssri-10.0.5.tgz", - "integrity": "sha512-bSf16tAFkGeRlUNDjXu8FzaMQt6g2HZJrun7mtMbIPOddxt3GLMSz5VWUWcqTJUPfLEaDIepGxv+bYQW49596A==", - "dev": true, + "node_modules/@smithy/service-error-classification": { + "version": "3.0.3", + "license": "Apache-2.0", "dependencies": { - "minipass": "^7.0.3" + "@smithy/types": "^3.3.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": ">=16.0.0" } }, - "node_modules/ssri/node_modules/minipass": { - "version": "7.0.4", - "resolved": 
"https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", - "dev": true, + "node_modules/@smithy/shared-ini-file-loader": { + "version": "3.1.4", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">=16.0.0" } }, - "node_modules/string_decoder": { - "version": "0.10.31", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", - "integrity": "sha512-ev2QzSzWPYmy9GuqfIVildA4OdcGLeFZQrq5ys6RtiuF+RQQiZWr8TZNyAcuVXyQRYfEO+MsoB/1BuQVhOJuoQ==", - "dev": true + "node_modules/@smithy/signature-v4": { + "version": "4.1.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/is-array-buffer": "^3.0.0", + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "@smithy/util-hex-encoding": "^3.0.0", + "@smithy/util-middleware": "^3.0.3", + "@smithy/util-uri-escape": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } }, - "node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/@smithy/smithy-client": { + "version": "3.2.0", + "license": "Apache-2.0", "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "@smithy/middleware-endpoint": "^3.1.0", + "@smithy/middleware-stack": "^3.0.3", + "@smithy/protocol-http": "^4.1.0", + "@smithy/types": "^3.3.0", + "@smithy/util-stream": "^3.1.3", + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=16.0.0" } }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, + "node_modules/@smithy/types": { + "version": "3.3.0", + "license": "Apache-2.0", "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "tslib": "^2.6.2" }, "engines": { - "node": ">=8" + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/url-parser": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/querystring-parser": "^3.0.3", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + } + }, + "node_modules/@smithy/util-base64": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-body-length-browser": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + } + }, + "node_modules/@smithy/util-body-length-node": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-buffer-from": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/is-array-buffer": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-config-provider": { + "version": "3.0.0", + "license": "Apache-2.0", + 
"dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-defaults-mode-browser": { + "version": "3.0.15", + "license": "Apache-2.0", + "dependencies": { + "@smithy/property-provider": "^3.1.3", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "bowser": "^2.11.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@smithy/util-defaults-mode-node": { + "version": "3.0.15", + "license": "Apache-2.0", + "dependencies": { + "@smithy/config-resolver": "^3.0.5", + "@smithy/credential-provider-imds": "^3.2.0", + "@smithy/node-config-provider": "^3.1.4", + "@smithy/property-provider": "^3.1.3", + "@smithy/smithy-client": "^3.2.0", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/@smithy/util-endpoints": { + "version": "2.0.5", + "license": "Apache-2.0", + "dependencies": { + "@smithy/node-config-provider": "^3.1.4", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-hex-encoding": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-middleware": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-retry": { + "version": "3.0.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/service-error-classification": "^3.0.3", + "@smithy/types": "^3.3.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-stream": { + "version": "3.1.3", + "license": "Apache-2.0", + "dependencies": { + "@smithy/fetch-http-handler": "^3.2.4", + "@smithy/node-http-handler": "^3.1.4", + "@smithy/types": "^3.3.0", + "@smithy/util-base64": "^3.0.0", + "@smithy/util-buffer-from": "^3.0.0", + "@smithy/util-hex-encoding": "^3.0.0", + "@smithy/util-utf8": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-uri-escape": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@smithy/util-utf8": { + "version": "3.0.0", + "license": "Apache-2.0", + "dependencies": { + "@smithy/util-buffer-from": "^3.0.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.8", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.6", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/caseless": { + "version": 
"0.12.5", + "license": "MIT" + }, + "node_modules/@types/eslint": { + "version": "9.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint__js": { + "version": "8.42.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/eslint": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.9", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.4", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.13", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.13.tgz", + "integrity": "sha512-wd+MVEZCHt23V0/L642O5APvspWply/rGY5BcW4SUETo2UzPU3Z26qr8jC2qxpimI2jjx9h7+2cj2FwIr01bXg==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/linkify-it": { + "version": "5.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/long": { + "version": "4.0.2", + "license": "MIT" + }, + "node_modules/@types/markdown-it": { + "version": "14.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/linkify-it": "^5", + "@types/mdurl": "^2" + } + }, + "node_modules/@types/mdurl": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.16.1", + "license": "MIT", + "dependencies": { + "undici-types": "~6.19.2" + } + }, + "node_modules/@types/request": { + "version": "2.48.12", + "license": "MIT", + "dependencies": { + "@types/caseless": "*", + "@types/node": "*", + "@types/tough-cookie": "*", + "form-data": "^2.5.0" + } + }, + "node_modules/@types/request/node_modules/form-data": { + "version": "2.5.1", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true + }, + "node_modules/@types/simple-oauth2": { + "version": "5.0.7", + "resolved": "https://registry.npmjs.org/@types/simple-oauth2/-/simple-oauth2-5.0.7.tgz", + "integrity": "sha512-8JbWVJbiTSBQP/7eiyGKyXWAqp3dKQZpaA+pdW16FCi32ujkzRMG8JfjoAzdWt6W8U591ZNdHcPtP2D7ILTKuA==", + "license": "MIT" + }, + "node_modules/@types/stack-utils": { + "version": "2.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/tough-cookie": { + "version": "4.0.5", + "license": "MIT" + }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/validator": { + "version": "13.12.0", + "license": "MIT" + }, + "node_modules/@types/yargs": { + "version": 
"17.0.32", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "7.2.0", + "@typescript-eslint/type-utils": "7.2.0", + "@typescript-eslint/utils": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^7.0.0", + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.2.0", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/utils": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "7.2.0", + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/typescript-estree": "7.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { 
+ "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/minimatch": { + "version": "9.0.3", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "7.2.0", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/scope-manager": "7.2.0", + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/typescript-estree": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.2.0", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + 
"node_modules/@typescript-eslint/parser/node_modules/minimatch": { + "version": "9.0.3", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "7.18.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "7.2.0", + "@typescript-eslint/utils": "7.2.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/scope-manager": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "7.2.0", + "dev": true, + "license": "BSD-2-Clause", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/visitor-keys": "7.2.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/utils": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "7.2.0", + "@typescript-eslint/types": "7.2.0", + "@typescript-eslint/typescript-estree": "7.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + } + }, + 
"node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "@typescript-eslint/types": "7.2.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "optional": true, + "peer": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/minimatch": { + "version": "9.0.3", + "dev": true, + "license": "ISC", + "optional": true, + "peer": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "7.18.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "7.18.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "9.0.5", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "7.18.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "7.18.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript/vfs": { + "version": "1.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.1.1" + }, + "peerDependencies": { + "typescript": "*" + } + }, + "node_modules/@ungap/structured-clone": { + "version": 
"1.2.0", + "dev": true, + "license": "ISC" + }, + "node_modules/abbrev": { + "version": "1.1.1", + "license": "ISC" + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/acorn": { + "version": "8.12.1", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/agentkeepalive": { + "version": "4.5.0", + "dev": true, + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-colors": { + "version": "4.1.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/aproba": { + "version": "2.0.0", + "license": "ISC" + }, + "node_modules/are-we-there-yet": { + "version": "2.0.0", + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asn1": { + "version": "0.2.6", + "license": "MIT", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/async": { + "version": "3.2.5", + "dev": true, + "license": "MIT" + }, + "node_modules/async-mutex": { + "version": 
"0.5.0", + "license": "MIT", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "license": "MIT" + }, + "node_modules/avsc": { + "version": "5.7.7", + "license": "MIT", + "engines": { + "node": ">=0.11" + } + }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.13.1", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.7.7", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.7.tgz", + "integrity": "sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/semver": { + "version": "6.3.1", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": 
"^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "license": "BSD-3-Clause", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, + "node_modules/bignumber.js": { + "version": "9.1.2", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "license": "MIT", + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bluebird": { + "version": "3.7.2", + "dev": true, + "license": "MIT" + }, + "node_modules/bowser": { + "version": "2.11.0", + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/brotli": { + "version": "1.3.3", + "license": "MIT", + "dependencies": { + "base64-js": "^1.1.2" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "dev": true, + "license": "ISC" + }, + "node_modules/browserslist": { + "version": "4.23.3", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bs-logger": { + "version": "0.2.6", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-json-stable-stringify": "2.x" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-equal-constant-time": { + "version": "1.0.1", + "license": "BSD-3-Clause" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/cacache": { + "version": "16.1.3", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": 
"^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/cacache/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/cacache/node_modules/glob": { + "version": "8.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/cacache/node_modules/lru-cache": { + "version": "7.18.3", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/cacache/node_modules/minimatch": { + "version": "5.1.6", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001646", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/caseless": { + "version": "0.12.0", + "license": "Apache-2.0" + }, + "node_modules/catharsis": { + "version": "0.9.0", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.15" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/chownr": { + "version": "2.0.0", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "license": "ISC", + 
"dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "license": "MIT" + }, + "node_modules/color-support": { + "version": "1.1.3", + "license": "ISC", + "bin": { + "color-support": "bin.js" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "license": "ISC" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/create-jest": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/dashdash": { + "version": "1.14.1", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/debug": { + "version": "4.3.6", + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dedent": { + "version": "1.5.3", + "dev": true, + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/delegates": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.0.3", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "dev": 
true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/discontinuous-range": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/duplexify": { + "version": "4.1.3", + "license": "MIT", + "dependencies": { + "end-of-stream": "^1.4.1", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1", + "stream-shift": "^1.0.2" + } + }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "license": "MIT", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "node_modules/ecc-jsbn/node_modules/jsbn": { + "version": "0.1.1", + "license": "MIT" + }, + "node_modules/ecdsa-sig-formatter": { + "version": "1.0.11", + "license": "Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + } + }, + "node_modules/ejs": { + "version": "3.1.10", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "jake": "^10.8.5" + }, + "bin": { + "ejs": "bin/cli.js" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.4", + "dev": true, + "license": "ISC" + }, + "node_modules/emittery": { + "version": "0.13.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "license": "MIT" + }, + "node_modules/encoding": { + "version": "0.1.13", + "license": "MIT", + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "license": "MIT", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/err-code": { + "version": "2.0.3", + "dev": true, + "license": "MIT" + }, + "node_modules/error-ex": { + "version": "1.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + 
"@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-jest": { + "version": "28.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/utils": "^6.0.0 || ^7.0.0" + }, + "engines": { + "node": "^16.10.0 || ^18.12.0 || >=20.0.0" + }, + "peerDependencies": { + "@typescript-eslint/eslint-plugin": "^6.0.0 || ^7.0.0", + "eslint": "^7.0.0 || ^8.0.0 || ^9.0.0", + "jest": "*" + }, + "peerDependenciesMeta": { + "@typescript-eslint/eslint-plugin": { + "optional": true + }, + "jest": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-tsdoc": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-tsdoc/-/eslint-plugin-tsdoc-0.3.0.tgz", + "integrity": "sha512-0MuFdBrrJVBjT/gyhkP2BqpD0np1NxNLfQ38xXDlSs/KVVpKI2A6vN7jx2Rve/CyUsvOsMGwp9KKrinv7q9g3A==", + "dev": true, + "dependencies": { + "@microsoft/tsdoc": "0.15.0", + "@microsoft/tsdoc-config": "0.17.0" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/@eslint/js": { + "version": "8.57.0", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.12.6", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "dev": true, + "license": "MIT" + }, + "node_modules/espree": { + "version": "9.6.1", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "dev": true, + "license": "BSD-2-Clause", + 
"bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/events": { + "version": "3.3.0", + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/exponential-backoff": { + "version": "3.1.1", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/extend": { + "version": "3.0.2", + "license": "MIT" + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.0.1", + "license": "MIT" + }, + "node_modules/fast-xml-parser": { + "version": "4.4.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + }, + { + "type": "paypal", + "url": "https://paypal.me/naturalintelligence" + } + ], + "license": "MIT", + "dependencies": { + "strnum": "^1.0.5" + }, + "bin": { + "fxparser": "src/cli/cli.js" + } + }, + "node_modules/fastq": { + "version": "1.17.1", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } 
+ }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/filelist": { + "version": "1.0.4", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "minimatch": "^5.0.1" + } + }, + "node_modules/filelist/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/filelist/node_modules/minimatch": { + "version": "5.1.6", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/forever-agent": { + "version": "0.6.1", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gauge": { + "version": "3.0.2", + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/gaxios": { + "version": "6.7.1", + 
"license": "Apache-2.0", + "dependencies": { + "extend": "^3.0.2", + "https-proxy-agent": "^7.0.1", + "is-stream": "^2.0.0", + "node-fetch": "^2.6.9", + "uuid": "^9.0.1" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gaxios/node_modules/agent-base": { + "version": "7.1.1", + "license": "MIT", + "dependencies": { + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/gaxios/node_modules/https-proxy-agent": { + "version": "7.0.5", + "license": "MIT", + "dependencies": { + "agent-base": "^7.0.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/gcp-metadata": { + "version": "6.1.0", + "license": "Apache-2.0", + "dependencies": { + "gaxios": "^6.0.0", + "json-bigint": "^1.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/google-auth-library": { + "version": "9.14.0", + "license": "Apache-2.0", + "dependencies": { + "base64-js": "^1.3.0", + "ecdsa-sig-formatter": "^1.0.11", + "gaxios": "^6.1.1", + "gcp-metadata": "^6.1.0", + "gtoken": "^7.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/google-gax": { + "version": "4.3.9", + "license": "Apache-2.0", + "dependencies": { + "@grpc/grpc-js": "^1.10.9", + "@grpc/proto-loader": "^0.7.13", + "@types/long": "^4.0.0", + "abort-controller": "^3.0.0", + "duplexify": "^4.0.0", + "google-auth-library": "^9.3.0", + "node-fetch": "^2.7.0", + "object-hash": "^3.0.0", + "proto3-json-serializer": "^2.0.2", + "protobufjs": "^7.3.2", + "retry-request": "^7.0.0", + "uuid": "^9.0.1" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "dev": true, + 
"license": "ISC" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/gtoken": { + "version": "7.1.0", + "license": "MIT", + "dependencies": { + "gaxios": "^6.0.0", + "jws": "^4.0.0" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/har-schema": { + "version": "2.0.0", + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/har-validator": { + "version": "5.1.5", + "license": "MIT", + "dependencies": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/har-validator/node_modules/ajv": { + "version": "6.12.6", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/har-validator/node_modules/json-schema-traverse": { + "version": "0.4.1", + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "license": "ISC" + }, + "node_modules/hasown": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "dev": true, + "license": "MIT" + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/http-proxy-agent": { + "version": "5.0.0", + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/http-signature": { + "version": "1.3.6", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^2.0.2", + "sshpk": "^1.14.1" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "license": "MIT", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/infer-owner": { + "version": "1.0.4", + "dev": true, + "license": "ISC" + }, + "node_modules/inflight": { + "version": "1.0.6", + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "license": "ISC" + }, + "node_modules/ip-address": { + "version": "9.0.5", + "dev": true, + "license": "MIT", + "dependencies": { + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "dev": true, + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.15.0", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-lambda": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/is-number": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "2.2.0", + "license": "MIT", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "dev": true, + "license": "ISC" + }, + "node_modules/isstream": { + "version": "0.1.2", + "license": "MIT" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + 
"node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.3", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@babel/core": "^7.23.9", + "@babel/parser": "^7.23.9", + "@istanbuljs/schema": "^0.1.3", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/make-dir": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jake": { + "version": "10.9.2", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "async": "^3.2.3", + "chalk": "^4.0.2", + "filelist": "^1.0.4", + "minimatch": "^3.1.2" + }, + "bin": { + "jake": "bin/cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, + "bin": { + "jest": 
"bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-node": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": 
"^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-snapshot": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-util": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/jju": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/jju/-/jju-1.4.0.tgz", + "integrity": "sha512-8wb9Yw966OSxApiCt0K3yNJL8pnNeIv+OEq2YMidz4FKP6nonSRoOXc80iXY4JaN2FC11B9qsNmDsm+ZOfMROA==", + "dev": true + }, + "node_modules/joi": { + "version": "17.13.3", + "resolved": "https://registry.npmjs.org/joi/-/joi-17.13.3.tgz", + "integrity": "sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA==", + "license": "BSD-3-Clause", + "dependencies": { + "@hapi/hoek": "^9.3.0", + "@hapi/topo": "^5.1.0", + "@sideway/address": "^4.1.5", + "@sideway/formula": "^3.0.1", + "@sideway/pinpoint": "^2.0.0" + } + }, + "node_modules/joi/node_modules/@hapi/hoek": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz", + "integrity": 
"sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==", + "license": "BSD-3-Clause" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/js2xmlparser": { + "version": "4.0.2", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "xmlcreate": "^2.0.4" + } + }, + "node_modules/jsbn": { + "version": "1.1.0", + "dev": true, + "license": "MIT" + }, + "node_modules/jsdoc": { + "version": "4.0.3", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@babel/parser": "^7.20.15", + "@jsdoc/salty": "^0.2.1", + "@types/markdown-it": "^14.1.1", + "bluebird": "^3.7.2", + "catharsis": "^0.9.0", + "escape-string-regexp": "^2.0.0", + "js2xmlparser": "^4.0.2", + "klaw": "^3.0.0", + "markdown-it": "^14.1.0", + "markdown-it-anchor": "^8.6.7", + "marked": "^4.0.10", + "mkdirp": "^1.0.4", + "requizzle": "^0.2.3", + "strip-json-comments": "^3.1.0", + "underscore": "~1.13.2" + }, + "bin": { + "jsdoc": "jsdoc.js" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/jsdoc/node_modules/escape-string-regexp": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-bigint": { + "version": "1.0.0", + "license": "MIT", + "dependencies": { + "bignumber.js": "^9.0.0" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema": { + "version": "0.4.0", + "license": "(AFL-2.1 OR BSD-3-Clause)" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stringify-deterministic": { + "version": "1.0.12", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "license": "ISC" + }, + "node_modules/json5": { + "version": "2.2.3", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonata": { + "version": "2.0.5", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/jsonwebtoken": { + "version": "9.0.2", + "license": "MIT", + "dependencies": { + "jws": "^3.2.2", + "lodash.includes": "^4.3.0", + "lodash.isboolean": "^3.0.3", + "lodash.isinteger": "^4.0.4", + "lodash.isnumber": "^3.0.3", + "lodash.isplainobject": "^4.0.6", + "lodash.isstring": "^4.0.1", + "lodash.once": "^4.0.0", + "ms": "^2.1.1", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=12", + "npm": ">=6" + } + }, + "node_modules/jsonwebtoken/node_modules/jwa": { + "version": "1.4.1", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jsonwebtoken/node_modules/jws": { + "version": "3.2.2", + "license": "MIT", + "dependencies": { + "jwa": "^1.4.1", + "safe-buffer": "^5.0.1" + } + }, + 
"node_modules/jsprim": { + "version": "2.0.2", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + } + }, + "node_modules/jwa": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "buffer-equal-constant-time": "1.0.1", + "ecdsa-sig-formatter": "1.0.11", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/jws": { + "version": "4.0.0", + "license": "MIT", + "dependencies": { + "jwa": "^2.0.0", + "safe-buffer": "^5.0.1" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/klaw": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.9" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "dev": true, + "license": "MIT" + }, + "node_modules/linkify-it": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "uc.micro": "^2.0.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.camelcase": { + "version": "4.3.0", + "license": "MIT" + }, + "node_modules/lodash.includes": { + "version": "4.3.0", + "license": "MIT" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "license": "MIT" + }, + "node_modules/lodash.isinteger": { + "version": "4.0.4", + "license": "MIT" + }, + "node_modules/lodash.isnumber": { + "version": "3.0.3", + "license": "MIT" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "license": "MIT" + }, + "node_modules/lodash.isstring": { + "version": "4.0.1", + "license": "MIT" + }, + "node_modules/lodash.memoize": { + "version": "4.1.2", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.once": { + "version": "4.1.1", + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/long": { + "version": "5.2.3", + "license": "Apache-2.0" + }, + "node_modules/lru-cache": { + "version": "11.0.0", + "license": "ISC", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/make-dir": { + "version": "3.1.0", + "license": "MIT", + "dependencies": { + "semver": "^6.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/semver": { + "version": "6.3.1", + "license": "ISC", + "bin": { + "semver": 
"bin/semver.js" + } + }, + "node_modules/make-error": { + "version": "1.3.6", + "dev": true, + "license": "ISC" + }, + "node_modules/make-fetch-happen": { + "version": "10.2.1", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/make-fetch-happen/node_modules/lru-cache": { + "version": "7.18.3", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/makeerror": { + "version": "1.0.12", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/markdown-it": { + "version": "14.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + }, + "bin": { + "markdown-it": "bin/markdown-it.mjs" + } + }, + "node_modules/markdown-it-anchor": { + "version": "8.6.7", + "dev": true, + "license": "Unlicense", + "peerDependencies": { + "@types/markdown-it": "*", + "markdown-it": "*" + } + }, + "node_modules/marked": { + "version": "4.3.0", + "dev": true, + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/mdurl": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minipass": { + "version": "3.3.6", + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-collect": { + "version": "1.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-fetch": { + "version": "2.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + 
"engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/minipass-flush": { + "version": "1.0.5", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-pipeline": { + "version": "1.2.4", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized": { + "version": "1.0.3", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass/node_modules/yallist": { + "version": "4.0.0", + "license": "ISC" + }, + "node_modules/minizlib": { + "version": "2.1.2", + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minizlib/node_modules/yallist": { + "version": "4.0.0", + "license": "ISC" + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha": { + "version": "10.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^8.1.0", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^16.2.0", + "yargs-parser": "^20.2.9", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/mocha/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/mocha/node_modules/cliui": { + "version": "7.0.4", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/mocha/node_modules/glob": { + "version": "8.1.0", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mocha/node_modules/minimatch": { + "version": "5.1.6", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/ms": { + "version": "2.1.3", + "dev": true, + "license": "MIT" + }, + "node_modules/mocha/node_modules/supports-color": { + "version": "8.1.1", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/mocha/node_modules/yargs": { + "version": "16.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": 
"^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/moo": { + "version": "0.5.2", + "license": "BSD-3-Clause" + }, + "node_modules/ms": { + "version": "2.1.2", + "license": "MIT" + }, + "node_modules/mustache": { + "version": "4.2.0", + "license": "MIT", + "bin": { + "mustache": "bin/mustache" + } + }, + "node_modules/nan": { + "version": "2.20.0", + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/nearley": { + "version": "2.20.1", + "license": "MIT", + "dependencies": { + "commander": "^2.19.0", + "moo": "^0.5.0", + "railroad-diagrams": "^1.0.0", + "randexp": "0.4.6" + }, + "bin": { + "nearley-railroad": "bin/nearley-railroad.js", + "nearley-test": "bin/nearley-test.js", + "nearley-unparse": "bin/nearley-unparse.js", + "nearleyc": "bin/nearleyc.js" + }, + "funding": { + "type": "individual", + "url": "https://nearley.js.org/#give-to-nearley" + } + }, + "node_modules/negotiator": { + "version": "0.6.3", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-gyp": { + "version": "9.4.1", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", + "nopt": "^6.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + "which": "^2.0.2" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^12.13 || ^14.13 || >=16" + } + }, + "node_modules/node-gyp/node_modules/are-we-there-yet": { + "version": "3.0.1", + "dev": true, + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/gauge": { + "version": "4.0.4", + "dev": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/nopt": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^1.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/npmlog": { + "version": "6.0.2", + "dev": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-int64": { + "version": "0.4.0", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.18", + "dev": true, + "license": "MIT" + }, + "node_modules/node-vault": { + "version": "0.10.2", + "license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "mustache": "^4.2.0", + "postman-request": "^2.88.1-postman.33", + "tv4": "^1.3.0" + }, 
+ "engines": { + "node": ">= 16.0.0" + } + }, + "node_modules/nopt": { + "version": "5.0.0", + "license": "ISC", + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npmlog": { + "version": "5.0.1", + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^2.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^3.0.0", + "set-blocking": "^2.0.0" + } + }, + "node_modules/oauth-sign": { + "version": "0.9.0", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/once": { + "version": "1.4.0", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "8.4.2", + "license": "MIT", + "dependencies": { + "define-lazy-prop": "^2.0.0", + "is-docker": "^2.1.1", + "is-wsl": "^2.2.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "dev": true, + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/performance-now": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.0.1", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/find-up": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/locate-path": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pkg-dir/node_modules/p-limit": { + "version": "2.3.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pkg-dir/node_modules/p-locate": { + "version": "4.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/postman-request": { + "version": "2.88.1-postman.39", + "license": "Apache-2.0", + "dependencies": { + "@postman/form-data": "~3.1.1", + "@postman/tough-cookie": "~4.1.3-postman.1", + "@postman/tunnel-agent": "^0.6.4", + "aws-sign2": "~0.7.0", + "aws4": "^1.12.0", + "brotli": "^1.3.3", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "har-validator": "~5.1.3", + "http-signature": "~1.3.1", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "^2.1.35", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.3", + "safe-buffer": "^5.1.2", + "stream-length": "^1.0.2", + "uuid": "^8.3.2" + }, + "engines": { + "node": ">= 16" + } + }, + "node_modules/postman-request/node_modules/uuid": { + "version": "8.3.2", + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + 
}, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "dev": true, + "license": "ISC" + }, + "node_modules/promise-retry": { + "version": "2.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "dev": true, + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/proto3-json-serializer": { + "version": "2.0.2", + "license": "Apache-2.0", + "dependencies": { + "protobufjs": "^7.2.5" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/protobufjs": { + "version": "7.4.0", + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "license": "MIT" + }, + "node_modules/psl": { + "version": "1.9.0", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/punycode.js": { + "version": "2.3.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.1.0", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ], + "license": "MIT" + }, + "node_modules/qs": { + "version": "6.5.3", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/querystringify": { + "version": "2.2.0", + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/railroad-diagrams": { + "version": "1.0.0", + "license": "CC0-1.0" + }, + "node_modules/randexp": { + "version": "0.4.6", + "license": "MIT", + "dependencies": { + "discontinuous-range": "1.0.0", + "ret": "~0.1.10" + }, + "engines": { + "node": ">=0.12" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "dev": true, + "license": "MIT" + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "license": "MIT", + "engines": { + 
"node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/requizzle": { + "version": "0.2.4", + "dev": true, + "license": "MIT", + "dependencies": { + "lodash": "^4.17.21" + } + }, + "node_modules/resolve": { + "version": "1.22.8", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-cwd/node_modules/resolve-from": { + "version": "5.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.2", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/ret": { + "version": "0.1.15", + "license": "MIT", + "engines": { + "node": ">=0.12" + } + }, + "node_modules/retry": { + "version": "0.12.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/retry-request": { + "version": "7.0.2", + "license": "MIT", + "dependencies": { + "@types/request": "^2.48.8", + "extend": "^3.0.2", + "teeny-request": "^9.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "license": "MIT" + }, + "node_modules/schemaregistry-examples": { + "resolved": "schemaregistry-examples", + "link": true + }, + "node_modules/semver": { + "version": "7.6.3", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "license": "ISC" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + 
}, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "license": "ISC" + }, + "node_modules/simple-oauth2": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/simple-oauth2/-/simple-oauth2-5.1.0.tgz", + "integrity": "sha512-gWDa38Ccm4MwlG5U7AlcJxPv3lvr80dU7ARJWrGdgvOKyzSj1gr3GBPN1rABTedAYvC/LsGYoFuFxwDBPtGEbw==", + "license": "Apache-2.0", + "dependencies": { + "@hapi/hoek": "^11.0.4", + "@hapi/wreck": "^18.0.0", + "debug": "^4.3.4", + "joi": "^17.6.4" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "dev": true, + "license": "MIT" + }, + "node_modules/slash": { + "version": "3.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/smtp-address-parser": { + "version": "1.1.0", + "license": "MIT", + "dependencies": { + "nearley": "^2.20.1" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/socks": { + "version": "2.8.3", + "dev": true, + "license": "MIT", + "dependencies": { + "ip-address": "^9.0.5", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "7.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.1.3", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/sshpk": { + "version": "1.18.0", + "license": "MIT", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sshpk/node_modules/jsbn": { + "version": "0.1.1", + "license": "MIT" + }, + "node_modules/ssri": { + "version": "9.0.1", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/stack-utils": { + "version": "2.0.6", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/stack-utils/node_modules/escape-string-regexp": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/stoppable": { + "version": "1.1.0", + "license": "MIT", + "engines": { + "node": ">=4", + "npm": ">=6" + } + }, + "node_modules/stream-events": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "stubs": "^3.0.0" + } + }, + "node_modules/stream-length": { + "version": "1.0.2", + "license": "WTFPL", + "dependencies": { + 
"bluebird": "^2.6.2" + } + }, + "node_modules/stream-length/node_modules/bluebird": { + "version": "2.11.0", + "license": "MIT" + }, + "node_modules/stream-shift": { + "version": "1.0.3", + "license": "MIT" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "dev": true, + "license": "MIT", + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strnum": { + "version": "1.0.5", + "license": "MIT" + }, + "node_modules/stubs": { + "version": "3.0.0", + "license": "MIT" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/tar/node_modules/yallist": { + "version": "4.0.0", + "license": "ISC" + }, + "node_modules/teeny-request": { + "version": "9.0.0", + "license": "Apache-2.0", + "dependencies": { + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "node-fetch": "^2.6.9", + "stream-events": "^1.0.5", + "uuid": "^9.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "dev": true, + "license": "MIT" + }, + "node_modules/tmpl": { + "version": "1.0.5", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toad-uri-js": { + "version": "5.0.1", + "license": 
"BSD-2-Clause-Views", + "dependencies": { + "punycode": "^2.3.1" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "license": "MIT" + }, + "node_modules/ts-api-utils": { + "version": "1.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-jest": { + "version": "29.2.5", + "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.5.tgz", + "integrity": "sha512-KD8zB2aAZrcKIdGk4OwpJggeLcH1FgrICqDSROWqlnJXGCXK4Mn6FcdK2B6670Xr73lHMG1kHw8R87A0ecZ+vA==", + "dev": true, + "dependencies": { + "bs-logger": "^0.2.6", + "ejs": "^3.1.10", + "fast-json-stable-stringify": "^2.1.0", + "jest-util": "^29.0.0", + "json5": "^2.2.3", + "lodash.memoize": "^4.1.2", + "make-error": "^1.3.6", + "semver": "^7.6.3", + "yargs-parser": "^21.1.1" + }, + "bin": { + "ts-jest": "cli.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "@babel/core": ">=7.0.0-beta.0 <8", + "@jest/transform": "^29.0.0", + "@jest/types": "^29.0.0", + "babel-jest": "^29.0.0", + "jest": "^29.0.0", + "typescript": ">=4.3 <6" + }, + "peerDependenciesMeta": { + "@babel/core": { + "optional": true + }, + "@jest/transform": { + "optional": true + }, + "@jest/types": { + "optional": true + }, + "babel-jest": { + "optional": true + }, + "esbuild": { + "optional": true + } + } + }, + "node_modules/ts-jest/node_modules/yargs-parser": { + "version": "21.1.1", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "license": "0BSD" + }, + "node_modules/tv4": { + "version": "1.3.0", + "license": [ + { + "type": "Public Domain", + "url": "http://geraintluff.github.io/tv4/LICENSE.txt" + }, + { + "type": "MIT", + "url": "http://jsonary.com/LICENSE.txt" + } + ], + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "license": "Unlicense" + }, + "node_modules/type-check": { + "version": "0.4.0", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.5.4", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.2.0", + "@typescript-eslint/parser": "8.2.0", + "@typescript-eslint/utils": "8.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/typescript-eslint/node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.2.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "8.2.0", + "@typescript-eslint/type-utils": "8.2.0", + "@typescript-eslint/utils": "8.2.0", + 
"@typescript-eslint/visitor-keys": "8.2.0", + "graphemer": "^1.4.0", + "ignore": "^5.3.1", + "natural-compare": "^1.4.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.0.0 || ^8.0.0-alpha.0", + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/parser": { + "version": "8.2.0", + "dev": true, + "license": "BSD-2-Clause", "dependencies": { - "ansi-regex": "^5.0.1" + "@typescript-eslint/scope-manager": "8.2.0", + "@typescript-eslint/types": "8.2.0", + "@typescript-eslint/typescript-estree": "8.2.0", + "@typescript-eslint/visitor-keys": "8.2.0", + "debug": "^4.3.4" }, "engines": { - "node": ">=8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/scope-manager": { + "version": "8.2.0", "dev": true, + "license": "MIT", "dependencies": { - "ansi-regex": "^5.0.1" + "@typescript-eslint/types": "8.2.0", + "@typescript-eslint/visitor-keys": "8.2.0" }, "engines": { - "node": ">=8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/type-utils": { + "version": "8.2.0", "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "8.2.0", + "@typescript-eslint/utils": "8.2.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.3.0" + }, "engines": { - "node": ">=8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/types": { + "version": "8.2.0", "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, + "license": "MIT", "engines": { - "node": ">=10" + "node": "^18.18.0 
|| ^20.9.0 || >=21.1.0" }, "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/taffydb": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/taffydb/-/taffydb-2.6.2.tgz", - "integrity": "sha512-y3JaeRSplks6NYQuCOj3ZFMO3j60rTwbuKCvZxsAraGYH2epusatvZ0baZYA01WsGqJBq/Dl6vOrMUJqyMj8kA==", - "dev": true - }, - "node_modules/tar": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/tar/-/tar-6.2.0.tgz", - "integrity": "sha512-/Wo7DcT0u5HUV486xg675HtjNd3BXZ6xDbzsCUZPt5iw8bTQ63bP0Raut3mvro9u+CUyq7YQd8Cx55fsZXxqLQ==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/typescript-estree": { + "version": "8.2.0", + "dev": true, + "license": "BSD-2-Clause", "dependencies": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" + "@typescript-eslint/types": "8.2.0", + "@typescript-eslint/visitor-keys": "8.2.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" }, "engines": { - "node": ">=10" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, - "node_modules/tar/node_modules/fs-minipass": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", - "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/utils": { + "version": "8.2.0", + "dev": true, + "license": "MIT", "dependencies": { - "minipass": "^3.0.0" + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "8.2.0", + "@typescript-eslint/types": "8.2.0", + "@typescript-eslint/typescript-estree": "8.2.0" }, "engines": { - "node": ">= 8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0" } }, - "node_modules/tar/node_modules/fs-minipass/node_modules/minipass": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.3.6.tgz", - "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "node_modules/typescript-eslint/node_modules/@typescript-eslint/visitor-keys": { + "version": "8.2.0", + "dev": true, + "license": "MIT", "dependencies": { - "yallist": "^4.0.0" + "@typescript-eslint/types": "8.2.0", + "eslint-visitor-keys": "^3.4.3" }, "engines": { - "node": ">=8" + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "node_modules/typescript-eslint/node_modules/brace-expansion": { + "version": "2.0.1", "dev": true, + "license": "MIT", "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" + "balanced-match": "^1.0.0" } }, - 
"node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "node_modules/typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "node_modules/typescript-eslint/node_modules/minimatch": { + "version": "9.0.5", "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" }, "engines": { - "node": ">=14.17" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==", - "dev": true + "version": "2.1.0", + "dev": true, + "license": "MIT" }, "node_modules/underscore": { - "version": "1.13.6", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.6.tgz", - "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==", - "dev": true + "version": "1.13.7", + "dev": true, + "license": "MIT" }, "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", - "dev": true + "version": "6.19.8", + "license": "MIT" }, "node_modules/unique-filename": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-3.0.0.tgz", - "integrity": "sha512-afXhuC55wkAmZ0P18QsVE6kp8JaxrEokN2HGIoIVv2ijHQd419H0+6EigAFcIzXeMIkcIkNBpB3L/DXB3cTS/g==", + "version": "2.0.1", "dev": true, + "license": "ISC", "dependencies": { - "unique-slug": "^4.0.0" + "unique-slug": "^3.0.0" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/unique-slug": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/unique-slug/-/unique-slug-4.0.0.tgz", - "integrity": "sha512-WrcA6AyEfqDX5bWige/4NQfPZMtASNVxdmWR76WESYQVAACSgWcR6e9i0mofqqBxYFtL4oAxPIptY73/0YE1DQ==", + "version": "3.0.0", "dev": true, + "license": "ISC", "dependencies": { "imurmurhash": "^0.1.4" }, "engines": { - "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/universalify": { + "version": "0.2.0", + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.0", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.1.2", + "picocolors": "^1.0.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + 
"node_modules/url-parse": { + "version": "1.5.10", + "license": "MIT", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" } }, "node_modules/util-deprecate": { "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + "license": "MIT" + }, + "node_modules/uuid": { + "version": "9.0.1", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/validator": { + "version": "13.12.0", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "makeerror": "1.0.12" + } }, "node_modules/webidl-conversions": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + "license": "BSD-2-Clause" }, "node_modules/whatwg-url": { "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" @@ -2578,9 +9173,8 @@ }, "node_modules/which": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", "dev": true, + "license": "ISC", "dependencies": { "isexe": "^2.0.0" }, @@ -2593,23 +9187,27 @@ }, "node_modules/wide-align": { "version": "1.1.5", - "resolved": "https://registry.npmjs.org/wide-align/-/wide-align-1.1.5.tgz", - "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "license": "ISC", "dependencies": { "string-width": "^1.0.2 || 2 || 3 || 4" } }, + "node_modules/word-wrap": { + "version": "1.2.5", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/workerpool": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.2.1.tgz", - "integrity": "sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw==", - "dev": true + "version": "6.5.1", + "dev": true, + "license": "Apache-2.0" }, "node_modules/wrap-ansi": { "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", @@ -2622,81 +9220,67 @@ "url": 
"https://github.com/chalk/wrap-ansi?sponsor=1" } }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/wrappy": { + "version": "1.0.2", + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", "dev": true, + "license": "ISC", "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" - }, "node_modules/xmlcreate": { "version": "2.0.4", - "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.4.tgz", - "integrity": "sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==", - "dev": true + "dev": true, + "license": "Apache-2.0" }, "node_modules/y18n": { "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true, + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + "version": "3.1.1", + "dev": true, + "license": "ISC" }, "node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, + "version": "17.7.2", + "license": "MIT", "dependencies": { - "cliui": "^7.0.2", + "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", "require-directory": "^2.1.1", - "string-width": "^4.2.0", + "string-width": "^4.2.3", "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" + "yargs-parser": "^21.1.1" }, "engines": { - "node": ">=10" + "node": ">=12" } }, "node_modules/yargs-parser": { - "version": "20.2.4", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.4.tgz", - "integrity": "sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA==", + "version": "20.2.9", "dev": true, + "license": "ISC", "engines": { "node": ">=10" } }, "node_modules/yargs-unparser": { "version": "2.0.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", - "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", "dev": true, + "license": "MIT", "dependencies": { "camelcase": "^6.0.0", "decamelize": "^4.0.0", @@ -2707,17 +9291,119 @@ "node": ">=10" } }, + "node_modules/yargs-unparser/node_modules/camelcase": { + "version": "6.3.0", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yargs/node_modules/yargs-parser": { + 
"version": "21.1.1", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, + "license": "MIT", "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "schemaregistry": { + "name": "@confluentinc/schemaregistry", + "version": "v0.1.17.6-devel", + "license": "MIT", + "dependencies": { + "@aws-sdk/client-kms": "^3.637.0", + "@azure/identity": "^4.4.1", + "@azure/keyvault-keys": "^4.8.0", + "@bufbuild/protobuf": "^2.0.0", + "@criteria/json-schema": "^0.10.0", + "@criteria/json-schema-validation": "^0.10.0", + "@google-cloud/kms": "^4.5.0", + "@hackbg/miscreant-esm": "^0.3.2-patch.3", + "@smithy/types": "^3.3.0", + "@types/validator": "^13.12.0", + "ajv": "^8.17.1", + "async-mutex": "^0.5.0", + "avsc": "^5.7.7", + "axios": "^1.7.3", + "json-stringify-deterministic": "^1.0.12", + "jsonata": "^2.0.5", + "lru-cache": "^11.0.0", + "node-vault": "^0.10.2", + "simple-oauth2": "^5.1.0", + "validator": "^13.12.0" + }, + "devDependencies": { + "@bufbuild/buf": "^1.37.0", + "@bufbuild/protoc-gen-es": "^2.0.0", + "@confluentinc/kafka-javascript": "^0.2.0", + "@eslint/js": "^9.9.0", + "@types/eslint__js": "^8.42.3", + "@types/node": "^20.16.1", + "@types/uuid": "^10.0.0", + "bluebird": "^3.5.3", + "eslint": "^8.57.0", + "eslint-plugin-jest": "^28.6.0", + "eslint-plugin-tsdoc": "^0.3.0", + "jest": "^29.7.0", + "jsdoc": "^4.0.2", + "mocha": "^10.7.0", + "node-gyp": "^9.3.1", + "ts-jest": "^29.2.4", + "typescript": "^5.5.4", + "typescript-eslint": "^8.2.0", + "uuid": "^10.0.0" + } + }, + "schemaregistry-examples": { + "version": "1.0.0", + "license": "ISC", + "devDependencies": { + "@confluentinc/kafka-javascript": "^0.2.0", + "@confluentinc/schemaregistry": "^v0.1.17.6-devel", + "uuid": "^10.0.0" + } + }, + "schemaregistry-examples/node_modules/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "schemaregistry/node_modules/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "license": "MIT", + "bin": { + "uuid": "dist/bin/uuid" + } } } } diff --git a/package.json b/package.json index ed3f640b..92b9d0a0 100644 --- a/package.json +++ b/package.json @@ -1,8 +1,9 @@ { - "name": "confluent-kafka-js", - "version": "v2.18.0", + "name": "@confluentinc/kafka-javascript", + "version": "v0.2.1", "description": "Node.js bindings for librdkafka", - "librdkafka": "2.3.0", + "librdkafka": "2.5.3", + "librdkafka_win": "2.5.3", "main": "lib/index.js", "types": "types/index.d.ts", "scripts": { @@ -14,10 +15,10 @@ "test:types": "tsc -p ." 
}, "binary": { - "module_name": "confluent-kafka-js", + "module_name": "confluent-kafka-javascript", "module_path": "./build/{configuration}/", - "package_name": "{module_name}-v{version}-{node_abi}-{platform}-{arch}.tar.gz", - "host": "https://github.com/confluentinc/confluent-kafka-js/releases/download/", + "package_name": "{module_name}-v{version}-{node_abi}-{platform}-{libc}-{arch}.tar.gz", + "host": "https://github.com/confluentinc/confluent-kafka-javascript/releases/download/", "remote_path": "v{version}" }, "keywords": [ @@ -26,34 +27,57 @@ ], "repository": { "type": "git", - "url": "git@github.com:confluentinc/confluent-kafka-js.git" + "url": "git@github.com:confluentinc/confluent-kafka-javascript.git" }, - "contributors": [ - { - "name": "Stephen Parente", - "email": "webmakersteve@gmail.com" - }, - { - "name": "Matt Gollob", - "email": "mattness@users.noreply.github.com" - } - ], "license": "MIT", "devDependencies": { - "@types/node": "^20.4.5", + "@bufbuild/buf": "^1.37.0", + "@bufbuild/protoc-gen-es": "^2.0.0", + "@eslint/js": "^9.9.0", + "@types/eslint__js": "^8.42.3", + "@types/jest": "^29.5.13", + "@types/node": "^20.16.1", "bluebird": "^3.5.3", - "jsdoc": "^3.4.0", - "jshint": "^2.10.1", - "mocha": "^10.2.0", + "eslint": "^8.57.0", + "eslint-plugin-jest": "^28.6.0", + "jest": "^29.7.0", + "jsdoc": "^4.0.2", + "mocha": "^10.7.0", "node-gyp": "^9.3.1", - "typescript": "^5.1.6" + "ts-jest": "^29.2.5", + "typescript": "^5.5.4", + "typescript-eslint": "^8.2.0" }, "dependencies": { + "@aws-sdk/client-kms": "^3.637.0", + "@azure/identity": "^4.4.1", + "@azure/keyvault-keys": "^4.8.0", + "@bufbuild/protobuf": "^2.0.0", + "@criteria/json-schema": "^0.10.0", + "@criteria/json-schema-validation": "^0.10.0", + "@google-cloud/kms": "^4.5.0", + "@hackbg/miscreant-esm": "^0.3.2-patch.3", "@mapbox/node-pre-gyp": "^1.0.11", + "@smithy/types": "^3.3.0", + "@types/simple-oauth2": "^5.0.7", + "@types/validator": "^13.12.0", + "ajv": "^8.17.1", + "async-mutex": "^0.5.0", + "avsc": "^5.7.7", + "axios": "^1.7.3", "bindings": "^1.3.1", - "nan": "^2.17.0" + "json-stringify-deterministic": "^1.0.12", + "lru-cache": "^11.0.0", + "nan": "^2.17.0", + "node-vault": "^0.10.2", + "simple-oauth2": "^5.1.0", + "validator": "^13.12.0" }, "engines": { - "node": ">=14.0.0" - } + "node": ">=18.0.0" + }, + "workspaces": [ + "schemaregistry", + "schemaregistry-examples" + ] } diff --git a/proto/confluent/meta.proto b/proto/confluent/meta.proto new file mode 100644 index 00000000..6016459b --- /dev/null +++ b/proto/confluent/meta.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package confluent; + +import "google/protobuf/descriptor.proto"; + +option go_package="../confluent"; + +message Meta { + string doc = 1; + map<string, string> params = 2; + repeated string tags = 3; +} + +extend google.protobuf.FileOptions { + Meta file_meta = 1088; +} +extend google.protobuf.MessageOptions { + Meta message_meta = 1088; +} +extend google.protobuf.FieldOptions { + Meta field_meta = 1088; +} +extend google.protobuf.EnumOptions { + Meta enum_meta = 1088; +} +extend google.protobuf.EnumValueOptions { + Meta enum_value_meta = 1088; +} diff --git a/proto/confluent/types/decimal.proto b/proto/confluent/types/decimal.proto new file mode 100644 index 00000000..75d8b9b4 --- /dev/null +++ b/proto/confluent/types/decimal.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package confluent.type; + +option go_package="../types"; + +message Decimal { + + // The two's-complement representation of the unscaled integer value in big-endian byte order + bytes value =
1; + + // The precision + uint32 precision = 2; + + // The scale + int32 scale = 3; +} \ No newline at end of file diff --git a/proto/rules/encryption/tink/proto/aes_gcm.proto b/proto/rules/encryption/tink/proto/aes_gcm.proto new file mode 100644 index 00000000..fba7a89e --- /dev/null +++ b/proto/rules/encryption/tink/proto/aes_gcm.proto @@ -0,0 +1,67 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//////////////////////////////////////////////////////////////////////////////// + +syntax = "proto3"; + +package google.crypto.tink; + +option java_package = "com.google.crypto.tink.proto"; +option java_multiple_files = true; +option go_package = "github.com/google/tink/proto/aes_gcm_go_proto"; +option objc_class_prefix = "TINKPB"; + +message AesGcmKeyFormat { + uint32 key_size = 2; + uint32 version = 3; +} + +// key_type: type.googleapis.com/google.crypto.tink.AesGcmKey +// +// A AesGcmKey is an AEAD key. Mathematically, it represents the functions +// Encrypt and Decrypt which we define in the following. +// +// First, Tink computes a "output prefix" OP by considering the +// "OutputPrefixType" message in Keyset.Key and the ID of the key using the +// Tink function "AEAD-OutputPrefix": (AesGcmKeys must always be stored in a +// keyset). +// +// AEAD-OutputPrefix(output_prefix_type, id): +// if output_prefix_type == RAW: +// return ""; +// if output_prefix_type == TINK: +// return 0x01 + BigEndian(id) +// if output_prefix_type == CRUNCHY: +// return 0x00 + BigEndian(id) +// +// Then, the function defined by this is defined as: +// [GCM], Section 5.2.1: +// * "Encrypt" maps a plaintext P and associated data A to a ciphertext given +// by the concatenation OP || IV || C || T. In addition to [GCM], Tink +// has the following restriction: IV is a uniformly random initialization +// vector of length 12 bytes and T is restricted to 16 bytes. +// +// * If OP matches the result of AEAD-OutputPrefix, then "Decrypt" maps the +// input OP || IV || C || T and A to the the output P in the manner as +// described in [GCM], Section 5.2.2. If OP does not match, then "Decrypt" +// returns an error. +// [GCM]: NIST Special Publication 800-38D: Recommendation for Block Cipher +// Modes of Operation: Galois/Counter Mode (GCM) and GMAC. +// http://csrc.nist.gov/publications/nistpubs/800-38D/SP-800-38D.pdf. + +message AesGcmKey { + uint32 version = 1; + bytes key_value = 3; +} diff --git a/proto/rules/encryption/tink/proto/aes_siv.proto b/proto/rules/encryption/tink/proto/aes_siv.proto new file mode 100644 index 00000000..00230278 --- /dev/null +++ b/proto/rules/encryption/tink/proto/aes_siv.proto @@ -0,0 +1,36 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//////////////////////////////////////////////////////////////////////////////// + +syntax = "proto3"; + +package google.crypto.tink; + +option java_package = "com.google.crypto.tink.proto"; +option java_multiple_files = true; +option go_package = "github.com/google/tink/proto/aes_siv_go_proto"; + +message AesSivKeyFormat { + // Only valid value is: 64. + uint32 key_size = 1; + uint32 version = 2; +} + +// key_type: type.googleapis.com/google.crypto.tink.AesSivKey +message AesSivKey { + uint32 version = 1; + // First half is AES-CTR key, second is AES-SIV. + bytes key_value = 2; +} diff --git a/proto/test/schemaregistry/serde/cycle.proto b/proto/test/schemaregistry/serde/cycle.proto new file mode 100644 index 00000000..a5351102 --- /dev/null +++ b/proto/test/schemaregistry/serde/cycle.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +message LinkedList { + int32 value = 1; + LinkedList next = 10; +} \ No newline at end of file diff --git a/proto/test/schemaregistry/serde/dep.proto b/proto/test/schemaregistry/serde/dep.proto new file mode 100644 index 00000000..c0beccf9 --- /dev/null +++ b/proto/test/schemaregistry/serde/dep.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +import "test/schemaregistry/serde/test.proto"; + +message DependencyMessage { + bool is_active = 1; + TestMessage test_messsage = 2; +} diff --git a/proto/test/schemaregistry/serde/example.proto b/proto/test/schemaregistry/serde/example.proto new file mode 100644 index 00000000..a8e65fca --- /dev/null +++ b/proto/test/schemaregistry/serde/example.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +import "confluent/meta.proto"; + +message Author { + string name = 1 [ + (confluent.field_meta).tags = "PII" + ]; + int32 id = 2; + bytes picture = 3 [ + (confluent.field_meta).tags = "PII" + ]; + repeated string works = 4; +} + +message Pizza { + string size = 1; + repeated string toppings = 2; +} diff --git a/proto/test/schemaregistry/serde/nested.proto b/proto/test/schemaregistry/serde/nested.proto new file mode 100644 index 00000000..3b7ce1b5 --- /dev/null +++ b/proto/test/schemaregistry/serde/nested.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +import "google/protobuf/timestamp.proto"; + +message UserId { + oneof user_id { + string kafka_user_id = 1; + int32 other_user_id = 2; + MessageId another_id = 3; + } +} + +message MessageId { + string id = 1; +} + +enum Status { + ACTIVE = 0; + INACTIVE = 1; +} + +message ComplexType { + oneof some_val { + string one_id = 1; + int32 other_id = 2; + } + bool is_active = 3; +} + +/* + * Complex message using nested protos and repeated fields + */ +message NestedMessage { + UserId user_id = 1; + bool is_active = 2; + repeated string experiments_active = 3; + google.protobuf.Timestamp updated_at = 4; + Status status = 5; + ComplexType complex_type = 6; + map map_type = 7; + InnerMessage inner = 8; + + message InnerMessage { + string id = 1 [json_name="id"]; + repeated int32 ids = 2 
[packed=true]; + } + + enum InnerEnum { + option allow_alias = true; + ZERO = 0; + ALSO_ZERO = 0; + } + + reserved 14, 15, 9 to 11; + reserved "foo", "bar"; +} diff --git a/proto/test/schemaregistry/serde/newerwidget.proto b/proto/test/schemaregistry/serde/newerwidget.proto new file mode 100644 index 00000000..4243c0eb --- /dev/null +++ b/proto/test/schemaregistry/serde/newerwidget.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +message NewerWidget { + string name = 1; + int32 length = 2; + int32 version = 3; +} diff --git a/proto/test/schemaregistry/serde/newwidget.proto b/proto/test/schemaregistry/serde/newwidget.proto new file mode 100644 index 00000000..fdca7b28 --- /dev/null +++ b/proto/test/schemaregistry/serde/newwidget.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +message NewWidget { + string name = 1; + int32 height = 2; + int32 version = 3; +} diff --git a/proto/test/schemaregistry/serde/test.proto b/proto/test/schemaregistry/serde/test.proto new file mode 100644 index 00000000..28afb444 --- /dev/null +++ b/proto/test/schemaregistry/serde/test.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +import "google/protobuf/descriptor.proto"; + +message TestMessage { + string test_string = 1; + bool test_bool = 2; + bytes test_bytes = 3; + double test_double = 4; + float test_float = 5; + fixed32 test_fixed32 = 6; + fixed64 test_fixed64 = 7; + int32 test_int32 = 8; + int64 test_int64 = 9; + sfixed32 test_sfixed32 = 10; + sfixed64 test_sfixed64 = 11; + sint32 test_sint32 = 12; + sint64 test_sint64 = 13; + uint32 test_uint32 = 14; + uint64 test_uint64 = 15; +} \ No newline at end of file diff --git a/proto/test/schemaregistry/serde/widget.proto b/proto/test/schemaregistry/serde/widget.proto new file mode 100644 index 00000000..94a99c2d --- /dev/null +++ b/proto/test/schemaregistry/serde/widget.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package test; +option go_package="../test"; + +message Widget { + string name = 1; + int32 size = 2; + int32 version = 3; +} diff --git a/schemaregistry-examples/package.json b/schemaregistry-examples/package.json new file mode 100644 index 00000000..29e4d4f4 --- /dev/null +++ b/schemaregistry-examples/package.json @@ -0,0 +1,15 @@ +{ + "name": "schemaregistry-examples", + "version": "1.0.0", + "main": "index.js", + "scripts": {}, + "keywords": [], + "author": "", + "license": "ISC", + "description": "", + "devDependencies": { + "@confluentinc/kafka-javascript": "^0.2.0", + "@confluentinc/schemaregistry": "^v0.1.17.6-devel", + "uuid": "^10.0.0" + } +} diff --git a/schemaregistry-examples/src/constants.ts b/schemaregistry-examples/src/constants.ts new file mode 100644 index 00000000..05065da6 --- /dev/null +++ b/schemaregistry-examples/src/constants.ts @@ -0,0 +1,27 @@ +import { BasicAuthCredentials } from '@confluentinc/schemaregistry'; + +const issuerEndpointUrl = ''; // e.g. 'https://dev-123456.okta.com/oauth2/default/v1/token'; +const clientId = ''; +const clientSecret = ''; +const scope = ''; // e.g. 'schemaregistry'; +const identityPoolId = ''; // e.g. pool-Gx30 +const logicalCluster = ''; //e.g. lsrc-a6m5op +const baseUrl = ''; // e.g. 'https://psrc-3amt5nj.us-east-1.aws.confluent.cloud' +const clusterBootstrapUrl = ''; // e.g. 
"pkc-p34xa.us-east-1.aws.confluent.cloud:9092" +const clusterApiKey = ''; +const clusterApiSecret = ''; + +const localAuthCredentials: BasicAuthCredentials = { + credentialsSource: 'USER_INFO', + userInfo: 'RBACAllowedUser-lsrc1:nohash', +}; + +const basicAuthCredentials: BasicAuthCredentials = { + credentialsSource: 'USER_INFO', + userInfo: ':', +}; + +export { + issuerEndpointUrl, clientId, clientSecret, scope, identityPoolId, logicalCluster, baseUrl, + clusterBootstrapUrl, clusterApiKey, clusterApiSecret, basicAuthCredentials, localAuthCredentials +}; \ No newline at end of file diff --git a/schemaregistry-examples/src/csfle-schemaregistry.ts b/schemaregistry-examples/src/csfle-schemaregistry.ts new file mode 100644 index 00000000..16bb942c --- /dev/null +++ b/schemaregistry-examples/src/csfle-schemaregistry.ts @@ -0,0 +1,155 @@ +import { + AvroSerializer, AvroSerializerConfig, SerdeType, + AvroDeserializer, ClientConfig, + SchemaRegistryClient, SchemaInfo, Rule, RuleMode, + RuleRegistry, FieldEncryptionExecutor, AwsKmsDriver, RuleSet + } from "@confluentinc/schemaregistry"; + import { CreateAxiosDefaults } from "axios"; + import { KafkaJS } from '@confluentinc/kafka-javascript'; + import { + basicAuthCredentials, clusterApiKey, clusterApiSecret, + clusterBootstrapUrl, baseUrl + } from "./constants"; + + FieldEncryptionExecutor.register(); + AwsKmsDriver.register(); + + async function csfle() { + + const schemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + { + name: 'address', type: 'string', + "confluent:tags": ["PII"] + }, + ], + }); + + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + const kafka: KafkaJS.Kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [clusterBootstrapUrl], + ssl: true, + sasl: { + mechanism: 'plain', + username: clusterApiKey, + password: clusterApiSecret, + }, + }, + }); + + const producer: KafkaJS.Producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'csfle-example', + 'encrypt.kms.type': 'aws-kms', + 'encrypt.kms.key.id': 'your-key-id', + }, + onFailure: 'ERROR,NONE' + }; + + let ruleSet: RuleSet = { + domainRules: [encRule] + }; + + const schemaInfo: SchemaInfo = { + schemaType: 'AVRO', + schema: schemaString, + ruleSet: ruleSet + }; + + const userInfo = { name: 'Alice N Bob', age: 30, address: '369 Main St' }; + const userTopic = 'csfle-topic'; + + await schemaRegistryClient.register(userTopic+"-value", schemaInfo); + + const serializerConfig: AvroSerializerConfig = { useLatestVersion: true }; + const serializer: AvroSerializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, serializerConfig); + + const outgoingMessage = { + key: "1", + value: await serializer.serialize(userTopic, userInfo) + }; + + console.log("Outgoing Message:", outgoingMessage); + + await producer.connect(); + + await producer.send({ + topic: userTopic, + messages: [outgoingMessage] + }); + + await producer.disconnect(); 
+ + const consumer: KafkaJS.Consumer = kafka.consumer({ + kafkaJS: { + groupId: 'demo-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + + const deserializer: AvroDeserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + await consumer.subscribe({ topic: userTopic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + console.log("Message value", message.value); + const decodedMessage = { + ...message, + value: await deserializer.deserialize(userTopic, message.value as Buffer) + }; + console.log("Decoded message", decodedMessage); + let registry = new RuleRegistry(); + const weakDeserializer: AvroDeserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}, registry); + const weakDecodedMessage = { + ...message, + value: await weakDeserializer.deserialize(userTopic, message.value as Buffer) + }; + console.log("Weak decoded message", weakDecodedMessage); + messageRcvd = true; + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + } + + csfle(); \ No newline at end of file diff --git a/schemaregistry-examples/src/kafka-consumer-avro.ts b/schemaregistry-examples/src/kafka-consumer-avro.ts new file mode 100644 index 00000000..54c4b70b --- /dev/null +++ b/schemaregistry-examples/src/kafka-consumer-avro.ts @@ -0,0 +1,72 @@ +import { SerdeType,AvroDeserializer, ClientConfig, SchemaRegistryClient } from "@confluentinc/schemaregistry"; +import { CreateAxiosDefaults } from "axios"; +import { KafkaJS } from '@confluentinc/kafka-javascript'; +import { + basicAuthCredentials, + clusterApiKey, clusterApiSecret, + clusterBootstrapUrl, baseUrl +} from "./constants"; + +async function kafkaConsumerAvro() { + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + const kafka: KafkaJS.Kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [clusterBootstrapUrl], + ssl: true, + sasl: { + mechanism: 'plain', + username: clusterApiKey, + password: clusterApiSecret, + }, + }, + }); + + const userTopic = 'example-user-topic'; + + const deserializer: AvroDeserializer = new AvroDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const consumer: KafkaJS.Consumer = kafka.consumer({ + kafkaJS: { + groupId: 'example-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: userTopic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + console.log("Message value", message.value); + const decodedMessage = { + ...message, + value: await deserializer.deserialize(userTopic, message.value as Buffer) + }; + console.log("Decoded message", decodedMessage); + messageRcvd = true; + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); +} + +kafkaConsumerAvro(); diff --git a/schemaregistry-examples/src/kafka-consumer-json.ts b/schemaregistry-examples/src/kafka-consumer-json.ts new file mode 100644 index 00000000..b3073e4d --- /dev/null +++
b/schemaregistry-examples/src/kafka-consumer-json.ts @@ -0,0 +1,78 @@ +import { + SerdeType, + JsonDeserializer, ClientConfig, + SchemaRegistryClient + } from "@confluentinc/schemaregistry"; + import { CreateAxiosDefaults } from "axios"; + import { KafkaJS } from '@confluentinc/kafka-javascript'; + import { + basicAuthCredentials, + clusterApiKey, clusterApiSecret, + clusterBootstrapUrl, baseUrl + } from "./constants"; + + async function kafkaConsumerJson() { + + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + const kafka: KafkaJS.Kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [clusterBootstrapUrl], + ssl: true, + sasl: { + mechanism: 'plain', + username: clusterApiKey, + password: clusterApiSecret, + }, + }, + }); + + const userTopic = 'example-user-topic'; + + const deserializer: JsonDeserializer = new JsonDeserializer(schemaRegistryClient, SerdeType.VALUE, {}); + + const consumer: KafkaJS.Consumer = kafka.consumer({ + kafkaJS: { + groupId: 'example-group', + fromBeginning: true, + partitionAssigners: [KafkaJS.PartitionAssigners.roundRobin], + }, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: userTopic }); + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + console.log("Message value", message.value); + const decodedMessage = { + ...message, + value: await deserializer.deserialize(userTopic, message.value as Buffer) + }; + console.log("Decoded message", decodedMessage); + messageRcvd = true; + }, + }); + + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + } + + kafkaConsumerJson(); + \ No newline at end of file diff --git a/schemaregistry-examples/src/kafka-producer-avro.ts b/schemaregistry-examples/src/kafka-producer-avro.ts new file mode 100644 index 00000000..57752bc3 --- /dev/null +++ b/schemaregistry-examples/src/kafka-producer-avro.ts @@ -0,0 +1,90 @@ +import { + AvroSerializer, AvroSerializerConfig, SerdeType, + ClientConfig, SchemaRegistryClient, SchemaInfo +} from "@confluentinc/schemaregistry"; +import { CreateAxiosDefaults } from "axios"; +import { KafkaJS } from '@confluentinc/kafka-javascript'; +import { + basicAuthCredentials, + clusterApiKey, clusterApiSecret, + clusterBootstrapUrl, + baseUrl +} from "./constants"; + +async function kafkaProducerAvro() { + + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + const kafka: KafkaJS.Kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [clusterBootstrapUrl], + ssl: true, + sasl: { + mechanism: 'plain', + username: clusterApiKey, + password: clusterApiSecret, + }, + }, + }); + + const producer: KafkaJS.Producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + + const schemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age',
type: 'int' }, + ], + }); + + const schemaInfo: SchemaInfo = { + schemaType: 'AVRO', + schema: schemaString, + }; + + const userTopic = 'example-user-topic'; + await schemaRegistryClient.register(userTopic + "-value", schemaInfo); + + const userInfo = { name: 'Alice N Bob', age: 30 }; + + const avroSerializerConfig: AvroSerializerConfig = { useLatestVersion: true }; + + const serializer: AvroSerializer = new AvroSerializer(schemaRegistryClient, SerdeType.VALUE, avroSerializerConfig); + + const outgoingMessage = { + key: "1", + value: await serializer.serialize(userTopic, userInfo) + }; + + console.log("Outgoing message: ", outgoingMessage); + + await producer.connect(); + + await producer.send({ + topic: userTopic, + messages: [outgoingMessage] + }); + + await producer.disconnect(); +} + +kafkaProducerAvro(); \ No newline at end of file diff --git a/schemaregistry-examples/src/kafka-producer-json.ts b/schemaregistry-examples/src/kafka-producer-json.ts new file mode 100644 index 00000000..3fed9f9d --- /dev/null +++ b/schemaregistry-examples/src/kafka-producer-json.ts @@ -0,0 +1,98 @@ +import { + JsonSerializer, JsonSerializerConfig, SerdeType, + BearerAuthCredentials, ClientConfig, + SchemaRegistryClient, SchemaInfo +} from "@confluentinc/schemaregistry"; +import { CreateAxiosDefaults } from "axios"; +import { KafkaJS } from '@confluentinc/kafka-javascript'; +import { + basicAuthCredentials, + clusterApiKey, clusterApiSecret, + clusterBootstrapUrl, + baseUrl +} from "./constants"; + +async function kafkaProducerJson() { + + + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + const kafka: KafkaJS.Kafka = new KafkaJS.Kafka({ + kafkaJS: { + brokers: [clusterBootstrapUrl], + ssl: true, + sasl: { + mechanism: 'plain', + username: clusterApiKey, + password: clusterApiSecret, + }, + }, + }); + + const producer: KafkaJS.Producer = kafka.producer({ + kafkaJS: { + allowAutoTopicCreation: true, + acks: 1, + compression: KafkaJS.CompressionTypes.GZIP, + } + }); + + const schemaString: string = JSON.stringify({ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "User", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "age": { + "type": "integer" + } + }, + "required": ["name", "age"] + }); + + const schemaInfo: SchemaInfo = { + schemaType: 'JSON', + schema: schemaString, + }; + + const userTopic = 'example-user-topic'; + await schemaRegistryClient.register(userTopic + "-value", schemaInfo); + + const userInfo = { name: 'Alice N Bob', age: 30 }; + + const jsonSerializerConfig: JsonSerializerConfig = { useLatestVersion: true }; + + const serializer: JsonSerializer = new JsonSerializer(schemaRegistryClient, SerdeType.VALUE, jsonSerializerConfig); + + const outgoingMessage = { + key: "1", + value: await serializer.serialize(userTopic, userInfo) + }; + + console.log("Outgoing message: ", outgoingMessage); + + await producer.connect(); + + await producer.send({ + topic: userTopic, + messages: [outgoingMessage] + }); + + await producer.disconnect(); +} + +kafkaProducerJson(); \ No newline at end of file diff --git a/schemaregistry-examples/src/local-schemaregistry.ts b/schemaregistry-examples/src/local-schemaregistry.ts new file mode 100644 index 
00000000..3a511adb --- /dev/null +++ b/schemaregistry-examples/src/local-schemaregistry.ts @@ -0,0 +1,48 @@ +import { SchemaRegistryClient, SchemaInfo, ClientConfig } from '@confluentinc/schemaregistry'; +import { v4 as uuidv4 } from 'uuid'; +import { CreateAxiosDefaults } from 'axios'; +import { localAuthCredentials } from './constants'; + +async function localDemo() { + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: ['http://localhost:8081'], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: localAuthCredentials, + }; + + const schemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + ], + }); + + const schemaInfo: SchemaInfo = { + schemaType: 'AVRO', + schema: schemaString, + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + console.log("Current Subjects: ", await schemaRegistryClient.getAllSubjects()); + + const subject1 = `subject-${uuidv4()}`; + const subject2 = `subject-${uuidv4()}`; + console.log("subject1: ", subject1); + console.log("subject2: ", subject2); + + await schemaRegistryClient.register(subject1, schemaInfo); + await schemaRegistryClient.register(subject2, schemaInfo); + + console.log("Subjects After Registering: ", await schemaRegistryClient.getAllSubjects()); +} + +localDemo(); \ No newline at end of file diff --git a/schemaregistry-examples/src/oauth-schemaregistry.ts b/schemaregistry-examples/src/oauth-schemaregistry.ts new file mode 100644 index 00000000..3481abd6 --- /dev/null +++ b/schemaregistry-examples/src/oauth-schemaregistry.ts @@ -0,0 +1,39 @@ +import { SchemaRegistryClient, BearerAuthCredentials, ClientConfig } from '@confluentinc/schemaregistry'; +import { CreateAxiosDefaults } from 'axios'; +import { + issuerEndpointUrl, clientId, clientSecret, scope, + identityPoolId, logicalCluster, baseUrl +} from './constants'; + +async function oauthSchemaRegistry() { + + const bearerAuthCredentials: BearerAuthCredentials = { + credentialsSource: 'OAUTHBEARER', + issuerEndpointUrl: issuerEndpointUrl, + clientId: clientId, + clientSecret: clientSecret, + scope: scope, + identityPoolId: identityPoolId, + logicalCluster: logicalCluster + } + + const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 + }; + + const clientConfig: ClientConfig = { + baseURLs: [baseUrl], + createAxiosDefaults: createAxiosDefaults, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + bearerAuthCredentials: bearerAuthCredentials + }; + + const schemaRegistryClient = new SchemaRegistryClient(clientConfig); + + console.log("Current Subjects:", await schemaRegistryClient.getAllSubjects()); + console.log("Current Config:", await schemaRegistryClient.getDefaultConfig()); + console.log("Current Compatibility", await schemaRegistryClient.getDefaultCompatibility()); +} + +oauthSchemaRegistry(); \ No newline at end of file diff --git a/schemaregistry-examples/tsconfig.json b/schemaregistry-examples/tsconfig.json new file mode 100644 index 00000000..f66d3634 --- /dev/null +++ b/schemaregistry-examples/tsconfig.json @@ -0,0 +1,14 @@ +{ + "compilerOptions": { + "target": "ES6", + "module": "commonjs", + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules"] +} \ No 
newline at end of file
diff --git a/schemaregistry/LICENSE.txt b/schemaregistry/LICENSE.txt
new file mode 100644
index 00000000..efa768af
--- /dev/null
+++ b/schemaregistry/LICENSE.txt
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright (c) 2024 Confluent, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/schemaregistry/Makefile b/schemaregistry/Makefile
new file mode 100644
index 00000000..e6236175
--- /dev/null
+++ b/schemaregistry/Makefile
@@ -0,0 +1,28 @@
+# Makefile.schemaregistry
+
+# Variables
+NODE ?= node
+ESLINT ?= ../node_modules/.bin/eslint
+JEST ?= ../node_modules/.bin/jest
+TS_NODE ?= ../node_modules/.bin/ts-node
+DOCKER ?= ./run_docker_schemaregistry.sh
+
+# Paths
+SRC_DIR = .
+SR_TEST_DIR = ../test/schemaregistry
+DEK_TEST_DIR = ../test/schemaregistry/rules/encryption/dekregistry
+INTEG_DIR = ../e2e/schemaregistry
+
+# Tasks
+.PHONY: all lint test integtest
+
+all: lint test
+
+lint:
+	$(ESLINT) $(SRC_DIR) $(SR_TEST_DIR) $(INTEG_DIR) $(DEK_TEST_DIR)
+
+test:
+	$(JEST) $(SR_TEST_DIR) $(DEK_TEST_DIR)
+
+integtest:
+	$(DOCKER)
diff --git a/schemaregistry/README.md b/schemaregistry/README.md
new file mode 100644
index 00000000..7b840662
--- /dev/null
+++ b/schemaregistry/README.md
@@ -0,0 +1,121 @@
+Confluent's JavaScript Client for Schema Registry™
+=====================================================
+
+Confluent's JavaScript client for [Schema Registry](https://docs.confluent.io/cloud/current/sr/index.html) supports Avro, Protobuf and JSON Schema, and is designed to work with
+[Confluent's JavaScript Client for Apache Kafka](https://www.npmjs.com/package/@confluentinc/kafka-javascript). This is an **Early Availability** library.
+The goal is to provide a highly performant, reliable and easy-to-use JavaScript client in line with other Schema Registry clients
+such as our [Go](https://github.com/confluentinc/confluent-kafka-go), [.NET](https://github.com/confluentinc/confluent-kafka-dotnet),
+and [Java](https://github.com/confluentinc/schema-registry) clients.
+
+## Installation
+```bash
+npm install @confluentinc/schemaregistry
+```
+
+## Getting Started
+Below is a simple example of using Avro serialization with the Schema Registry client and the KafkaJS client.
+```javascript +const { Kafka } = require('@confluentinc/kafka-javascript').KafkaJS; +const { SchemaRegistryClient, SerdeType, AvroSerializer, AvroDeserializer} = require('@confluentinc/schemaregistry'); + +const registry = new SchemaRegistryClient({ baseURLs: ['http://localhost:8081'] }) +const kafka = new Kafka({ + kafkaJS: { + brokers: ['localhost:9092'] + } +}); + +let consumer = kafka.consumer({ + kafkaJS: { + groupId: "test-group", + fromBeginning: true, + }, +}); +let producer = kafka.producer(); + +const schema = { + type: 'record', + namespace: 'examples', + name: 'RandomTest', + fields: [ + { name: 'fullName', type: 'string' } + ], +}; + +const topicName = 'test-topic'; +const subjectName = topicName + '-value'; + +const run = async () => { + // Register schema + const id = await registry.register( + subjectName, + { + schemaType: 'AVRO', + schema: JSON.stringify(schema) + } + ) + + // Create an Avro serializer + const ser = new AvroSerializer(registry, SerdeType.VALUE, { useLatestVersion: true }); + + // Produce a message with the schema + await producer.connect() + const outgoingMessage = { + key: 'key', + value: await ser.serialize(topicName, { fullName: 'John Doe' }), + } + await producer.send({ + topic: topicName, + messages: [outgoingMessage] + }); + console.log("Producer sent its message.") + await producer.disconnect(); + producer = null; + + // Create an Avro deserializer + const deser = new AvroDeserializer(registry, SerdeType.VALUE, {}); + + await consumer.connect() + await consumer.subscribe({ topic: topicName }) + + let messageRcvd = false; + await consumer.run({ + eachMessage: async ({ message }) => { + const decodedMessage = { + ...message, + value: await deser.deserialize(topicName, message.value) + }; + console.log("Consumer received message.\nBefore decoding: " + JSON.stringify(message) + "\nAfter decoding: " + JSON.stringify(decodedMessage)); + messageRcvd = true; + }, + }); + + // Wait around until we get a message, and then disconnect. + while (!messageRcvd) { + await new Promise((resolve) => setTimeout(resolve, 100)); + } + + await consumer.disconnect(); + consumer = null; +} + +run().catch (async e => { + console.error(e); + consumer && await consumer.disconnect(); + producer && await producer.disconnect(); + process.exit(1); +}) +``` + +## Features and Limitations +- Full Avro and JSON Schema support +- Protobuf support requires Schema Registry in (upcoming) release: CP 7.4.8, 7.5.7, 7.6.4, 7.7.2, 7.8.0 +- Support for CSFLE (Client-Side Field Level Encryption) +- Support for schema migration rules for Avro and JSON Schema +- Data quality rules are not yet supported +- Support for OAuth + +## Contributing + +Bug reports and feedback is appreciated in the form of Github Issues. 
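+
+If you are changing the schema registry client itself, the `Makefile` in this directory wraps the usual lint and test commands. The sketch below assumes dependencies have already been installed at the repository root, since the tools are resolved from `../node_modules`:
+
+```bash
+# run from the schemaregistry/ directory
+make lint       # eslint over the client sources and tests
+make test       # jest unit tests for the schema registry client
+make integtest  # integration tests via run_docker_schemaregistry.sh
+```
+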
+For guidelines on contributing please see [CONTRIBUTING.md](CONTRIBUTING.md) diff --git a/schemaregistry/confluent/meta_pb.ts b/schemaregistry/confluent/meta_pb.ts new file mode 100644 index 00000000..0f38f3c3 --- /dev/null +++ b/schemaregistry/confluent/meta_pb.ts @@ -0,0 +1,73 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file confluent/meta.proto (package confluent, syntax proto3) +/* eslint-disable */ + +import type { GenExtension, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { extDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { EnumOptions, EnumValueOptions, FieldOptions, FileOptions, MessageOptions } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_descriptor } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file confluent/meta.proto. + */ +export const file_confluent_meta: GenFile = /*@__PURE__*/ + fileDesc("ChRjb25mbHVlbnQvbWV0YS5wcm90bxIJY29uZmx1ZW50In0KBE1ldGESCwoDZG9jGAEgASgJEisKBnBhcmFtcxgCIAMoCzIbLmNvbmZsdWVudC5NZXRhLlBhcmFtc0VudHJ5EgwKBHRhZ3MYAyADKAkaLQoLUGFyYW1zRW50cnkSCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJOgI4ATpLCglmaWxlX21ldGESHC5nb29nbGUucHJvdG9idWYuRmlsZU9wdGlvbnMYwAggASgLMg8uY29uZmx1ZW50Lk1ldGFSCGZpbGVNZXRhOlQKDG1lc3NhZ2VfbWV0YRIfLmdvb2dsZS5wcm90b2J1Zi5NZXNzYWdlT3B0aW9ucxjACCABKAsyDy5jb25mbHVlbnQuTWV0YVILbWVzc2FnZU1ldGE6TgoKZmllbGRfbWV0YRIdLmdvb2dsZS5wcm90b2J1Zi5GaWVsZE9wdGlvbnMYwAggASgLMg8uY29uZmx1ZW50Lk1ldGFSCWZpZWxkTWV0YTpLCgllbnVtX21ldGESHC5nb29nbGUucHJvdG9idWYuRW51bU9wdGlvbnMYwAggASgLMg8uY29uZmx1ZW50Lk1ldGFSCGVudW1NZXRhOlsKD2VudW1fdmFsdWVfbWV0YRIhLmdvb2dsZS5wcm90b2J1Zi5FbnVtVmFsdWVPcHRpb25zGMAIIAEoCzIPLmNvbmZsdWVudC5NZXRhUg1lbnVtVmFsdWVNZXRhQg5aDC4uL2NvbmZsdWVudGIGcHJvdG8z", [file_google_protobuf_descriptor]); + +/** + * @generated from message confluent.Meta + */ +export type Meta = Message<"confluent.Meta"> & { + /** + * @generated from field: string doc = 1; + */ + doc: string; + + /** + * @generated from field: map params = 2; + */ + params: { [key: string]: string }; + + /** + * @generated from field: repeated string tags = 3; + */ + tags: string[]; +}; + +/** + * Describes the message confluent.Meta. + * Use `create(MetaSchema)` to create a new message. 
+ */ +export const MetaSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_confluent_meta, 0); + +/** + * @generated from extension: confluent.Meta file_meta = 1088; + */ +export const file_meta: GenExtension = /*@__PURE__*/ + extDesc(file_confluent_meta, 0); + +/** + * @generated from extension: confluent.Meta message_meta = 1088; + */ +export const message_meta: GenExtension = /*@__PURE__*/ + extDesc(file_confluent_meta, 1); + +/** + * @generated from extension: confluent.Meta field_meta = 1088; + */ +export const field_meta: GenExtension = /*@__PURE__*/ + extDesc(file_confluent_meta, 2); + +/** + * @generated from extension: confluent.Meta enum_meta = 1088; + */ +export const enum_meta: GenExtension = /*@__PURE__*/ + extDesc(file_confluent_meta, 3); + +/** + * @generated from extension: confluent.Meta enum_value_meta = 1088; + */ +export const enum_value_meta: GenExtension = /*@__PURE__*/ + extDesc(file_confluent_meta, 4); + diff --git a/schemaregistry/confluent/types/decimal_pb.ts b/schemaregistry/confluent/types/decimal_pb.ts new file mode 100644 index 00000000..67160fc5 --- /dev/null +++ b/schemaregistry/confluent/types/decimal_pb.ts @@ -0,0 +1,47 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file confluent/types/decimal.proto (package confluent.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file confluent/types/decimal.proto. + */ +export const file_confluent_types_decimal: GenFile = /*@__PURE__*/ + fileDesc("Ch1jb25mbHVlbnQvdHlwZXMvZGVjaW1hbC5wcm90bxIOY29uZmx1ZW50LnR5cGUiOgoHRGVjaW1hbBINCgV2YWx1ZRgBIAEoDBIRCglwcmVjaXNpb24YAiABKA0SDQoFc2NhbGUYAyABKAVCCloILi4vdHlwZXNiBnByb3RvMw"); + +/** + * @generated from message confluent.type.Decimal + */ +export type Decimal = Message<"confluent.type.Decimal"> & { + /** + * The two's-complement representation of the unscaled integer value in big-endian byte order + * + * @generated from field: bytes value = 1; + */ + value: Uint8Array; + + /** + * The precision + * + * @generated from field: uint32 precision = 2; + */ + precision: number; + + /** + * The scale + * + * @generated from field: int32 scale = 3; + */ + scale: number; +}; + +/** + * Describes the message confluent.type.Decimal. + * Use `create(DecimalSchema)` to create a new message. 
+ */ +export const DecimalSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_confluent_types_decimal, 0); + diff --git a/schemaregistry/docker-compose.schemaregistry.yml b/schemaregistry/docker-compose.schemaregistry.yml new file mode 100644 index 00000000..98bcca80 --- /dev/null +++ b/schemaregistry/docker-compose.schemaregistry.yml @@ -0,0 +1,33 @@ +version: '3' +services: + zookeeper: + image: confluentinc/cp-zookeeper + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + kafka: + image: confluentinc/cp-kafka + restart: always + depends_on: + - zookeeper + ports: + - 9092:9092 + environment: + KAFKA_BROKER_ID: 0 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT_INTERNAL:PLAINTEXT,PLAINTEXT_EXTERNAL:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT_INTERNAL://kafka:9093,PLAINTEXT_EXTERNAL://localhost:9092 + KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT_INTERNAL + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 18000 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + schema-registry: + image: confluentinc/cp-schema-registry:7.6.0 + depends_on: + - kafka + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka:9093 + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 diff --git a/schemaregistry/google/type/calendar_period_pb.ts b/schemaregistry/google/type/calendar_period_pb.ts new file mode 100644 index 00000000..a8ad8988 --- /dev/null +++ b/schemaregistry/google/type/calendar_period_pb.ts @@ -0,0 +1,102 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/calendar_period.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc } from "@bufbuild/protobuf/codegenv1"; + +/** + * Describes the file google/type/calendar_period.proto. + */ +export const file_google_type_calendar_period: GenFile = /*@__PURE__*/ + fileDesc("CiFnb29nbGUvdHlwZS9jYWxlbmRhcl9wZXJpb2QucHJvdG8SC2dvb2dsZS50eXBlKn8KDkNhbGVuZGFyUGVyaW9kEh8KG0NBTEVOREFSX1BFUklPRF9VTlNQRUNJRklFRBAAEgcKA0RBWRABEggKBFdFRUsQAhINCglGT1JUTklHSFQQAxIJCgVNT05USBAEEgsKB1FVQVJURVIQBRIICgRIQUxGEAYSCAoEWUVBUhAHQngKD2NvbS5nb29nbGUudHlwZUITQ2FsZW5kYXJQZXJpb2RQcm90b1ABWkhnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvY2FsZW5kYXJwZXJpb2Q7Y2FsZW5kYXJwZXJpb2SiAgNHVFBiBnByb3RvMw"); + +/** + * A `CalendarPeriod` represents the abstract concept of a time period that has + * a canonical start. Grammatically, "the start of the current + * `CalendarPeriod`." All calendar times begin at midnight UTC. + * + * @generated from enum google.type.CalendarPeriod + */ +export enum CalendarPeriod { + /** + * Undefined period, raises an error. 
+ * + * @generated from enum value: CALENDAR_PERIOD_UNSPECIFIED = 0; + */ + CALENDAR_PERIOD_UNSPECIFIED = 0, + + /** + * A day. + * + * @generated from enum value: DAY = 1; + */ + DAY = 1, + + /** + * A week. Weeks begin on Monday, following + * [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + * + * @generated from enum value: WEEK = 2; + */ + WEEK = 2, + + /** + * A fortnight. The first calendar fortnight of the year begins at the start + * of week 1 according to + * [ISO 8601](https://en.wikipedia.org/wiki/ISO_week_date). + * + * @generated from enum value: FORTNIGHT = 3; + */ + FORTNIGHT = 3, + + /** + * A month. + * + * @generated from enum value: MONTH = 4; + */ + MONTH = 4, + + /** + * A quarter. Quarters start on dates 1-Jan, 1-Apr, 1-Jul, and 1-Oct of each + * year. + * + * @generated from enum value: QUARTER = 5; + */ + QUARTER = 5, + + /** + * A half-year. Half-years start on dates 1-Jan and 1-Jul. + * + * @generated from enum value: HALF = 6; + */ + HALF = 6, + + /** + * A year. + * + * @generated from enum value: YEAR = 7; + */ + YEAR = 7, +} + +/** + * Describes the enum google.type.CalendarPeriod. + */ +export const CalendarPeriodSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_google_type_calendar_period, 0); + diff --git a/schemaregistry/google/type/color_pb.ts b/schemaregistry/google/type/color_pb.ts new file mode 100644 index 00000000..462d70da --- /dev/null +++ b/schemaregistry/google/type/color_pb.ts @@ -0,0 +1,204 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/color.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import { file_google_protobuf_wrappers } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/color.proto. + */ +export const file_google_type_color: GenFile = /*@__PURE__*/ + fileDesc("Chdnb29nbGUvdHlwZS9jb2xvci5wcm90bxILZ29vZ2xlLnR5cGUiXQoFQ29sb3ISCwoDcmVkGAEgASgCEg0KBWdyZWVuGAIgASgCEgwKBGJsdWUYAyABKAISKgoFYWxwaGEYBCABKAsyGy5nb29nbGUucHJvdG9idWYuRmxvYXRWYWx1ZUJgCg9jb20uZ29vZ2xlLnR5cGVCCkNvbG9yUHJvdG9QAVo2Z29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL2NvbG9yO2NvbG9y+AEBogIDR1RQYgZwcm90bzM", [file_google_protobuf_wrappers]); + +/** + * Represents a color in the RGBA color space. This representation is designed + * for simplicity of conversion to/from color representations in various + * languages over compactness. For example, the fields of this representation + * can be trivially provided to the constructor of `java.awt.Color` in Java; it + * can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` + * method in iOS; and, with just a little work, it can be easily formatted into + * a CSS `rgba()` string in JavaScript. 
+ * + * This reference page doesn't carry information about the absolute color + * space + * that should be used to interpret the RGB value (e.g. sRGB, Adobe RGB, + * DCI-P3, BT.2020, etc.). By default, applications should assume the sRGB color + * space. + * + * When color equality needs to be decided, implementations, unless + * documented otherwise, treat two colors as equal if all their red, + * green, blue, and alpha values each differ by at most 1e-5. + * + * Example (Java): + * + * import com.google.type.Color; + * + * // ... + * public static java.awt.Color fromProto(Color protocolor) { + * float alpha = protocolor.hasAlpha() + * ? protocolor.getAlpha().getValue() + * : 1.0; + * + * return new java.awt.Color( + * protocolor.getRed(), + * protocolor.getGreen(), + * protocolor.getBlue(), + * alpha); + * } + * + * public static Color toProto(java.awt.Color color) { + * float red = (float) color.getRed(); + * float green = (float) color.getGreen(); + * float blue = (float) color.getBlue(); + * float denominator = 255.0; + * Color.Builder resultBuilder = + * Color + * .newBuilder() + * .setRed(red / denominator) + * .setGreen(green / denominator) + * .setBlue(blue / denominator); + * int alpha = color.getAlpha(); + * if (alpha != 255) { + * result.setAlpha( + * FloatValue + * .newBuilder() + * .setValue(((float) alpha) / denominator) + * .build()); + * } + * return resultBuilder.build(); + * } + * // ... + * + * Example (iOS / Obj-C): + * + * // ... + * static UIColor* fromProto(Color* protocolor) { + * float red = [protocolor red]; + * float green = [protocolor green]; + * float blue = [protocolor blue]; + * FloatValue* alpha_wrapper = [protocolor alpha]; + * float alpha = 1.0; + * if (alpha_wrapper != nil) { + * alpha = [alpha_wrapper value]; + * } + * return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; + * } + * + * static Color* toProto(UIColor* color) { + * CGFloat red, green, blue, alpha; + * if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { + * return nil; + * } + * Color* result = [[Color alloc] init]; + * [result setRed:red]; + * [result setGreen:green]; + * [result setBlue:blue]; + * if (alpha <= 0.9999) { + * [result setAlpha:floatWrapperWithValue(alpha)]; + * } + * [result autorelease]; + * return result; + * } + * // ... + * + * Example (JavaScript): + * + * // ... + * + * var protoToCssColor = function(rgb_color) { + * var redFrac = rgb_color.red || 0.0; + * var greenFrac = rgb_color.green || 0.0; + * var blueFrac = rgb_color.blue || 0.0; + * var red = Math.floor(redFrac * 255); + * var green = Math.floor(greenFrac * 255); + * var blue = Math.floor(blueFrac * 255); + * + * if (!('alpha' in rgb_color)) { + * return rgbToCssColor(red, green, blue); + * } + * + * var alphaFrac = rgb_color.alpha.value || 0.0; + * var rgbParams = [red, green, blue].join(','); + * return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); + * }; + * + * var rgbToCssColor = function(red, green, blue) { + * var rgbNumber = new Number((red << 16) | (green << 8) | blue); + * var hexString = rgbNumber.toString(16); + * var missingZeros = 6 - hexString.length; + * var resultBuilder = ['#']; + * for (var i = 0; i < missingZeros; i++) { + * resultBuilder.push('0'); + * } + * resultBuilder.push(hexString); + * return resultBuilder.join(''); + * }; + * + * // ... + * + * @generated from message google.type.Color + */ +export type Color = Message<"google.type.Color"> & { + /** + * The amount of red in the color as a value in the interval [0, 1]. 
+ * + * @generated from field: float red = 1; + */ + red: number; + + /** + * The amount of green in the color as a value in the interval [0, 1]. + * + * @generated from field: float green = 2; + */ + green: number; + + /** + * The amount of blue in the color as a value in the interval [0, 1]. + * + * @generated from field: float blue = 3; + */ + blue: number; + + /** + * The fraction of this color that should be applied to the pixel. That is, + * the final pixel color is defined by the equation: + * + * `pixel color = alpha * (this color) + (1.0 - alpha) * (background color)` + * + * This means that a value of 1.0 corresponds to a solid color, whereas + * a value of 0.0 corresponds to a completely transparent color. This + * uses a wrapper message rather than a simple float scalar so that it is + * possible to distinguish between a default value and the value being unset. + * If omitted, this color object is rendered as a solid color + * (as if the alpha value had been explicitly given a value of 1.0). + * + * @generated from field: google.protobuf.FloatValue alpha = 4; + */ + alpha?: number; +}; + +/** + * Describes the message google.type.Color. + * Use `create(ColorSchema)` to create a new message. + */ +export const ColorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_color, 0); + diff --git a/schemaregistry/google/type/date_pb.ts b/schemaregistry/google/type/date_pb.ts new file mode 100644 index 00000000..eaf34962 --- /dev/null +++ b/schemaregistry/google/type/date_pb.ts @@ -0,0 +1,79 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/date.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/date.proto. + */ +export const file_google_type_date: GenFile = /*@__PURE__*/ + fileDesc("ChZnb29nbGUvdHlwZS9kYXRlLnByb3RvEgtnb29nbGUudHlwZSIwCgREYXRlEgwKBHllYXIYASABKAUSDQoFbW9udGgYAiABKAUSCwoDZGF5GAMgASgFQl0KD2NvbS5nb29nbGUudHlwZUIJRGF0ZVByb3RvUAFaNGdvb2dsZS5nb2xhbmcub3JnL2dlbnByb3RvL2dvb2dsZWFwaXMvdHlwZS9kYXRlO2RhdGX4AQGiAgNHVFBiBnByb3RvMw"); + +/** + * Represents a whole or partial calendar date, such as a birthday. The time of + * day and time zone are either specified elsewhere or are insignificant. The + * date is relative to the Gregorian Calendar. 
This can represent one of the + * following: + * + * * A full date, with non-zero year, month, and day values + * * A month and day value, with a zero year, such as an anniversary + * * A year on its own, with zero month and day values + * * A year and month value, with a zero day, such as a credit card expiration + * date + * + * Related types are [google.type.TimeOfDay][google.type.TimeOfDay] and + * `google.protobuf.Timestamp`. + * + * @generated from message google.type.Date + */ +export type Date = Message<"google.type.Date"> & { + /** + * Year of the date. Must be from 1 to 9999, or 0 to specify a date without + * a year. + * + * @generated from field: int32 year = 1; + */ + year: number; + + /** + * Month of a year. Must be from 1 to 12, or 0 to specify a year without a + * month and day. + * + * @generated from field: int32 month = 2; + */ + month: number; + + /** + * Day of a month. Must be from 1 to 31 and valid for the year and month, or 0 + * to specify a year by itself or a year and month where the day isn't + * significant. + * + * @generated from field: int32 day = 3; + */ + day: number; +}; + +/** + * Describes the message google.type.Date. + * Use `create(DateSchema)` to create a new message. + */ +export const DateSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_date, 0); + diff --git a/schemaregistry/google/type/datetime_pb.ts b/schemaregistry/google/type/datetime_pb.ts new file mode 100644 index 00000000..511b0859 --- /dev/null +++ b/schemaregistry/google/type/datetime_pb.ts @@ -0,0 +1,180 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/datetime.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Duration } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_duration } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/datetime.proto. + */ +export const file_google_type_datetime: GenFile = /*@__PURE__*/ + fileDesc("Chpnb29nbGUvdHlwZS9kYXRldGltZS5wcm90bxILZ29vZ2xlLnR5cGUi4AEKCERhdGVUaW1lEgwKBHllYXIYASABKAUSDQoFbW9udGgYAiABKAUSCwoDZGF5GAMgASgFEg0KBWhvdXJzGAQgASgFEg8KB21pbnV0ZXMYBSABKAUSDwoHc2Vjb25kcxgGIAEoBRINCgVuYW5vcxgHIAEoBRIvCgp1dGNfb2Zmc2V0GAggASgLMhkuZ29vZ2xlLnByb3RvYnVmLkR1cmF0aW9uSAASKgoJdGltZV96b25lGAkgASgLMhUuZ29vZ2xlLnR5cGUuVGltZVpvbmVIAEINCgt0aW1lX29mZnNldCInCghUaW1lWm9uZRIKCgJpZBgBIAEoCRIPCgd2ZXJzaW9uGAIgASgJQmkKD2NvbS5nb29nbGUudHlwZUINRGF0ZVRpbWVQcm90b1ABWjxnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvZGF0ZXRpbWU7ZGF0ZXRpbWX4AQGiAgNHVFBiBnByb3RvMw", [file_google_protobuf_duration]); + +/** + * Represents civil time (or occasionally physical time). 
+ * + * This type can represent a civil time in one of a few possible ways: + * + * * When utc_offset is set and time_zone is unset: a civil time on a calendar + * day with a particular offset from UTC. + * * When time_zone is set and utc_offset is unset: a civil time on a calendar + * day in a particular time zone. + * * When neither time_zone nor utc_offset is set: a civil time on a calendar + * day in local time. + * + * The date is relative to the Proleptic Gregorian Calendar. + * + * If year is 0, the DateTime is considered not to have a specific year. month + * and day must have valid, non-zero values. + * + * This type may also be used to represent a physical time if all the date and + * time fields are set and either case of the `time_offset` oneof is set. + * Consider using `Timestamp` message for physical time instead. If your use + * case also would like to store the user's timezone, that can be done in + * another field. + * + * This type is more flexible than some applications may want. Make sure to + * document and validate your application's limitations. + * + * @generated from message google.type.DateTime + */ +export type DateTime = Message<"google.type.DateTime"> & { + /** + * Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a + * datetime without a year. + * + * @generated from field: int32 year = 1; + */ + year: number; + + /** + * Required. Month of year. Must be from 1 to 12. + * + * @generated from field: int32 month = 2; + */ + month: number; + + /** + * Required. Day of month. Must be from 1 to 31 and valid for the year and + * month. + * + * @generated from field: int32 day = 3; + */ + day: number; + + /** + * Required. Hours of day in 24 hour format. Should be from 0 to 23. An API + * may choose to allow the value "24:00:00" for scenarios like business + * closing time. + * + * @generated from field: int32 hours = 4; + */ + hours: number; + + /** + * Required. Minutes of hour of day. Must be from 0 to 59. + * + * @generated from field: int32 minutes = 5; + */ + minutes: number; + + /** + * Required. Seconds of minutes of the time. Must normally be from 0 to 59. An + * API may allow the value 60 if it allows leap-seconds. + * + * @generated from field: int32 seconds = 6; + */ + seconds: number; + + /** + * Required. Fractions of seconds in nanoseconds. Must be from 0 to + * 999,999,999. + * + * @generated from field: int32 nanos = 7; + */ + nanos: number; + + /** + * Optional. Specifies either the UTC offset or the time zone of the DateTime. + * Choose carefully between them, considering that time zone data may change + * in the future (for example, a country modifies their DST start/end dates, + * and future DateTimes in the affected range had already been stored). + * If omitted, the DateTime is considered to be in local time. + * + * @generated from oneof google.type.DateTime.time_offset + */ + timeOffset: { + /** + * UTC offset. Must be whole seconds, between -18 hours and +18 hours. + * For example, a UTC offset of -4:00 would be represented as + * { seconds: -14400 }. + * + * @generated from field: google.protobuf.Duration utc_offset = 8; + */ + value: Duration; + case: "utcOffset"; + } | { + /** + * Time zone. + * + * @generated from field: google.type.TimeZone time_zone = 9; + */ + value: TimeZone; + case: "timeZone"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message google.type.DateTime. + * Use `create(DateTimeSchema)` to create a new message. 
+ */ +export const DateTimeSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_datetime, 0); + +/** + * Represents a time zone from the + * [IANA Time Zone Database](https://www.iana.org/time-zones). + * + * @generated from message google.type.TimeZone + */ +export type TimeZone = Message<"google.type.TimeZone"> & { + /** + * IANA Time Zone Database time zone, e.g. "America/New_York". + * + * @generated from field: string id = 1; + */ + id: string; + + /** + * Optional. IANA Time Zone Database version number, e.g. "2019a". + * + * @generated from field: string version = 2; + */ + version: string; +}; + +/** + * Describes the message google.type.TimeZone. + * Use `create(TimeZoneSchema)` to create a new message. + */ +export const TimeZoneSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_datetime, 1); + diff --git a/schemaregistry/google/type/dayofweek_pb.ts b/schemaregistry/google/type/dayofweek_pb.ts new file mode 100644 index 00000000..8932990c --- /dev/null +++ b/schemaregistry/google/type/dayofweek_pb.ts @@ -0,0 +1,96 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/dayofweek.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc } from "@bufbuild/protobuf/codegenv1"; + +/** + * Describes the file google/type/dayofweek.proto. + */ +export const file_google_type_dayofweek: GenFile = /*@__PURE__*/ + fileDesc("Chtnb29nbGUvdHlwZS9kYXlvZndlZWsucHJvdG8SC2dvb2dsZS50eXBlKoQBCglEYXlPZldlZWsSGwoXREFZX09GX1dFRUtfVU5TUEVDSUZJRUQQABIKCgZNT05EQVkQARILCgdUVUVTREFZEAISDQoJV0VETkVTREFZEAMSDAoIVEhVUlNEQVkQBBIKCgZGUklEQVkQBRIMCghTQVRVUkRBWRAGEgoKBlNVTkRBWRAHQmkKD2NvbS5nb29nbGUudHlwZUIORGF5T2ZXZWVrUHJvdG9QAVo+Z29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL2RheW9md2VlaztkYXlvZndlZWuiAgNHVFBiBnByb3RvMw"); + +/** + * Represents a day of the week. + * + * @generated from enum google.type.DayOfWeek + */ +export enum DayOfWeek { + /** + * The day of the week is unspecified. + * + * @generated from enum value: DAY_OF_WEEK_UNSPECIFIED = 0; + */ + DAY_OF_WEEK_UNSPECIFIED = 0, + + /** + * Monday + * + * @generated from enum value: MONDAY = 1; + */ + MONDAY = 1, + + /** + * Tuesday + * + * @generated from enum value: TUESDAY = 2; + */ + TUESDAY = 2, + + /** + * Wednesday + * + * @generated from enum value: WEDNESDAY = 3; + */ + WEDNESDAY = 3, + + /** + * Thursday + * + * @generated from enum value: THURSDAY = 4; + */ + THURSDAY = 4, + + /** + * Friday + * + * @generated from enum value: FRIDAY = 5; + */ + FRIDAY = 5, + + /** + * Saturday + * + * @generated from enum value: SATURDAY = 6; + */ + SATURDAY = 6, + + /** + * Sunday + * + * @generated from enum value: SUNDAY = 7; + */ + SUNDAY = 7, +} + +/** + * Describes the enum google.type.DayOfWeek. 
+ */ +export const DayOfWeekSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_google_type_dayofweek, 0); + diff --git a/schemaregistry/google/type/decimal_pb.ts b/schemaregistry/google/type/decimal_pb.ts new file mode 100644 index 00000000..f3c65377 --- /dev/null +++ b/schemaregistry/google/type/decimal_pb.ts @@ -0,0 +1,114 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/decimal.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/decimal.proto. + */ +export const file_google_type_decimal: GenFile = /*@__PURE__*/ + fileDesc("Chlnb29nbGUvdHlwZS9kZWNpbWFsLnByb3RvEgtnb29nbGUudHlwZSIYCgdEZWNpbWFsEg0KBXZhbHVlGAEgASgJQmYKD2NvbS5nb29nbGUudHlwZUIMRGVjaW1hbFByb3RvUAFaOmdvb2dsZS5nb2xhbmcub3JnL2dlbnByb3RvL2dvb2dsZWFwaXMvdHlwZS9kZWNpbWFsO2RlY2ltYWz4AQGiAgNHVFBiBnByb3RvMw"); + +/** + * A representation of a decimal value, such as 2.5. Clients may convert values + * into language-native decimal formats, such as Java's [BigDecimal][] or + * Python's [decimal.Decimal][]. + * + * [BigDecimal]: + * https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html + * [decimal.Decimal]: https://docs.python.org/3/library/decimal.html + * + * @generated from message google.type.Decimal + */ +export type Decimal = Message<"google.type.Decimal"> & { + /** + * The decimal value, as a string. + * + * The string representation consists of an optional sign, `+` (`U+002B`) + * or `-` (`U+002D`), followed by a sequence of zero or more decimal digits + * ("the integer"), optionally followed by a fraction, optionally followed + * by an exponent. + * + * The fraction consists of a decimal point followed by zero or more decimal + * digits. The string must contain at least one digit in either the integer + * or the fraction. The number formed by the sign, the integer and the + * fraction is referred to as the significand. + * + * The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) + * followed by one or more decimal digits. + * + * Services **should** normalize decimal values before storing them by: + * + * - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). + * - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). + * - Coercing the exponent character to lower-case (`2.5E8` -> `2.5e8`). + * - Removing an explicitly-provided zero exponent (`2.5e0` -> `2.5`). + * + * Services **may** perform additional normalization based on its own needs + * and the internal decimal implementation selected, such as shifting the + * decimal point and exponent value together (example: `2.5e-1` <-> `0.25`). 
+ * Additionally, services **may** preserve trailing zeroes in the fraction + * to indicate increased precision, but are not required to do so. + * + * Note that only the `.` character is supported to divide the integer + * and the fraction; `,` **should not** be supported regardless of locale. + * Additionally, thousand separators **should not** be supported. If a + * service does support them, values **must** be normalized. + * + * The ENBF grammar is: + * + * DecimalString = + * [Sign] Significand [Exponent]; + * + * Sign = '+' | '-'; + * + * Significand = + * Digits ['.'] [Digits] | [Digits] '.' Digits; + * + * Exponent = ('e' | 'E') [Sign] Digits; + * + * Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; + * + * Services **should** clearly document the range of supported values, the + * maximum supported precision (total number of digits), and, if applicable, + * the scale (number of digits after the decimal point), as well as how it + * behaves when receiving out-of-bounds values. + * + * Services **may** choose to accept values passed as input even when the + * value has a higher precision or scale than the service supports, and + * **should** round the value to fit the supported scale. Alternatively, the + * service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) + * if precision would be lost. + * + * Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in + * gRPC) if the service receives a value outside of the supported range. + * + * @generated from field: string value = 1; + */ + value: string; +}; + +/** + * Describes the message google.type.Decimal. + * Use `create(DecimalSchema)` to create a new message. + */ +export const DecimalSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_decimal, 0); + diff --git a/schemaregistry/google/type/expr_pb.ts b/schemaregistry/google/type/expr_pb.ts new file mode 100644 index 00000000..a4dd114b --- /dev/null +++ b/schemaregistry/google/type/expr_pb.ts @@ -0,0 +1,105 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/expr.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/expr.proto. + */ +export const file_google_type_expr: GenFile = /*@__PURE__*/ + fileDesc("ChZnb29nbGUvdHlwZS9leHByLnByb3RvEgtnb29nbGUudHlwZSJQCgRFeHByEhIKCmV4cHJlc3Npb24YASABKAkSDQoFdGl0bGUYAiABKAkSEwoLZGVzY3JpcHRpb24YAyABKAkSEAoIbG9jYXRpb24YBCABKAlCWgoPY29tLmdvb2dsZS50eXBlQglFeHByUHJvdG9QAVo0Z29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL2V4cHI7ZXhwcqICA0dUUGIGcHJvdG8z"); + +/** + * Represents a textual expression in the Common Expression Language (CEL) + * syntax. 
CEL is a C-like expression language. The syntax and semantics of CEL + * are documented at https://github.com/google/cel-spec. + * + * Example (Comparison): + * + * title: "Summary size limit" + * description: "Determines if a summary is less than 100 chars" + * expression: "document.summary.size() < 100" + * + * Example (Equality): + * + * title: "Requestor is owner" + * description: "Determines if requestor is the document owner" + * expression: "document.owner == request.auth.claims.email" + * + * Example (Logic): + * + * title: "Public documents" + * description: "Determine whether the document should be publicly visible" + * expression: "document.type != 'private' && document.type != 'internal'" + * + * Example (Data Manipulation): + * + * title: "Notification string" + * description: "Create a notification string with a timestamp." + * expression: "'New message received at ' + string(document.create_time)" + * + * The exact variables and functions that may be referenced within an expression + * are determined by the service that evaluates it. See the service + * documentation for additional information. + * + * @generated from message google.type.Expr + */ +export type Expr = Message<"google.type.Expr"> & { + /** + * Textual representation of an expression in Common Expression Language + * syntax. + * + * @generated from field: string expression = 1; + */ + expression: string; + + /** + * Optional. Title for the expression, i.e. a short string describing + * its purpose. This can be used e.g. in UIs which allow to enter the + * expression. + * + * @generated from field: string title = 2; + */ + title: string; + + /** + * Optional. Description of the expression. This is a longer text which + * describes the expression, e.g. when hovered over it in a UI. + * + * @generated from field: string description = 3; + */ + description: string; + + /** + * Optional. String indicating the location of the expression for error + * reporting, e.g. a file name and a position in the file. + * + * @generated from field: string location = 4; + */ + location: string; +}; + +/** + * Describes the message google.type.Expr. + * Use `create(ExprSchema)` to create a new message. + */ +export const ExprSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_expr, 0); + diff --git a/schemaregistry/google/type/fraction_pb.ts b/schemaregistry/google/type/fraction_pb.ts new file mode 100644 index 00000000..2d180530 --- /dev/null +++ b/schemaregistry/google/type/fraction_pb.ts @@ -0,0 +1,57 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/fraction.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/fraction.proto. + */ +export const file_google_type_fraction: GenFile = /*@__PURE__*/ + fileDesc("Chpnb29nbGUvdHlwZS9mcmFjdGlvbi5wcm90bxILZ29vZ2xlLnR5cGUiMgoIRnJhY3Rpb24SEQoJbnVtZXJhdG9yGAEgASgDEhMKC2Rlbm9taW5hdG9yGAIgASgDQmYKD2NvbS5nb29nbGUudHlwZUINRnJhY3Rpb25Qcm90b1ABWjxnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvZnJhY3Rpb247ZnJhY3Rpb26iAgNHVFBiBnByb3RvMw"); + +/** + * Represents a fraction in terms of a numerator divided by a denominator. + * + * @generated from message google.type.Fraction + */ +export type Fraction = Message<"google.type.Fraction"> & { + /** + * The numerator in the fraction, e.g. 2 in 2/3. + * + * @generated from field: int64 numerator = 1; + */ + numerator: bigint; + + /** + * The value by which the numerator is divided, e.g. 3 in 2/3. Must be + * positive. + * + * @generated from field: int64 denominator = 2; + */ + denominator: bigint; +}; + +/** + * Describes the message google.type.Fraction. + * Use `create(FractionSchema)` to create a new message. + */ +export const FractionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_fraction, 0); + diff --git a/schemaregistry/google/type/interval_pb.ts b/schemaregistry/google/type/interval_pb.ts new file mode 100644 index 00000000..ae5ffa96 --- /dev/null +++ b/schemaregistry/google/type/interval_pb.ts @@ -0,0 +1,69 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/interval.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/interval.proto. + */ +export const file_google_type_interval: GenFile = /*@__PURE__*/ + fileDesc("Chpnb29nbGUvdHlwZS9pbnRlcnZhbC5wcm90bxILZ29vZ2xlLnR5cGUiaAoISW50ZXJ2YWwSLgoKc3RhcnRfdGltZRgBIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASLAoIZW5kX3RpbWUYAiABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wQmkKD2NvbS5nb29nbGUudHlwZUINSW50ZXJ2YWxQcm90b1ABWjxnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvaW50ZXJ2YWw7aW50ZXJ2YWz4AQGiAgNHVFBiBnByb3RvMw", [file_google_protobuf_timestamp]); + +/** + * Represents a time interval, encoded as a Timestamp start (inclusive) and a + * Timestamp end (exclusive). 
+ * + * The start must be less than or equal to the end. + * When the start equals the end, the interval is empty (matches no time). + * When both start and end are unspecified, the interval matches any time. + * + * @generated from message google.type.Interval + */ +export type Interval = Message<"google.type.Interval"> & { + /** + * Optional. Inclusive start of the interval. + * + * If specified, a Timestamp matching this interval will have to be the same + * or after the start. + * + * @generated from field: google.protobuf.Timestamp start_time = 1; + */ + startTime?: Timestamp; + + /** + * Optional. Exclusive end of the interval. + * + * If specified, a Timestamp matching this interval will have to be before the + * end. + * + * @generated from field: google.protobuf.Timestamp end_time = 2; + */ + endTime?: Timestamp; +}; + +/** + * Describes the message google.type.Interval. + * Use `create(IntervalSchema)` to create a new message. + */ +export const IntervalSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_interval, 0); + diff --git a/schemaregistry/google/type/latlng_pb.ts b/schemaregistry/google/type/latlng_pb.ts new file mode 100644 index 00000000..41c99c15 --- /dev/null +++ b/schemaregistry/google/type/latlng_pb.ts @@ -0,0 +1,60 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/latlng.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/latlng.proto. + */ +export const file_google_type_latlng: GenFile = /*@__PURE__*/ + fileDesc("Chhnb29nbGUvdHlwZS9sYXRsbmcucHJvdG8SC2dvb2dsZS50eXBlIi0KBkxhdExuZxIQCghsYXRpdHVkZRgBIAEoARIRCglsb25naXR1ZGUYAiABKAFCYwoPY29tLmdvb2dsZS50eXBlQgtMYXRMbmdQcm90b1ABWjhnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvbGF0bG5nO2xhdGxuZ/gBAaICA0dUUGIGcHJvdG8z"); + +/** + * An object that represents a latitude/longitude pair. This is expressed as a + * pair of doubles to represent degrees latitude and degrees longitude. Unless + * specified otherwise, this must conform to the + * WGS84 + * standard. Values must be within normalized ranges. + * + * @generated from message google.type.LatLng + */ +export type LatLng = Message<"google.type.LatLng"> & { + /** + * The latitude in degrees. It must be in the range [-90.0, +90.0]. + * + * @generated from field: double latitude = 1; + */ + latitude: number; + + /** + * The longitude in degrees. It must be in the range [-180.0, +180.0]. + * + * @generated from field: double longitude = 2; + */ + longitude: number; +}; + +/** + * Describes the message google.type.LatLng. + * Use `create(LatLngSchema)` to create a new message. 
+ */ +export const LatLngSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_latlng, 0); + diff --git a/schemaregistry/google/type/localized_text_pb.ts b/schemaregistry/google/type/localized_text_pb.ts new file mode 100644 index 00000000..80235333 --- /dev/null +++ b/schemaregistry/google/type/localized_text_pb.ts @@ -0,0 +1,59 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/localized_text.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/localized_text.proto. + */ +export const file_google_type_localized_text: GenFile = /*@__PURE__*/ + fileDesc("CiBnb29nbGUvdHlwZS9sb2NhbGl6ZWRfdGV4dC5wcm90bxILZ29vZ2xlLnR5cGUiNAoNTG9jYWxpemVkVGV4dBIMCgR0ZXh0GAEgASgJEhUKDWxhbmd1YWdlX2NvZGUYAiABKAlCegoPY29tLmdvb2dsZS50eXBlQhJMb2NhbGl6ZWRUZXh0UHJvdG9QAVpIZ29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL2xvY2FsaXplZF90ZXh0O2xvY2FsaXplZF90ZXh0+AEBogIDR1RQYgZwcm90bzM"); + +/** + * Localized variant of a text in a particular language. + * + * @generated from message google.type.LocalizedText + */ +export type LocalizedText = Message<"google.type.LocalizedText"> & { + /** + * Localized string in the language corresponding to `language_code' below. + * + * @generated from field: string text = 1; + */ + text: string; + + /** + * The text's BCP-47 language code, such as "en-US" or "sr-Latn". + * + * For more information, see + * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + * + * @generated from field: string language_code = 2; + */ + languageCode: string; +}; + +/** + * Describes the message google.type.LocalizedText. + * Use `create(LocalizedTextSchema)` to create a new message. + */ +export const LocalizedTextSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_localized_text, 0); + diff --git a/schemaregistry/google/type/money_pb.ts b/schemaregistry/google/type/money_pb.ts new file mode 100644 index 00000000..de6bd31d --- /dev/null +++ b/schemaregistry/google/type/money_pb.ts @@ -0,0 +1,69 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/money.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/money.proto. + */ +export const file_google_type_money: GenFile = /*@__PURE__*/ + fileDesc("Chdnb29nbGUvdHlwZS9tb25leS5wcm90bxILZ29vZ2xlLnR5cGUiPAoFTW9uZXkSFQoNY3VycmVuY3lfY29kZRgBIAEoCRINCgV1bml0cxgCIAEoAxINCgVuYW5vcxgDIAEoBUJgCg9jb20uZ29vZ2xlLnR5cGVCCk1vbmV5UHJvdG9QAVo2Z29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL21vbmV5O21vbmV5+AEBogIDR1RQYgZwcm90bzM"); + +/** + * Represents an amount of money with its currency type. + * + * @generated from message google.type.Money + */ +export type Money = Message<"google.type.Money"> & { + /** + * The three-letter currency code defined in ISO 4217. + * + * @generated from field: string currency_code = 1; + */ + currencyCode: string; + + /** + * The whole units of the amount. + * For example if `currencyCode` is `"USD"`, then 1 unit is one US dollar. + * + * @generated from field: int64 units = 2; + */ + units: bigint; + + /** + * Number of nano (10^-9) units of the amount. + * The value must be between -999,999,999 and +999,999,999 inclusive. + * If `units` is positive, `nanos` must be positive or zero. + * If `units` is zero, `nanos` can be positive, zero, or negative. + * If `units` is negative, `nanos` must be negative or zero. + * For example $-1.75 is represented as `units`=-1 and `nanos`=-750,000,000. + * + * @generated from field: int32 nanos = 3; + */ + nanos: number; +}; + +/** + * Describes the message google.type.Money. + * Use `create(MoneySchema)` to create a new message. + */ +export const MoneySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_money, 0); + diff --git a/schemaregistry/google/type/month_pb.ts b/schemaregistry/google/type/month_pb.ts new file mode 100644 index 00000000..072f60b1 --- /dev/null +++ b/schemaregistry/google/type/month_pb.ts @@ -0,0 +1,131 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/month.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc } from "@bufbuild/protobuf/codegenv1"; + +/** + * Describes the file google/type/month.proto. 
+ */ +export const file_google_type_month: GenFile = /*@__PURE__*/ + fileDesc("Chdnb29nbGUvdHlwZS9tb250aC5wcm90bxILZ29vZ2xlLnR5cGUqsAEKBU1vbnRoEhUKEU1PTlRIX1VOU1BFQ0lGSUVEEAASCwoHSkFOVUFSWRABEgwKCEZFQlJVQVJZEAISCQoFTUFSQ0gQAxIJCgVBUFJJTBAEEgcKA01BWRAFEggKBEpVTkUQBhIICgRKVUxZEAcSCgoGQVVHVVNUEAgSDQoJU0VQVEVNQkVSEAkSCwoHT0NUT0JFUhAKEgwKCE5PVkVNQkVSEAsSDAoIREVDRU1CRVIQDEJdCg9jb20uZ29vZ2xlLnR5cGVCCk1vbnRoUHJvdG9QAVo2Z29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL21vbnRoO21vbnRoogIDR1RQYgZwcm90bzM"); + +/** + * Represents a month in the Gregorian calendar. + * + * @generated from enum google.type.Month + */ +export enum Month { + /** + * The unspecified month. + * + * @generated from enum value: MONTH_UNSPECIFIED = 0; + */ + MONTH_UNSPECIFIED = 0, + + /** + * The month of January. + * + * @generated from enum value: JANUARY = 1; + */ + JANUARY = 1, + + /** + * The month of February. + * + * @generated from enum value: FEBRUARY = 2; + */ + FEBRUARY = 2, + + /** + * The month of March. + * + * @generated from enum value: MARCH = 3; + */ + MARCH = 3, + + /** + * The month of April. + * + * @generated from enum value: APRIL = 4; + */ + APRIL = 4, + + /** + * The month of May. + * + * @generated from enum value: MAY = 5; + */ + MAY = 5, + + /** + * The month of June. + * + * @generated from enum value: JUNE = 6; + */ + JUNE = 6, + + /** + * The month of July. + * + * @generated from enum value: JULY = 7; + */ + JULY = 7, + + /** + * The month of August. + * + * @generated from enum value: AUGUST = 8; + */ + AUGUST = 8, + + /** + * The month of September. + * + * @generated from enum value: SEPTEMBER = 9; + */ + SEPTEMBER = 9, + + /** + * The month of October. + * + * @generated from enum value: OCTOBER = 10; + */ + OCTOBER = 10, + + /** + * The month of November. + * + * @generated from enum value: NOVEMBER = 11; + */ + NOVEMBER = 11, + + /** + * The month of December. + * + * @generated from enum value: DECEMBER = 12; + */ + DECEMBER = 12, +} + +/** + * Describes the enum google.type.Month. + */ +export const MonthSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_google_type_month, 0); + diff --git a/schemaregistry/google/type/phone_number_pb.ts b/schemaregistry/google/type/phone_number_pb.ts new file mode 100644 index 00000000..f11e0e54 --- /dev/null +++ b/schemaregistry/google/type/phone_number_pb.ts @@ -0,0 +1,165 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/phone_number.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/phone_number.proto. 
+ */ +export const file_google_type_phone_number: GenFile = /*@__PURE__*/ + fileDesc("Ch5nb29nbGUvdHlwZS9waG9uZV9udW1iZXIucHJvdG8SC2dvb2dsZS50eXBlIqsBCgtQaG9uZU51bWJlchIVCgtlMTY0X251bWJlchgBIAEoCUgAEjgKCnNob3J0X2NvZGUYAiABKAsyIi5nb29nbGUudHlwZS5QaG9uZU51bWJlci5TaG9ydENvZGVIABIRCglleHRlbnNpb24YAyABKAkaMAoJU2hvcnRDb2RlEhMKC3JlZ2lvbl9jb2RlGAEgASgJEg4KBm51bWJlchgCIAEoCUIGCgRraW5kQnQKD2NvbS5nb29nbGUudHlwZUIQUGhvbmVOdW1iZXJQcm90b1ABWkRnb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvcGhvbmVfbnVtYmVyO3Bob25lX251bWJlcvgBAaICA0dUUGIGcHJvdG8z"); + +/** + * An object representing a phone number, suitable as an API wire format. + * + * This representation: + * + * - should not be used for locale-specific formatting of a phone number, such + * as "+1 (650) 253-0000 ext. 123" + * + * - is not designed for efficient storage + * - may not be suitable for dialing - specialized libraries (see references) + * should be used to parse the number for that purpose + * + * To do something meaningful with this number, such as format it for various + * use-cases, convert it to an `i18n.phonenumbers.PhoneNumber` object first. + * + * For instance, in Java this would be: + * + * com.google.type.PhoneNumber wireProto = + * com.google.type.PhoneNumber.newBuilder().build(); + * com.google.i18n.phonenumbers.Phonenumber.PhoneNumber phoneNumber = + * PhoneNumberUtil.getInstance().parse(wireProto.getE164Number(), "ZZ"); + * if (!wireProto.getExtension().isEmpty()) { + * phoneNumber.setExtension(wireProto.getExtension()); + * } + * + * Reference(s): + * - https://github.com/google/libphonenumber + * + * @generated from message google.type.PhoneNumber + */ +export type PhoneNumber = Message<"google.type.PhoneNumber"> & { + /** + * Required. Either a regular number, or a short code. New fields may be + * added to the oneof below in the future, so clients should ignore phone + * numbers for which none of the fields they coded against are set. + * + * @generated from oneof google.type.PhoneNumber.kind + */ + kind: { + /** + * The phone number, represented as a leading plus sign ('+'), followed by a + * phone number that uses a relaxed ITU E.164 format consisting of the + * country calling code (1 to 3 digits) and the subscriber number, with no + * additional spaces or formatting, e.g.: + * - correct: "+15552220123" + * - incorrect: "+1 (555) 222-01234 x123". + * + * The ITU E.164 format limits the latter to 12 digits, but in practice not + * all countries respect that, so we relax that restriction here. + * National-only numbers are not allowed. + * + * References: + * - https://www.itu.int/rec/T-REC-E.164-201011-I + * - https://en.wikipedia.org/wiki/E.164. + * - https://en.wikipedia.org/wiki/List_of_country_calling_codes + * + * @generated from field: string e164_number = 1; + */ + value: string; + case: "e164Number"; + } | { + /** + * A short code. + * + * Reference(s): + * - https://en.wikipedia.org/wiki/Short_code + * + * @generated from field: google.type.PhoneNumber.ShortCode short_code = 2; + */ + value: PhoneNumber_ShortCode; + case: "shortCode"; + } | { case: undefined; value?: undefined }; + + /** + * The phone number's extension. The extension is not standardized in ITU + * recommendations, except for being defined as a series of numbers with a + * maximum length of 40 digits. Other than digits, some other dialing + * characters such as ',' (indicating a wait) or '#' may be stored here. 
+ * + * Note that no regions currently use extensions with short codes, so this + * field is normally only set in conjunction with an E.164 number. It is held + * separately from the E.164 number to allow for short code extensions in the + * future. + * + * @generated from field: string extension = 3; + */ + extension: string; +}; + +/** + * Describes the message google.type.PhoneNumber. + * Use `create(PhoneNumberSchema)` to create a new message. + */ +export const PhoneNumberSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_phone_number, 0); + +/** + * An object representing a short code, which is a phone number that is + * typically much shorter than regular phone numbers and can be used to + * address messages in MMS and SMS systems, as well as for abbreviated dialing + * (e.g. "Text 611 to see how many minutes you have remaining on your plan."). + * + * Short codes are restricted to a region and are not internationally + * dialable, which means the same short code can exist in different regions, + * with different usage and pricing, even if those regions share the same + * country calling code (e.g. US and CA). + * + * @generated from message google.type.PhoneNumber.ShortCode + */ +export type PhoneNumber_ShortCode = Message<"google.type.PhoneNumber.ShortCode"> & { + /** + * Required. The BCP-47 region code of the location where calls to this + * short code can be made, such as "US" and "BB". + * + * Reference(s): + * - http://www.unicode.org/reports/tr35/#unicode_region_subtag + * + * @generated from field: string region_code = 1; + */ + regionCode: string; + + /** + * Required. The short code digits, without a leading plus ('+') or country + * calling code, e.g. "611". + * + * @generated from field: string number = 2; + */ + number: string; +}; + +/** + * Describes the message google.type.PhoneNumber.ShortCode. + * Use `create(PhoneNumber_ShortCodeSchema)` to create a new message. + */ +export const PhoneNumber_ShortCodeSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_phone_number, 0, 0); + diff --git a/schemaregistry/google/type/postal_address_pb.ts b/schemaregistry/google/type/postal_address_pb.ts new file mode 100644 index 00000000..2bde15eb --- /dev/null +++ b/schemaregistry/google/type/postal_address_pb.ts @@ -0,0 +1,193 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/postal_address.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/postal_address.proto. 
+ */ +export const file_google_type_postal_address: GenFile = /*@__PURE__*/ + fileDesc("CiBnb29nbGUvdHlwZS9wb3N0YWxfYWRkcmVzcy5wcm90bxILZ29vZ2xlLnR5cGUi/QEKDVBvc3RhbEFkZHJlc3MSEAoIcmV2aXNpb24YASABKAUSEwoLcmVnaW9uX2NvZGUYAiABKAkSFQoNbGFuZ3VhZ2VfY29kZRgDIAEoCRITCgtwb3N0YWxfY29kZRgEIAEoCRIUCgxzb3J0aW5nX2NvZGUYBSABKAkSGwoTYWRtaW5pc3RyYXRpdmVfYXJlYRgGIAEoCRIQCghsb2NhbGl0eRgHIAEoCRITCgtzdWJsb2NhbGl0eRgIIAEoCRIVCg1hZGRyZXNzX2xpbmVzGAkgAygJEhIKCnJlY2lwaWVudHMYCiADKAkSFAoMb3JnYW5pemF0aW9uGAsgASgJQngKD2NvbS5nb29nbGUudHlwZUISUG9zdGFsQWRkcmVzc1Byb3RvUAFaRmdvb2dsZS5nb2xhbmcub3JnL2dlbnByb3RvL2dvb2dsZWFwaXMvdHlwZS9wb3N0YWxhZGRyZXNzO3Bvc3RhbGFkZHJlc3P4AQGiAgNHVFBiBnByb3RvMw"); + +/** + * Represents a postal address, e.g. for postal delivery or payments addresses. + * Given a postal address, a postal service can deliver items to a premise, P.O. + * Box or similar. + * It is not intended to model geographical locations (roads, towns, + * mountains). + * + * In typical usage an address would be created via user input or from importing + * existing data, depending on the type of process. + * + * Advice on address input / editing: + * - Use an i18n-ready address widget such as + * https://github.com/google/libaddressinput) + * - Users should not be presented with UI elements for input or editing of + * fields outside countries where that field is used. + * + * For more guidance on how to use this schema, please see: + * https://support.google.com/business/answer/6397478 + * + * @generated from message google.type.PostalAddress + */ +export type PostalAddress = Message<"google.type.PostalAddress"> & { + /** + * The schema revision of the `PostalAddress`. This must be set to 0, which is + * the latest revision. + * + * All new revisions **must** be backward compatible with old revisions. + * + * @generated from field: int32 revision = 1; + */ + revision: number; + + /** + * Required. CLDR region code of the country/region of the address. This + * is never inferred and it is up to the user to ensure the value is + * correct. See http://cldr.unicode.org/ and + * http://www.unicode.org/cldr/charts/30/supplemental/territory_information.html + * for details. Example: "CH" for Switzerland. + * + * @generated from field: string region_code = 2; + */ + regionCode: string; + + /** + * Optional. BCP-47 language code of the contents of this address (if + * known). This is often the UI language of the input form or is expected + * to match one of the languages used in the address' country/region, or their + * transliterated equivalents. + * This can affect formatting in certain countries, but is not critical + * to the correctness of the data and will never affect any validation or + * other non-formatting related operations. + * + * If this value is not known, it should be omitted (rather than specifying a + * possibly incorrect default). + * + * Examples: "zh-Hant", "ja", "ja-Latn", "en". + * + * @generated from field: string language_code = 3; + */ + languageCode: string; + + /** + * Optional. Postal code of the address. Not all countries use or require + * postal codes to be present, but where they are used, they may trigger + * additional validation with other parts of the address (e.g. state/zip + * validation in the U.S.A.). + * + * @generated from field: string postal_code = 4; + */ + postalCode: string; + + /** + * Optional. Additional, country-specific, sorting code. This is not used + * in most regions. Where it is used, the value is either a string like + * "CEDEX", optionally followed by a number (e.g. 
"CEDEX 7"), or just a number + * alone, representing the "sector code" (Jamaica), "delivery area indicator" + * (Malawi) or "post office indicator" (e.g. Côte d'Ivoire). + * + * @generated from field: string sorting_code = 5; + */ + sortingCode: string; + + /** + * Optional. Highest administrative subdivision which is used for postal + * addresses of a country or region. + * For example, this can be a state, a province, an oblast, or a prefecture. + * Specifically, for Spain this is the province and not the autonomous + * community (e.g. "Barcelona" and not "Catalonia"). + * Many countries don't use an administrative area in postal addresses. E.g. + * in Switzerland this should be left unpopulated. + * + * @generated from field: string administrative_area = 6; + */ + administrativeArea: string; + + /** + * Optional. Generally refers to the city/town portion of the address. + * Examples: US city, IT comune, UK post town. + * In regions of the world where localities are not well defined or do not fit + * into this structure well, leave locality empty and use address_lines. + * + * @generated from field: string locality = 7; + */ + locality: string; + + /** + * Optional. Sublocality of the address. + * For example, this can be neighborhoods, boroughs, districts. + * + * @generated from field: string sublocality = 8; + */ + sublocality: string; + + /** + * Unstructured address lines describing the lower levels of an address. + * + * Because values in address_lines do not have type information and may + * sometimes contain multiple values in a single field (e.g. + * "Austin, TX"), it is important that the line order is clear. The order of + * address lines should be "envelope order" for the country/region of the + * address. In places where this can vary (e.g. Japan), address_language is + * used to make it explicit (e.g. "ja" for large-to-small ordering and + * "ja-Latn" or "en" for small-to-large). This way, the most specific line of + * an address can be selected based on the language. + * + * The minimum permitted structural representation of an address consists + * of a region_code with all remaining information placed in the + * address_lines. It would be possible to format such an address very + * approximately without geocoding, but no semantic reasoning could be + * made about any of the address components until it was at least + * partially resolved. + * + * Creating an address only containing a region_code and address_lines, and + * then geocoding is the recommended way to handle completely unstructured + * addresses (as opposed to guessing which parts of the address should be + * localities or administrative areas). + * + * @generated from field: repeated string address_lines = 9; + */ + addressLines: string[]; + + /** + * Optional. The recipient at the address. + * This field may, under certain circumstances, contain multiline information. + * For example, it might contain "care of" information. + * + * @generated from field: repeated string recipients = 10; + */ + recipients: string[]; + + /** + * Optional. The name of the organization at the address. + * + * @generated from field: string organization = 11; + */ + organization: string; +}; + +/** + * Describes the message google.type.PostalAddress. + * Use `create(PostalAddressSchema)` to create a new message. 
+ */ +export const PostalAddressSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_postal_address, 0); + diff --git a/schemaregistry/google/type/quaternion_pb.ts b/schemaregistry/google/type/quaternion_pb.ts new file mode 100644 index 00000000..b0047bef --- /dev/null +++ b/schemaregistry/google/type/quaternion_pb.ts @@ -0,0 +1,125 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/quaternion.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/quaternion.proto. + */ +export const file_google_type_quaternion: GenFile = /*@__PURE__*/ + fileDesc("Chxnb29nbGUvdHlwZS9xdWF0ZXJuaW9uLnByb3RvEgtnb29nbGUudHlwZSI4CgpRdWF0ZXJuaW9uEgkKAXgYASABKAESCQoBeRgCIAEoARIJCgF6GAMgASgBEgkKAXcYBCABKAFCbwoPY29tLmdvb2dsZS50eXBlQg9RdWF0ZXJuaW9uUHJvdG9QAVpAZ29vZ2xlLmdvbGFuZy5vcmcvZ2VucHJvdG8vZ29vZ2xlYXBpcy90eXBlL3F1YXRlcm5pb247cXVhdGVybmlvbvgBAaICA0dUUGIGcHJvdG8z"); + +/** + * A quaternion is defined as the quotient of two directed lines in a + * three-dimensional space or equivalently as the quotient of two Euclidean + * vectors (https://en.wikipedia.org/wiki/Quaternion). + * + * Quaternions are often used in calculations involving three-dimensional + * rotations (https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation), + * as they provide greater mathematical robustness by avoiding the gimbal lock + * problems that can be encountered when using Euler angles + * (https://en.wikipedia.org/wiki/Gimbal_lock). + * + * Quaternions are generally represented in this form: + * + * w + xi + yj + zk + * + * where x, y, z, and w are real numbers, and i, j, and k are three imaginary + * numbers. + * + * Our naming choice `(x, y, z, w)` comes from the desire to avoid confusion for + * those interested in the geometric properties of the quaternion in the 3D + * Cartesian space. Other texts often use alternative names or subscripts, such + * as `(a, b, c, d)`, `(1, i, j, k)`, or `(0, 1, 2, 3)`, which are perhaps + * better suited for mathematical interpretations. + * + * To avoid any confusion, as well as to maintain compatibility with a large + * number of software libraries, the quaternions represented using the protocol + * buffer below *must* follow the Hamilton convention, which defines `ij = k` + * (i.e. a right-handed algebra), and therefore: + * + * i^2 = j^2 = k^2 = ijk = −1 + * ij = −ji = k + * jk = −kj = i + * ki = −ik = j + * + * Please DO NOT use this to represent quaternions that follow the JPL + * convention, or any of the other quaternion flavors out there. + * + * Definitions: + * + * - Quaternion norm (or magnitude): `sqrt(x^2 + y^2 + z^2 + w^2)`. 
+ * - Unit (or normalized) quaternion: a quaternion whose norm is 1. + * - Pure quaternion: a quaternion whose scalar component (`w`) is 0. + * - Rotation quaternion: a unit quaternion used to represent rotation. + * - Orientation quaternion: a unit quaternion used to represent orientation. + * + * A quaternion can be normalized by dividing it by its norm. The resulting + * quaternion maintains the same direction, but has a norm of 1, i.e. it moves + * on the unit sphere. This is generally necessary for rotation and orientation + * quaternions, to avoid rounding errors: + * https://en.wikipedia.org/wiki/Rotation_formalisms_in_three_dimensions + * + * Note that `(x, y, z, w)` and `(-x, -y, -z, -w)` represent the same rotation, + * but normalization would be even more useful, e.g. for comparison purposes, if + * it would produce a unique representation. It is thus recommended that `w` be + * kept positive, which can be achieved by changing all the signs when `w` is + * negative. + * + * + * @generated from message google.type.Quaternion + */ +export type Quaternion = Message<"google.type.Quaternion"> & { + /** + * The x component. + * + * @generated from field: double x = 1; + */ + x: number; + + /** + * The y component. + * + * @generated from field: double y = 2; + */ + y: number; + + /** + * The z component. + * + * @generated from field: double z = 3; + */ + z: number; + + /** + * The scalar component. + * + * @generated from field: double w = 4; + */ + w: number; +}; + +/** + * Describes the message google.type.Quaternion. + * Use `create(QuaternionSchema)` to create a new message. + */ +export const QuaternionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_quaternion, 0); + diff --git a/schemaregistry/google/type/timeofday_pb.ts b/schemaregistry/google/type/timeofday_pb.ts new file mode 100644 index 00000000..ddcca796 --- /dev/null +++ b/schemaregistry/google/type/timeofday_pb.ts @@ -0,0 +1,75 @@ +// Copyright 2021-2024 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file google/type/timeofday.proto (package google.type, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file google/type/timeofday.proto. + */ +export const file_google_type_timeofday: GenFile = /*@__PURE__*/ + fileDesc("Chtnb29nbGUvdHlwZS90aW1lb2ZkYXkucHJvdG8SC2dvb2dsZS50eXBlIksKCVRpbWVPZkRheRINCgVob3VycxgBIAEoBRIPCgdtaW51dGVzGAIgASgFEg8KB3NlY29uZHMYAyABKAUSDQoFbmFub3MYBCABKAVCbAoPY29tLmdvb2dsZS50eXBlQg5UaW1lT2ZEYXlQcm90b1ABWj5nb29nbGUuZ29sYW5nLm9yZy9nZW5wcm90by9nb29nbGVhcGlzL3R5cGUvdGltZW9mZGF5O3RpbWVvZmRhefgBAaICA0dUUGIGcHJvdG8z"); + +/** + * Represents a time of day. The date and time zone are either not significant + * or are specified elsewhere. 
An API may choose to allow leap seconds. Related + * types are [google.type.Date][google.type.Date] and + * `google.protobuf.Timestamp`. + * + * @generated from message google.type.TimeOfDay + */ +export type TimeOfDay = Message<"google.type.TimeOfDay"> & { + /** + * Hours of day in 24 hour format. Should be from 0 to 23. An API may choose + * to allow the value "24:00:00" for scenarios like business closing time. + * + * @generated from field: int32 hours = 1; + */ + hours: number; + + /** + * Minutes of hour of day. Must be from 0 to 59. + * + * @generated from field: int32 minutes = 2; + */ + minutes: number; + + /** + * Seconds of minutes of the time. Must normally be from 0 to 59. An API may + * allow the value 60 if it allows leap-seconds. + * + * @generated from field: int32 seconds = 3; + */ + seconds: number; + + /** + * Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999. + * + * @generated from field: int32 nanos = 4; + */ + nanos: number; +}; + +/** + * Describes the message google.type.TimeOfDay. + * Use `create(TimeOfDaySchema)` to create a new message. + */ +export const TimeOfDaySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_google_type_timeofday, 0); + diff --git a/schemaregistry/index.ts b/schemaregistry/index.ts new file mode 100644 index 00000000..a3bf6961 --- /dev/null +++ b/schemaregistry/index.ts @@ -0,0 +1,25 @@ +export * from './confluent/types/decimal_pb' +export * from './confluent/meta_pb' +export * from './rules/encryption/awskms/aws-driver' +export * from './rules/encryption/azurekms/azure-driver' +export * from './rules/encryption/dekregistry/dekregistry-client' +export * from './rules/encryption/gcpkms/gcp-driver' +export * from './rules/encryption/hcvault/hcvault-driver' +export * from './rules/encryption/localkms/local-driver' +export * from './rules/encryption/encrypt-executor' +export * from './rules/encryption/kms-registry' +export * from './rules/jsonata/jsonata-executor' +export * from './serde/avro' +export * from './serde/json' +export * from './serde/protobuf' +export * from './serde/rule-registry' +export * from './serde/serde' +export * from './rest-error' +export * from './mock-schemaregistry-client' +export * from './schemaregistry-client' +export { + BasicAuthCredentials, + BearerAuthCredentials, + ClientConfig, + SaslInfo +} from './rest-service'; diff --git a/schemaregistry/jest.config.js b/schemaregistry/jest.config.js new file mode 100644 index 00000000..c4caa2fb --- /dev/null +++ b/schemaregistry/jest.config.js @@ -0,0 +1,6 @@ +module.exports = { + roots: [".."], + transform: { + '^.+\\.tsx?$': 'ts-jest', + }, + }; diff --git a/schemaregistry/mock-schemaregistry-client.ts b/schemaregistry/mock-schemaregistry-client.ts new file mode 100644 index 00000000..2114164b --- /dev/null +++ b/schemaregistry/mock-schemaregistry-client.ts @@ -0,0 +1,446 @@ + +import { + Client, + Compatibility, + minimize, + SchemaInfo, + SchemaMetadata, + ServerConfig +} from './schemaregistry-client'; +import stringify from "json-stringify-deterministic"; +import {ClientConfig} from "./rest-service"; +import {RestError} from "./rest-error"; + +interface VersionCacheEntry { + version: number; + softDeleted: boolean; +} + +interface InfoCacheEntry { + info: SchemaInfo; + softDeleted: boolean; +} + +interface MetadataCacheEntry { + metadata: SchemaMetadata; + softDeleted: boolean; +} + +class Counter { + private count: number = 0; + + currentValue(): number { + return this.count; + } + + increment(): number { + this.count++; + return this.count; 
+ } +} + +const noSubject = ""; + +class MockClient implements Client { + private clientConfig?: ClientConfig; + private infoToSchemaCache: Map; + private idToSchemaCache: Map; + private schemaToVersionCache: Map; + private configCache: Map; + private counter: Counter; + + constructor(config?: ClientConfig) { + this.clientConfig = config + this.infoToSchemaCache = new Map(); + this.idToSchemaCache = new Map(); + this.schemaToVersionCache = new Map(); + this.configCache = new Map(); + this.counter = new Counter(); + } + + config(): ClientConfig { + return this.clientConfig! + } + + async register(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const metadata = await this.registerFullResponse(subject, schema, normalize); + if (!metadata) { + throw new RestError("Failed to register schema", 422, 42200); + } + return metadata.id; + } + + async registerFullResponse(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + + const cacheEntry = this.infoToSchemaCache.get(cacheKey); + if (cacheEntry && !cacheEntry.softDeleted) { + return cacheEntry.metadata; + } + + const id = await this.getIDFromRegistry(subject, schema); + if (id === -1) { + throw new RestError("Failed to retrieve schema ID from registry", 422, 42200); + } + + const metadata: SchemaMetadata = { ...schema, id }; + this.infoToSchemaCache.set(cacheKey, { metadata, softDeleted: false }); + + return metadata; + } + + private async getIDFromRegistry(subject: string, schema: SchemaInfo): Promise { + let id = -1; + + for (const [key, value] of this.idToSchemaCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && this.schemasEqual(value.info, schema)) { + id = parsedKey.id; + break; + } + } + + await this.generateVersion(subject, schema); + if (id < 0) { + id = this.counter.increment(); + const idCacheKey = stringify({ subject, id }); + this.idToSchemaCache.set(idCacheKey, { info: schema, softDeleted: false }); + } + + return id; + } + + private async generateVersion(subject: string, schema: SchemaInfo): Promise { + const versions = await this.allVersions(subject); + let newVersion: number; + + if (versions.length === 0) { + newVersion = 1; + } else { + newVersion = versions[versions.length - 1] + 1; + } + + const cacheKey = stringify({ subject, schema: minimize(schema) }); + this.schemaToVersionCache.set(cacheKey, { version: newVersion, softDeleted: false }); + } + + async getBySubjectAndId(subject: string, id: number, format?: string): Promise { + const cacheKey = stringify({ subject, id }); + const cacheEntry = this.idToSchemaCache.get(cacheKey); + + if (!cacheEntry || cacheEntry.softDeleted) { + throw new RestError("Schema not found", 404, 40400); + } + return cacheEntry.info; + } + + async getId(subject: string, schema: SchemaInfo): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + const cacheEntry = this.infoToSchemaCache.get(cacheKey); + if (!cacheEntry || cacheEntry.softDeleted) { + throw new RestError("Schema not found", 404, 40400); + } + return cacheEntry.metadata.id; + } + + async getLatestSchemaMetadata(subject: string, format?: string): Promise { + const version = await this.latestVersion(subject); + if (version === -1) { + throw new RestError("No versions found for subject", 404, 40400); + } + + return this.getSchemaMetadata(subject, version); + } + + async getSchemaMetadata(subject: string, version: number, deleted: boolean = false, 
format?: string): Promise { + let json; + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && value.version === version) { + json = parsedKey; + } + } + + if (!json) { + throw new RestError("Schema not found", 404, 40400); + } + + let id: number = -1; + for (const [key, value] of this.idToSchemaCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && value.info.schema === json.schema.schema) { + id = parsedKey.id; + } + } + if (id === -1) { + throw new RestError("Schema not found", 404, 40400); + } + + + return { + id, + version, + subject, + ...json.schema, + }; + } + + async getLatestWithMetadata(subject: string, metadata: { [key: string]: string }, + deleted: boolean = false, format?: string): Promise { + let metadataStr = ''; + + for (const key in metadata) { + const encodedKey = encodeURIComponent(key); + const encodedValue = encodeURIComponent(metadata[key]); + metadataStr += `&key=${encodedKey}&value=${encodedValue}`; + } + + let results: SchemaMetadata[] = []; + + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && (!value.softDeleted || deleted)) { + if (parsedKey.schema.metadata && this.isSubset(metadata, parsedKey.schema.metadata.properties)) { + results.push({ + id: parsedKey.schema.id, + version: value.version, + subject, + ...parsedKey.schema + }); + } + } + } + + if (results.length === 0) { + throw new RestError("Schema not found", 404, 40400); + } + + let latest: SchemaMetadata = results[0]; + + results.forEach((result) => { + if (result.version! > latest.version!) { + latest = result; + } + }); + + return latest; + } + + private isSubset(containee: { [key: string]: string }, container: { [key: string]: string }){ + for (const key in containee) { + if (containee[key] !== container[key]) { + return false; + } + } + return true; + } + + async getAllVersions(subject: string): Promise { + const results = await this.allVersions(subject); + + if (results.length === 0) { + throw new RestError("No versions found for subject", 404, 40400); + } + return results; + } + + private async allVersions(subject: string): Promise { + const versions: number[] = []; + + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && !value.softDeleted) { + versions.push(value.version); + } + } + return versions; + } + + private async latestVersion(subject: string): Promise { + const versions = await this.allVersions(subject); + if (versions.length === 0) { + return -1; + } + return versions[versions.length - 1]; + } + + private async deleteVersion(cacheKey: string, version: number, permanent: boolean): Promise { + if (permanent) { + this.schemaToVersionCache.delete(cacheKey); + } else { + this.schemaToVersionCache.set(cacheKey, { version, softDeleted: true }); + } + } + + private async deleteInfo(cacheKey: string, info: SchemaInfo, permanent: boolean): Promise { + if (permanent) { + this.idToSchemaCache.delete(cacheKey); + } else { + this.idToSchemaCache.set(cacheKey, { info, softDeleted: true }); + } + } + + private async deleteMetadata(cacheKey: string, metadata: SchemaMetadata, permanent: boolean): Promise { + if (permanent) { + this.infoToSchemaCache.delete(cacheKey); + } else { + this.infoToSchemaCache.set(cacheKey, { metadata, softDeleted: true }); + } + } + + async getVersion(subject: string, 
schema: SchemaInfo, normalize: boolean = false): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + const cacheEntry = this.schemaToVersionCache.get(cacheKey); + + if (!cacheEntry || cacheEntry.softDeleted) { + throw new RestError("Schema not found", 404, 40400); + } + + return cacheEntry.version; + } + + async getAllSubjects(): Promise { + const subjects: string[] = []; + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (!value.softDeleted && !subjects.includes(parsedKey.subject)) { + subjects.push(parsedKey.subject); + } + } + return subjects.sort(); + } + + async deleteSubject(subject: string, permanent: boolean = false): Promise { + const deletedVersions: number[] = []; + for (const [key, value] of this.infoToSchemaCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && (permanent || !value.softDeleted)) { + await this.deleteMetadata(key, value.metadata, permanent); + } + } + + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && (permanent || !value.softDeleted)) { + await this.deleteVersion(key, value.version, permanent); + deletedVersions.push(value.version); + } + } + + this.configCache.delete(subject); + + if (permanent) { + for (const [key, value] of this.idToSchemaCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && (!value.softDeleted)) { + await this.deleteInfo(key, value.info, permanent); + } + } + } + + return deletedVersions; + } + + async deleteSubjectVersion(subject: string, version: number, permanent: boolean = false): Promise { + for (const [key, value] of this.schemaToVersionCache.entries()) { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && value.version === version) { + await this.deleteVersion(key, version, permanent); + + const cacheKeySchema = stringify({ subject, schema: minimize(parsedKey.schema) }); + const cacheEntry = this.infoToSchemaCache.get(cacheKeySchema); + if (cacheEntry) { + await this.deleteMetadata(cacheKeySchema, cacheEntry.metadata, permanent); + } + + if (permanent && cacheEntry) { + const cacheKeyInfo = stringify({ subject, id: cacheEntry.metadata.id }); + const cacheSchemaEntry = this.idToSchemaCache.get(cacheKeyInfo); + if (cacheSchemaEntry) { + await this.deleteInfo(cacheKeyInfo, cacheSchemaEntry.info, permanent); + } + } + } + } + + return version; + } + + async testSubjectCompatibility(subject: string, schema: SchemaInfo): Promise { + throw new Error("Unsupported operation"); + } + + async testCompatibility(subject: string, version: number, schema: SchemaInfo): Promise { + throw new Error("Unsupported operation"); + } + + async getCompatibility(subject: string): Promise { + const cacheEntry = this.configCache.get(subject); + if (!cacheEntry) { + throw new RestError("Subject not found", 404, 40400); + } + return cacheEntry.compatibilityLevel as Compatibility; + } + + async updateCompatibility(subject: string, compatibility: Compatibility): Promise { + this.configCache.set(subject, { compatibilityLevel: compatibility }); + return compatibility; + } + + async getDefaultCompatibility(): Promise { + const cacheEntry = this.configCache.get(noSubject); + if (!cacheEntry) { + throw new RestError("Default compatibility not found", 404, 40400); + } + return cacheEntry.compatibilityLevel as Compatibility; + } + + async updateDefaultCompatibility(compatibility: 
Compatibility): Promise { + this.configCache.set(noSubject, { compatibilityLevel: compatibility }); + return compatibility; + } + + async getConfig(subject: string): Promise { + const cacheEntry = this.configCache.get(subject); + if (!cacheEntry) { + throw new RestError("Subject not found", 404, 40400); + } + return cacheEntry; + } + + async updateConfig(subject: string, config: ServerConfig): Promise { + this.configCache.set(subject, config); + return config; + } + + async getDefaultConfig(): Promise { + const cacheEntry = this.configCache.get(noSubject); + if (!cacheEntry) { + throw new RestError("Default config not found", 404, 40400); + } + return cacheEntry; + } + + async updateDefaultConfig(config: ServerConfig): Promise { + this.configCache.set(noSubject, config); + return config; + } + + clearLatestCaches(): void { + return; + } + + clearCaches(): void { + return; + } + + async close(): Promise { + return; + } + + private schemasEqual(schema1: SchemaInfo, schema2: SchemaInfo): boolean { + return stringify(schema1) === stringify(schema2); + } +} + +export { MockClient }; diff --git a/schemaregistry/oauth/oauth-client.ts b/schemaregistry/oauth/oauth-client.ts new file mode 100644 index 00000000..46334d0d --- /dev/null +++ b/schemaregistry/oauth/oauth-client.ts @@ -0,0 +1,56 @@ +import { ModuleOptions, ClientCredentials, ClientCredentialTokenConfig, AccessToken } from 'simple-oauth2'; + +const TOKEN_EXPIRATION_THRESHOLD_SECONDS = 30 * 60; // 30 minutes + +export class OAuthClient { + private client: ClientCredentials; + private token: AccessToken | undefined; + private tokenParams: ClientCredentialTokenConfig; + + constructor(clientId: string, clientSecret: string, tokenHost: string, tokenPath: string, scope: string) { + const clientConfig: ModuleOptions = { + client: { + id: clientId, + secret: clientSecret, + }, + auth: { + tokenHost: tokenHost, + tokenPath: tokenPath + } + } + + this.tokenParams = { scope }; + + this.client = new ClientCredentials(clientConfig); + } + + async getAccessToken(): Promise { + if (!this.token || this.token.expired(TOKEN_EXPIRATION_THRESHOLD_SECONDS)) { + await this.generateAccessToken(); + } + + return this.getAccessTokenString(); + } + + async generateAccessToken(): Promise { + try { + const token = await this.client.getToken(this.tokenParams); + this.token = token; + } catch (error) { + if (error instanceof Error) { + throw new Error(`Failed to get token from server: ${error.message}`); + } + throw new Error(`Failed to get token from server: ${error}`); + } + } + + async getAccessTokenString(): Promise { + const accessToken = this.token?.token?.['access_token']; + + if (typeof accessToken === 'string') { + return accessToken; + } + + throw new Error('Access token is not available'); + } +} diff --git a/schemaregistry/package.json b/schemaregistry/package.json new file mode 100644 index 00000000..abd64e2b --- /dev/null +++ b/schemaregistry/package.json @@ -0,0 +1,68 @@ +{ + "name": "@confluentinc/schemaregistry", + "version": "v0.2.1", + "description": "Node.js client for Confluent Schema Registry", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "files": [ + "LICENSE.txt", + "dist/" + ], + "devDependencies": { + "@bufbuild/buf": "^1.37.0", + "@bufbuild/protoc-gen-es": "^2.0.0", + "@confluentinc/kafka-javascript": "^0.2.0", + "@eslint/js": "^9.9.0", + "@types/eslint__js": "^8.42.3", + "@types/node": "^20.16.1", + "@types/uuid": "^10.0.0", + "bluebird": "^3.5.3", + "eslint": "^8.57.0", + "eslint-plugin-jest": "^28.6.0", + 
"eslint-plugin-tsdoc": "^0.3.0", + "jest": "^29.7.0", + "jsdoc": "^4.0.2", + "mocha": "^10.7.0", + "node-gyp": "^9.3.1", + "ts-jest": "^29.2.4", + "typescript": "^5.5.4", + "typescript-eslint": "^8.2.0", + "uuid": "^10.0.0" + }, + "dependencies": { + "@aws-sdk/client-kms": "^3.637.0", + "@azure/identity": "^4.4.1", + "@azure/keyvault-keys": "^4.8.0", + "@bufbuild/protobuf": "^2.0.0", + "@criteria/json-schema": "^0.10.0", + "@criteria/json-schema-validation": "^0.10.0", + "@google-cloud/kms": "^4.5.0", + "@hackbg/miscreant-esm": "^0.3.2-patch.3", + "@smithy/types": "^3.3.0", + "@types/validator": "^13.12.0", + "ajv": "^8.17.1", + "async-mutex": "^0.5.0", + "avsc": "^5.7.7", + "axios": "^1.7.3", + "json-stringify-deterministic": "^1.0.12", + "jsonata": "^2.0.5", + "lru-cache": "^11.0.0", + "node-vault": "^0.10.2", + "simple-oauth2": "^5.1.0", + "validator": "^13.12.0" + }, + "scripts": { + "lint": "make lint", + "test": "make test", + "build": "rm -rf ./dist && tsc -p tsconfig-build.json" + }, + "keywords": [ + "schemaregistry", + "confluent" + ], + "repository": { + "type": "git", + "url": "git@github.com:confluentinc/confluent-kafka-javascript.git" + }, + "license": "MIT" +} diff --git a/schemaregistry/rest-error.ts b/schemaregistry/rest-error.ts new file mode 100644 index 00000000..a5db4ca8 --- /dev/null +++ b/schemaregistry/rest-error.ts @@ -0,0 +1,19 @@ +/** + * Represents a REST error. + */ +export class RestError extends Error { + status: number; + errorCode: number; + + /** + * Creates a new REST error. + * @param message - The error message. + * @param status - The HTTP status code. + * @param errorCode - The error code. + */ + constructor(message: string, status: number, errorCode: number) { + super(message + "; Error code: " + errorCode); + this.status = status; + this.errorCode = errorCode; + } +} diff --git a/schemaregistry/rest-service.ts b/schemaregistry/rest-service.ts new file mode 100644 index 00000000..8d929c82 --- /dev/null +++ b/schemaregistry/rest-service.ts @@ -0,0 +1,215 @@ +import axios, { AxiosInstance, AxiosRequestConfig, AxiosResponse, CreateAxiosDefaults } from 'axios'; +import { OAuthClient } from './oauth/oauth-client'; +import { RestError } from './rest-error'; +/* + * Confluent-Schema-Registry-TypeScript - Node.js wrapper for Confluent Schema Registry + * + * Copyright (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. 
+ */ + +export interface BasicAuthCredentials { + credentialsSource: 'USER_INFO' | 'URL' | 'SASL_INHERIT', + userInfo?: string, + sasl?: SaslInfo +} + +export interface SaslInfo { + mechanism?: string, + username: string, + password: string +} + +export interface BearerAuthCredentials { + credentialsSource: 'STATIC_TOKEN' | 'OAUTHBEARER', + token?: string, + issuerEndpointUrl?: string, + clientId?: string, + clientSecret?: string, + scope?: string, + logicalCluster?: string, + identityPoolId?: string, +} + +//TODO: Consider retry policy, may need additional libraries on top of Axios +export interface ClientConfig { + baseURLs: string[], + cacheCapacity?: number, + cacheLatestTtlSecs?: number, + isForward?: boolean, + createAxiosDefaults?: CreateAxiosDefaults, + basicAuthCredentials?: BasicAuthCredentials, + bearerAuthCredentials?: BearerAuthCredentials, +} + +const toBase64 = (str: string): string => Buffer.from(str).toString('base64'); + +export class RestService { + private client: AxiosInstance; + private baseURLs: string[]; + private oauthClient?: OAuthClient; + private oauthBearer: boolean = false; + + constructor(baseURLs: string[], isForward?: boolean, axiosDefaults?: CreateAxiosDefaults, + basicAuthCredentials?: BasicAuthCredentials, bearerAuthCredentials?: BearerAuthCredentials) { + this.client = axios.create(axiosDefaults); + this.baseURLs = baseURLs; + + if (isForward) { + this.setHeaders({ 'X-Forward': 'true' }); + } + this.setHeaders({ 'Content-Type': 'application/vnd.schemaregistry.v1+json' }); + + this.handleBasicAuth(basicAuthCredentials); + this.handleBearerAuth(bearerAuthCredentials); + } + + handleBasicAuth(basicAuthCredentials?: BasicAuthCredentials): void { + if (basicAuthCredentials) { + switch (basicAuthCredentials.credentialsSource) { + case 'USER_INFO': + if (!basicAuthCredentials.userInfo) { + throw new Error('User info not provided'); + } + this.setAuth(toBase64(basicAuthCredentials.userInfo!)); + break; + case 'SASL_INHERIT': + if (!basicAuthCredentials.sasl) { + throw new Error('Sasl info not provided'); + } + if (basicAuthCredentials.sasl.mechanism?.toUpperCase() === 'GSSAPI') { + throw new Error('SASL_INHERIT support PLAIN and SCRAM SASL mechanisms only'); + } + this.setAuth(toBase64(`${basicAuthCredentials.sasl.username}:${basicAuthCredentials.sasl.password}`)); + break; + case 'URL': + if (!basicAuthCredentials.userInfo) { + throw new Error('User info not provided'); + } + const basicAuthUrl = new URL(basicAuthCredentials.userInfo); + this.setAuth(toBase64(`${basicAuthUrl.username}:${basicAuthUrl.password}`)); + break; + default: + throw new Error('Invalid basic auth credentials source'); + } + } + } + + handleBearerAuth(bearerAuthCredentials?: BearerAuthCredentials): void { + if (bearerAuthCredentials) { + delete this.client.defaults.auth; + + const headers = ['logicalCluster', 'identityPoolId']; + const missingHeader = headers.find(header => !(header in bearerAuthCredentials)); + + if (missingHeader) { + throw new Error(`Bearer auth header '${missingHeader}' not provided`); + } + + this.setHeaders({ + 'Confluent-Identity-Pool-Id': bearerAuthCredentials.identityPoolId!, + 'target-sr-cluster': bearerAuthCredentials.logicalCluster! 
+ }); + + switch (bearerAuthCredentials.credentialsSource) { + case 'STATIC_TOKEN': + if (!bearerAuthCredentials.token) { + throw new Error('Bearer token not provided'); + } + this.setAuth(undefined, bearerAuthCredentials.token); + break; + case 'OAUTHBEARER': + this.oauthBearer = true; + const requiredFields = [ + 'clientId', + 'clientSecret', + 'issuerEndpointUrl', + 'scope' + ]; + const missingField = requiredFields.find(field => !(field in bearerAuthCredentials)); + + if (missingField) { + throw new Error(`OAuth credential '${missingField}' not provided`); + } + const issuerEndPointUrl = new URL(bearerAuthCredentials.issuerEndpointUrl!); + this.oauthClient = new OAuthClient(bearerAuthCredentials.clientId!, bearerAuthCredentials.clientSecret!, + issuerEndPointUrl.origin, issuerEndPointUrl.pathname, bearerAuthCredentials.scope!); + break; + default: + throw new Error('Invalid bearer auth credentials source'); + } + } + } + + async handleRequest( + url: string, + method: 'GET' | 'POST' | 'PUT' | 'DELETE', + data?: any, // eslint-disable-line @typescript-eslint/no-explicit-any + config?: AxiosRequestConfig, + ): Promise> { + + if (this.oauthBearer) { + await this.setOAuthBearerToken(); + } + + for (let i = 0; i < this.baseURLs.length; i++) { + try { + this.setBaseURL(this.baseURLs[i]); + const response = await this.client.request({ + url, + method, + data, + ...config, + }) + return response; + } catch (error) { + if (axios.isAxiosError(error) && error.response && (error.response.status < 200 || error.response.status > 299)) { + const data = error.response.data; + if (data.error_code && data.message) { + error = new RestError(data.message, error.response.status, data.error_code); + } else { + error = new Error(`Unknown error: ${error.message}`) + } + } + if (i === this.baseURLs.length - 1) { + throw error; + } + } + } + + throw new Error('Internal HTTP retry error'); // Should never reach here + } + + setHeaders(headers: Record): void { + this.client.defaults.headers.common = { ...this.client.defaults.headers.common, ...headers } + } + + setAuth(basicAuth?: string, bearerToken?: string): void { + if (basicAuth) { + this.client.defaults.headers.common['Authorization'] = `Basic ${basicAuth}` + } + + if (bearerToken) { + this.client.defaults.headers.common['Authorization'] = `Bearer ${bearerToken}` + } + } + + async setOAuthBearerToken(): Promise { + if (!this.oauthClient) { + throw new Error('OAuthClient not initialized'); + } + + const bearerToken: string = await this.oauthClient.getAccessToken(); + this.setAuth(undefined, bearerToken); + } + + setTimeout(timeout: number): void { + this.client.defaults.timeout = timeout + } + + setBaseURL(baseUrl: string): void { + this.client.defaults.baseURL = baseUrl + } +} diff --git a/schemaregistry/rules/encryption/awskms/aws-client.ts b/schemaregistry/rules/encryption/awskms/aws-client.ts new file mode 100644 index 00000000..c1f2f6f5 --- /dev/null +++ b/schemaregistry/rules/encryption/awskms/aws-client.ts @@ -0,0 +1,46 @@ +import {KmsClient} from "../kms-registry"; +import {AwsKmsDriver} from "./aws-driver"; +import { + DecryptCommand, + EncryptCommand, + KMSClient +} from '@aws-sdk/client-kms' +import {AwsCredentialIdentity} from "@smithy/types"; + +export class AwsKmsClient implements KmsClient { + + private kmsClient: KMSClient + private keyId: string + + constructor(keyUri: string, creds?: AwsCredentialIdentity) { + if (!keyUri.startsWith(AwsKmsDriver.PREFIX)) { + throw new Error(`key uri must start with ${AwsKmsDriver.PREFIX}`) + } + this.keyId 
= keyUri.substring(AwsKmsDriver.PREFIX.length)
+    const tokens = this.keyId.split(':')
+    if (tokens.length < 4) {
+      throw new Error(`invalid key uri ${this.keyId}`)
+    }
+    const regionName = tokens[3]
+    this.kmsClient = new KMSClient({
+      region: regionName,
+      ...creds && {credentials: creds}
+    })
+  }
+
+  supported(keyUri: string): boolean {
+    return keyUri.startsWith(AwsKmsDriver.PREFIX)
+  }
+
+  async encrypt(plaintext: Buffer): Promise<Buffer> {
+    const encryptCommand = new EncryptCommand({KeyId: this.keyId, Plaintext: plaintext});
+    const data = await this.kmsClient.send(encryptCommand)
+    return Buffer.from(data.CiphertextBlob!);
+  }
+
+  async decrypt(ciphertext: Buffer): Promise<Buffer> {
+    const decryptCommand = new DecryptCommand({KeyId: this.keyId, CiphertextBlob: ciphertext});
+    const data = await this.kmsClient.send(decryptCommand);
+    return Buffer.from(data.Plaintext!)
+  }
+}
diff --git a/schemaregistry/rules/encryption/awskms/aws-driver.ts b/schemaregistry/rules/encryption/awskms/aws-driver.ts
new file mode 100644
index 00000000..13c217c6
--- /dev/null
+++ b/schemaregistry/rules/encryption/awskms/aws-driver.ts
@@ -0,0 +1,32 @@
+import {KmsClient, KmsDriver, registerKmsDriver} from "../kms-registry";
+import {AwsKmsClient} from "./aws-client";
+import {AwsCredentialIdentity} from "@smithy/types";
+
+export class AwsKmsDriver implements KmsDriver {
+
+  static PREFIX = 'aws-kms://'
+  static ACCESS_KEY_ID = 'access.key.id'
+  static SECRET_ACCESS_KEY = 'secret.access.key'
+
+  /**
+   * Register the AWS KMS driver with the KMS registry.
+   */
+  static register(): void {
+    registerKmsDriver(new AwsKmsDriver())
+  }
+
+  getKeyUrlPrefix(): string {
+    return AwsKmsDriver.PREFIX
+  }
+
+  newKmsClient(config: Map<string, string>, keyUrl?: string): KmsClient {
+    const uriPrefix = keyUrl != null ?
keyUrl : AwsKmsDriver.PREFIX
+    const key = config.get(AwsKmsDriver.ACCESS_KEY_ID)
+    const secret = config.get(AwsKmsDriver.SECRET_ACCESS_KEY)
+    let creds: AwsCredentialIdentity | undefined
+    if (key != null && secret != null) {
+      creds = {accessKeyId: key, secretAccessKey: secret}
+    }
+    return new AwsKmsClient(uriPrefix, creds)
+  }
+}
diff --git a/schemaregistry/rules/encryption/azurekms/azure-client.ts b/schemaregistry/rules/encryption/azurekms/azure-client.ts
new file mode 100644
index 00000000..a0f33bb1
--- /dev/null
+++ b/schemaregistry/rules/encryption/azurekms/azure-client.ts
@@ -0,0 +1,33 @@
+import {KmsClient} from "../kms-registry";
+import {AzureKmsDriver} from "./azure-driver";
+import {TokenCredential} from "@azure/identity";
+import {CryptographyClient, EncryptionAlgorithm} from "@azure/keyvault-keys";
+
+export class AzureKmsClient implements KmsClient {
+  private static ALGORITHM: EncryptionAlgorithm = 'RSA-OAEP-256'
+
+  private kmsClient: CryptographyClient
+  private keyId: string
+
+  constructor(keyUri: string, creds: TokenCredential) {
+    if (!keyUri.startsWith(AzureKmsDriver.PREFIX)) {
+      throw new Error(`key uri must start with ${AzureKmsDriver.PREFIX}`)
+    }
+    this.keyId = keyUri.substring(AzureKmsDriver.PREFIX.length)
+    this.kmsClient = new CryptographyClient(this.keyId, creds)
+  }
+
+  supported(keyUri: string): boolean {
+    return keyUri.startsWith(AzureKmsDriver.PREFIX)
+  }
+
+  async encrypt(plaintext: Buffer): Promise<Buffer> {
+    const result = await this.kmsClient.encrypt(AzureKmsClient.ALGORITHM, plaintext)
+    return Buffer.from(result.result)
+  }
+
+  async decrypt(ciphertext: Buffer): Promise<Buffer> {
+    const result = await this.kmsClient.decrypt(AzureKmsClient.ALGORITHM, ciphertext)
+    return Buffer.from(result.result)
+  }
+}
diff --git a/schemaregistry/rules/encryption/azurekms/azure-driver.ts b/schemaregistry/rules/encryption/azurekms/azure-driver.ts
new file mode 100644
index 00000000..01c01cd1
--- /dev/null
+++ b/schemaregistry/rules/encryption/azurekms/azure-driver.ts
@@ -0,0 +1,36 @@
+import {KmsClient, KmsDriver, registerKmsDriver} from "../kms-registry";
+import {ClientSecretCredential, DefaultAzureCredential, TokenCredential} from '@azure/identity'
+import {AzureKmsClient} from "./azure-client";
+
+export class AzureKmsDriver implements KmsDriver {
+
+  static PREFIX = 'azure-kms://'
+  static TENANT_ID = 'tenant.id'
+  static CLIENT_ID = 'client.id'
+  static CLIENT_SECRET = 'client.secret'
+
+  /**
+   * Register the Azure KMS driver with the KMS registry.
+   */
+  static register(): void {
+    registerKmsDriver(new AzureKmsDriver())
+  }
+
+  getKeyUrlPrefix(): string {
+    return AzureKmsDriver.PREFIX
+  }
+
+  newKmsClient(config: Map<string, string>, keyUrl?: string): KmsClient {
+    const uriPrefix = keyUrl != null ?
keyUrl : AzureKmsDriver.PREFIX + const tenantId = config.get(AzureKmsDriver.TENANT_ID) + const clientId = config.get(AzureKmsDriver.CLIENT_ID) + const clientSecret = config.get(AzureKmsDriver.CLIENT_SECRET) + let creds: TokenCredential + if (tenantId != null && clientId != null && clientSecret != null) { + creds = new ClientSecretCredential(tenantId, clientId, clientSecret) + } else { + creds = new DefaultAzureCredential() + } + return new AzureKmsClient(uriPrefix, creds) + } +} diff --git a/schemaregistry/rules/encryption/dekregistry/constants.ts b/schemaregistry/rules/encryption/dekregistry/constants.ts new file mode 100644 index 00000000..ce8dfce0 --- /dev/null +++ b/schemaregistry/rules/encryption/dekregistry/constants.ts @@ -0,0 +1,5 @@ +const MOCK_TS = 11112223334; + +export { + MOCK_TS +}; \ No newline at end of file diff --git a/schemaregistry/rules/encryption/dekregistry/dekregistry-client.ts b/schemaregistry/rules/encryption/dekregistry/dekregistry-client.ts new file mode 100644 index 00000000..16fea683 --- /dev/null +++ b/schemaregistry/rules/encryption/dekregistry/dekregistry-client.ts @@ -0,0 +1,246 @@ +import { LRUCache } from 'lru-cache'; +import { Mutex } from 'async-mutex'; +import { ClientConfig, RestService } from '../../../rest-service'; +import stringify from 'json-stringify-deterministic'; +import {MockDekRegistryClient} from "./mock-dekregistry-client"; + +/* + * Confluent-Schema-Registry-TypeScript - Node.js wrapper for Confluent Schema Registry + * + * Copyright (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. + */ + +interface Kek { + name?: string; + kmsType?: string; + kmsKeyId?: string; + kmsProps?: { [key: string]: string }; + doc?: string; + shared?: boolean; + ts?: number; + deleted?: boolean; +} + +interface CreateKekRequest { + name?: string; + kmsType?: string; + kmsKeyId?: string; + kmsProps?: { [key: string]: string }; + doc?: string; + shared?: boolean; +} + +interface Dek { + kekName?: string; + subject?: string; + version?: number; + algorithm?: string; + encryptedKeyMaterial?: string; + encryptedKeyMaterialBytes?: Buffer; + keyMaterial?: string; + keyMaterialBytes?: Buffer; + ts?: number; + deleted?: boolean; +} + +interface DekClient { + registerKek(name: string, kmsType: string, kmsKeyId: string, shared: boolean, + kmsProps?: { [key: string]: string }, doc?: string): Promise; + getKek(name: string, deleted: boolean): Promise; + registerDek(kekName: string, subject: string, algorithm: string, version: number, + encryptedKeyMaterial?: string): Promise; + getDek(kekName: string, subject: string, algorithm: string, version: number, deleted: boolean): Promise; + close(): Promise; +} + +class DekRegistryClient implements DekClient { + private restService: RestService; + private kekCache: LRUCache; + private dekCache: LRUCache; + private kekMutex: Mutex; + private dekMutex: Mutex; + + constructor(config: ClientConfig) { + const cacheOptions = { + max: config.cacheCapacity !== undefined ? 
config.cacheCapacity : 1000, + ...(config.cacheLatestTtlSecs !== undefined && { maxAge: config.cacheLatestTtlSecs * 1000 }), + }; + + + this.restService = new RestService(config.baseURLs, config.isForward, config.createAxiosDefaults, + config.basicAuthCredentials, config.bearerAuthCredentials); + this.kekCache = new LRUCache(cacheOptions); + this.dekCache = new LRUCache(cacheOptions); + this.kekMutex = new Mutex(); + this.dekMutex = new Mutex(); + } + + static newClient(config: ClientConfig): DekClient { + const url = config.baseURLs[0]; + if (url.startsWith("mock://")) { + return new MockDekRegistryClient() + } + return new DekRegistryClient(config) + } + + static getEncryptedKeyMaterialBytes(dek: Dek): Buffer | null { + if (!dek.encryptedKeyMaterial) { + return null; + } + + if (!dek.encryptedKeyMaterialBytes) { + try { + const bytes = Buffer.from(dek.encryptedKeyMaterial, 'base64'); + dek.encryptedKeyMaterialBytes = bytes; + } catch (err) { + if (err instanceof Error) { + throw new Error(`Failed to decode base64 string: ${err.message}`); + } + throw new Error(`Unknown error: ${err}`); + } + } + + return dek.encryptedKeyMaterialBytes; + } + + static getKeyMaterialBytes(dek: Dek): Buffer | null { + if (!dek.keyMaterial) { + return null; + } + + if (!dek.keyMaterialBytes) { + try { + const bytes = Buffer.from(dek.keyMaterial, 'base64'); + dek.keyMaterialBytes = bytes; + } catch (err) { + if (err instanceof Error) { + throw new Error(`Failed to decode base64 string: ${err.message}`); + } + throw new Error(`Unknown error: ${err}`); + } + } + + return dek.keyMaterialBytes; + } + + static setKeyMaterial(dek: Dek, keyMaterialBytes: Buffer): void { + if (keyMaterialBytes) { + const str = keyMaterialBytes.toString('base64'); + dek.keyMaterial = str; + } + } + + async registerKek(name: string, kmsType: string, kmsKeyId: string, shared: boolean, + kmsProps?: { [key: string]: string }, doc?: string): Promise { + const cacheKey = stringify({ name, deleted: false }); + + return await this.kekMutex.runExclusive(async () => { + const kek = this.kekCache.get(cacheKey); + if (kek) { + return kek; + } + + const request: CreateKekRequest = { + name, + kmsType, + kmsKeyId, + ...kmsProps && { kmsProps }, + ...doc && { doc }, + shared, + }; + + const response = await this.restService.handleRequest( + '/dek-registry/v1/keks', + 'POST', + request); + this.kekCache.set(cacheKey, response.data); + return response.data; + }); + } + + async getKek(name: string, deleted: boolean = false): Promise { + const cacheKey = stringify({ name, deleted }); + + return await this.kekMutex.runExclusive(async () => { + const kek = this.kekCache.get(cacheKey); + if (kek) { + return kek; + } + name = encodeURIComponent(name); + + const response = await this.restService.handleRequest( + `/dek-registry/v1/keks/${name}?deleted=${deleted}`, + 'GET'); + this.kekCache.set(cacheKey, response.data); + return response.data; + }); + } + + async registerDek(kekName: string, subject: string, algorithm: string, + version: number = 1, encryptedKeyMaterial?: string): Promise { + const cacheKey = stringify({ kekName, subject, version, algorithm, deleted: false }); + + return await this.dekMutex.runExclusive(async () => { + const dek = this.dekCache.get(cacheKey); + if (dek) { + return dek; + } + + const request: Dek = { + subject, + version, + algorithm, + ...encryptedKeyMaterial && { encryptedKeyMaterial }, + }; + kekName = encodeURIComponent(kekName); + + const response = await this.restService.handleRequest( + 
`/dek-registry/v1/keks/${kekName}/deks`, + 'POST', + request); + this.dekCache.set(cacheKey, response.data); + + this.dekCache.delete(stringify({ kekName, subject, version: -1, algorithm, deleted: false })); + this.dekCache.delete(stringify({ kekName, subject, version: -1, algorithm, deleted: true })); + + return response.data; + }); + } + + async getDek(kekName: string, subject: string, + algorithm: string, version: number = 1, deleted: boolean = false): Promise { + const cacheKey = stringify({ kekName, subject, version, algorithm, deleted }); + + return await this.dekMutex.runExclusive(async () => { + const dek = this.dekCache.get(cacheKey); + if (dek) { + return dek; + } + kekName = encodeURIComponent(kekName); + subject = encodeURIComponent(subject); + + const response = await this.restService.handleRequest( + `/dek-registry/v1/keks/${kekName}/deks/${subject}/versions/${version}?deleted=${deleted}`, + 'GET'); + this.dekCache.set(cacheKey, response.data); + return response.data; + }); + } + + async close(): Promise { + return; + } + + //Cache methods for testing + async checkLatestDekInCache(kekName: string, subject: string, algorithm: string): Promise { + const cacheKey = stringify({ kekName, subject, version: -1, algorithm, deleted: false }); + const cachedDek = this.dekCache.get(cacheKey); + return cachedDek !== undefined; + } +} + +export { DekRegistryClient, DekClient, Kek, Dek }; + diff --git a/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.ts b/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.ts new file mode 100644 index 00000000..beef28e0 --- /dev/null +++ b/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.ts @@ -0,0 +1,98 @@ +import { DekClient, Dek, Kek } from "./dekregistry-client"; +import { MOCK_TS } from "./constants"; +import stringify from "json-stringify-deterministic"; +import {RestError} from "../../../rest-error"; + +class MockDekRegistryClient implements DekClient { + private kekCache: Map; + private dekCache: Map; + + constructor() { + this.kekCache = new Map(); + this.dekCache = new Map(); + } + + async registerKek(name: string, kmsType: string, kmsKeyId: string, shared: boolean, + kmsProps?: { [key: string]: string }, doc?: string): Promise { + const cacheKey = stringify({ name, deleted: false }); + const cachedKek = this.kekCache.get(cacheKey); + if (cachedKek) { + return cachedKek; + } + + const kek: Kek = { + name, + kmsType, + kmsKeyId, + ...kmsProps && { kmsProps }, + ...doc && { doc }, + shared + }; + + this.kekCache.set(cacheKey, kek); + return kek; + } + + async getKek(name: string, deleted: boolean = false): Promise { + const cacheKey = stringify({ name, deleted }); + const cachedKek = this.kekCache.get(cacheKey); + if (cachedKek && (!cachedKek.deleted || deleted)) { + return cachedKek; + } + + throw new RestError(`Kek not found: ${name}`, 404, 40400); + } + + async registerDek(kekName: string, subject: string, algorithm: string, + version: number = 1, encryptedKeyMaterial?: string): Promise { + const cacheKey = stringify({ kekName, subject, version, algorithm, deleted: false }); + const cachedDek = this.dekCache.get(cacheKey); + if (cachedDek) { + return cachedDek; + } + + const dek: Dek = { + kekName, + subject, + algorithm, + ...encryptedKeyMaterial && { encryptedKeyMaterial }, + version, + ts: MOCK_TS + }; + + this.dekCache.set(cacheKey, dek); + return dek; + } + + async getDek(kekName: string, subject: string, + algorithm: string, version: number = 1, deleted: boolean = false): Promise { + 
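+    // A version of -1 is treated as "latest": scan the cached entries for the
+    // highest version registered under this kekName/subject/algorithm.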
if (version === -1) { + let latestVersion = 0; + for (const key of this.dekCache.keys()) { + const parsedKey = JSON.parse(key); + if (parsedKey.kekName === kekName && parsedKey.subject === subject + && parsedKey.algorithm === algorithm && parsedKey.deleted === deleted) { + latestVersion = Math.max(latestVersion, parsedKey.version); + } + } + if (latestVersion === 0) { + throw new RestError(`Dek not found: ${subject}`, 404, 40400); + } + version = latestVersion; + } + + const cacheKey = stringify({ kekName, subject, version, algorithm, deleted: false }); + const cachedDek = this.dekCache.get(cacheKey); + if (cachedDek) { + return cachedDek; + } + + throw new RestError(`Dek not found: ${subject}`, 404, 40400); + } + + async close() { + return; + } +} + +export { MockDekRegistryClient }; diff --git a/schemaregistry/rules/encryption/encrypt-executor.ts b/schemaregistry/rules/encryption/encrypt-executor.ts new file mode 100644 index 00000000..ff70df41 --- /dev/null +++ b/schemaregistry/rules/encryption/encrypt-executor.ts @@ -0,0 +1,549 @@ +import { + FieldContext, + FieldRuleExecutor, + FieldTransform, + FieldType, + MAGIC_BYTE, + RuleContext, + RuleError, +} from "../../serde/serde"; +import {RuleMode,} from "../../schemaregistry-client"; +import {DekClient, Dek, DekRegistryClient, Kek} from "./dekregistry/dekregistry-client"; +import {RuleRegistry} from "../../serde/rule-registry"; +import {ClientConfig} from "../../rest-service"; +import {RestError} from "../../rest-error"; +import * as Random from './tink/random'; +import * as Registry from './kms-registry' +import {KmsClient} from "./kms-registry"; +import {AesGcmKey, AesGcmKeySchema} from "./tink/proto/aes_gcm_pb"; +import {AesSivKey, AesSivKeySchema} from "./tink/proto/aes_siv_pb"; +import {create, fromBinary, toBinary} from "@bufbuild/protobuf"; +import {fromRawKey as aesGcmFromRawKey} from "./tink/aes_gcm"; +import {fromRawKey as aesSivFromRawKey} from "./tink/aes_siv"; + +// EncryptKekName represents a kek name +const ENCRYPT_KEK_NAME = 'encrypt.kek.name' +// EncryptKmsKeyId represents a kms key ID +const ENCRYPT_KMS_KEY_ID = 'encrypt.kms.key.id' +// EncryptKmsType represents a kms type +const ENCRYPT_KMS_TYPE = 'encrypt.kms.type' +// EncryptDekAlgorithm represents a dek algorithm +const ENCRYPT_DEK_ALGORITHM = 'encrypt.dek.algorithm' +// EncryptDekExpiryDays represents dek expiry days +const ENCRYPT_DEK_EXPIRY_DAYS = 'encrypt.dek.expiry.days' + +// MillisInDay represents number of milliseconds in a day +const MILLIS_IN_DAY = 24 * 60 * 60 * 1000 + +export enum DekFormat { + AES128_GCM = 'AES128_GCM', + AES256_GCM = 'AES256_GCM', + AES256_SIV = 'AES256_SIV', +} + +interface KekId { + name: string + deleted: boolean +} + +interface DekId { + kekName: string + subject: string + version: number | null + algorithm: string + deleted: boolean +} + +export class Clock { + now(): number { + return Date.now() + } +} + +export class FieldEncryptionExecutor extends FieldRuleExecutor { + client: DekClient | null = null + clock: Clock + + /** + * Register the field encryption executor with the rule registry. 
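+   *
+   * Minimal setup sketch (AWS KMS is only an example; any registered KMS
+   * driver works, and both classes are assumed to be imported from this package):
+   *
+   * @example
+   * FieldEncryptionExecutor.register()
+   * AwsKmsDriver.register()
+   * // serializers configured with an ENCRYPT field rule can now resolve
+   * // both the executor and a KMS client for the rule's key URL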
+ */ + static register(): FieldEncryptionExecutor { + return this.registerWithClock(new Clock()) + } + + static registerWithClock(clock: Clock): FieldEncryptionExecutor { + const executor = new FieldEncryptionExecutor(clock) + RuleRegistry.registerRuleExecutor(executor) + return executor + } + + constructor(clock: Clock = new Clock()) { + super() + this.clock = clock + } + + override configure(clientConfig: ClientConfig, config: Map) { + this.client = DekRegistryClient.newClient(clientConfig) + this.config = config + } + + override type(): string { + return 'ENCRYPT' + } + + override newTransform(ctx: RuleContext): FieldTransform { + const cryptor = this.getCryptor(ctx) + const kekName = this.getKekName(ctx) + const dekExpiryDays = this.getDekExpiryDays(ctx) + const transform = + new FieldEncryptionExecutorTransform(this, cryptor, kekName, dekExpiryDays) + return transform + } + + override async close(): Promise { + if (this.client != null) { + await this.client.close() + } + } + + private getCryptor(ctx: RuleContext): Cryptor { + let dekAlgorithm = DekFormat.AES256_GCM + const dekAlgorithmStr = ctx.getParameter(ENCRYPT_DEK_ALGORITHM) + if (dekAlgorithmStr != null) { + dekAlgorithm = DekFormat[dekAlgorithmStr as keyof typeof DekFormat] + } + const cryptor = new Cryptor(dekAlgorithm) + return cryptor + } + + private getKekName(ctx: RuleContext): string { + const kekName = ctx.getParameter(ENCRYPT_KEK_NAME) + if (kekName == null) { + throw new RuleError('no kek name found') + } + if (kekName === '') { + throw new RuleError('empty kek name') + } + return kekName + } + + private getDekExpiryDays(ctx: RuleContext): number { + const expiryDaysStr = ctx.getParameter(ENCRYPT_DEK_EXPIRY_DAYS) + if (expiryDaysStr == null) { + return 0 + } + const expiryDays = Number(expiryDaysStr) + if (isNaN(expiryDays)) { + throw new RuleError('invalid expiry days') + } + if (expiryDays < 0) { + throw new RuleError('negative expiry days') + } + return expiryDays + } +} + +export class Cryptor { + static readonly EMPTY_AAD = Buffer.from([]) + + dekFormat: DekFormat + isDeterministic: boolean + + constructor(dekFormat: DekFormat) { + this.dekFormat = dekFormat + this.isDeterministic = dekFormat === DekFormat.AES256_SIV + } + + private keySize(): number { + switch (this.dekFormat) { + case DekFormat.AES256_SIV: + // Generate 2 256-bit keys + return 64 + case DekFormat.AES128_GCM: + // Generate 128-bit key + return 16 + case DekFormat.AES256_GCM: + // Generate 256-bit key + return 32 + default: + throw new RuleError('unsupported dek format') + } + } + + generateKey(): Buffer { + let rawKey = Random.randBytes(this.keySize()) + switch (this.dekFormat) { + case DekFormat.AES256_SIV: + const aesSivKey: AesSivKey = create(AesSivKeySchema, { + version: 0, + keyValue: rawKey + }); + return Buffer.from(toBinary(AesSivKeySchema, aesSivKey)) + case DekFormat.AES128_GCM: + case DekFormat.AES256_GCM: + const aesGcmKey: AesGcmKey = create(AesGcmKeySchema, { + version: 0, + keyValue: rawKey + }); + return Buffer.from(toBinary(AesGcmKeySchema, aesGcmKey)) + default: + throw new RuleError('unsupported dek format') + } + } + + async encrypt(dek: Buffer, plaintext: Buffer): Promise { + let rawKey + switch (this.dekFormat) { + case DekFormat.AES256_SIV: + const aesSivKey = fromBinary(AesSivKeySchema, dek) + rawKey = aesSivKey.keyValue + return Buffer.from(await this.encryptWithAesSiv(rawKey, plaintext)) + case DekFormat.AES128_GCM: + case DekFormat.AES256_GCM: + const aesGcmKey = fromBinary(AesGcmKeySchema, dek) + rawKey = 
aesGcmKey.keyValue + return Buffer.from(await this.encryptWithAesGcm(rawKey, plaintext)) + default: + throw new RuleError('unsupported dek format') + } + } + + async decrypt(dek: Buffer, ciphertext: Buffer): Promise { + let rawKey + switch (this.dekFormat) { + case DekFormat.AES256_SIV: + const aesSivKey = fromBinary(AesSivKeySchema, dek) + rawKey = aesSivKey.keyValue + return Buffer.from(await this.decryptWithAesSiv(rawKey, ciphertext)) + case DekFormat.AES128_GCM: + case DekFormat.AES256_GCM: + const aesGcmKey = fromBinary(AesGcmKeySchema, dek) + rawKey = aesGcmKey.keyValue + return Buffer.from(await this.decryptWithAesGcm(rawKey, ciphertext)) + default: + throw new RuleError('unsupported dek format') + } + } + + async encryptWithAesSiv(key: Uint8Array, plaintext: Uint8Array): Promise { + const aead = await aesSivFromRawKey(key) + return aead.encrypt(plaintext, Cryptor.EMPTY_AAD) + } + + async decryptWithAesSiv(key: Uint8Array, ciphertext: Uint8Array): Promise { + const aead = await aesSivFromRawKey(key) + return aead.decrypt(ciphertext, Cryptor.EMPTY_AAD) + } + + async encryptWithAesGcm(key: Uint8Array, plaintext: Uint8Array): Promise { + const aead = await aesGcmFromRawKey(key) + return aead.encrypt(plaintext, Cryptor.EMPTY_AAD) + } + + async decryptWithAesGcm(key: Uint8Array, ciphertext: Uint8Array): Promise { + const aead = await aesGcmFromRawKey(key) + return aead.decrypt(ciphertext, Cryptor.EMPTY_AAD) + } +} + +export class FieldEncryptionExecutorTransform implements FieldTransform { + private executor: FieldEncryptionExecutor + private cryptor: Cryptor + private kekName: string + private kek: Kek | null = null + private dekExpiryDays: number + + constructor( + executor: FieldEncryptionExecutor, + cryptor: Cryptor, + kekName: string, + dekExpiryDays: number, + ) { + this.executor = executor + this.cryptor = cryptor + this.kekName = kekName + this.dekExpiryDays = dekExpiryDays + } + + isDekRotated() { + return this.dekExpiryDays > 0 + } + + async getKek(ctx: RuleContext) { + if (this.kek == null) { + this.kek = await this.getOrCreateKek(ctx) + } + return this.kek + } + + async getOrCreateKek(ctx: RuleContext): Promise { + const isRead = ctx.ruleMode === RuleMode.READ + const kmsType = ctx.getParameter(ENCRYPT_KMS_TYPE) + const kmsKeyId = ctx.getParameter(ENCRYPT_KMS_KEY_ID) + const kekId: KekId = { + name: this.kekName, + deleted: false, + } + let kek = await this.retrieveKekFromRegistry(kekId) + if (kek == null) { + if (isRead) { + throw new RuleError(`no kek found for ${this.kekName} during consume`) + } + if (kmsType == null || kmsType.length === 0) { + throw new RuleError(`no kms type found for ${this.kekName} during produce`) + } + if (kmsKeyId == null || kmsKeyId.length === 0) { + throw new RuleError(`no kms key id found for ${this.kekName} during produce`) + } + kek = await this.storeKekToRegistry(kekId, kmsType, kmsKeyId, false) + if (kek == null) { + // handle conflicts (409) + kek = await this.retrieveKekFromRegistry(kekId) + } + if (kek == null) { + throw new RuleError(`no kek found for ${this.kekName} during produce`) + } + } + if (kmsType != null && kmsType.length !== 0 && kmsType !== kek.kmsType) { + throw new RuleError( + `found ${this.kekName} with kms type ${kek.kmsType} which differs from rule kms type ${kmsType}`, + ) + } + if (kmsKeyId != null && kmsKeyId.length !== 0 && kmsKeyId !== kek.kmsKeyId) { + throw new RuleError( + `found ${this.kekName} with kms key id ${kek.kmsKeyId} which differs from rule kms keyId ${kmsKeyId}`, + ) + } + return kek + } + + async 
retrieveKekFromRegistry(key: KekId): Promise { + try { + return await this.executor.client!.getKek(key.name, key.deleted) + } catch (err) { + if (err instanceof RestError && err.status === 404) { + return null + } + throw new RuleError(`could not get kek ${key.name}: ${err}`) + } + } + + async storeKekToRegistry(key: KekId, kmsType: string, kmsKeyId: string, shared: boolean): Promise { + try { + return await this.executor.client!.registerKek(key.name, kmsType, kmsKeyId, shared) + } catch (err) { + if (err instanceof RestError && err.status === 409) { + return null + } + throw new RuleError(`could not register kek ${key.name}: ${err}`) + } + } + + async getOrCreateDek(ctx: RuleContext, version: number | null): Promise { + const kek = await this.getKek(ctx) + const isRead = ctx.ruleMode === RuleMode.READ + if (version == null || version === 0) { + version = 1 + } + const dekId: DekId = { + kekName: this.kekName, + subject: ctx.subject, + version, + algorithm: this.cryptor.dekFormat, + deleted: isRead + } + let dek = await this.retrieveDekFromRegistry(dekId) + const isExpired = this.isExpired(ctx, dek) + let kmsClient: KmsClient | null = null + if (dek == null || isExpired) { + if (isRead) { + throw new RuleError(`no dek found for ${this.kekName} during consume`) + } + let encryptedDek: Buffer | null = null + if (!kek.shared) { + kmsClient = getKmsClient(this.executor.config!, kek) + // Generate new dek + const rawDek = this.cryptor.generateKey() + encryptedDek = await kmsClient.encrypt(rawDek) + } + const newVersion = isExpired ? dek!.version! + 1 : null + const newDekId: DekId = { + kekName: this.kekName, + subject: ctx.subject, + version: newVersion, + algorithm: this.cryptor.dekFormat, + deleted: isRead, + } + // encryptedDek may be passed as null if kek is shared + dek = await this.storeDekToRegistry(newDekId, encryptedDek) + if (dek == null) { + // handle conflicts (409) + dek = await this.retrieveDekFromRegistry(dekId) + } + if (dek == null) { + throw new RuleError(`no dek found for ${this.kekName} during produce`) + } + } + + if (DekRegistryClient.getKeyMaterialBytes(dek) == null) { + if (kmsClient == null) { + kmsClient = getKmsClient(this.executor.config!, kek) + } + const rawDek = await kmsClient.decrypt(DekRegistryClient.getEncryptedKeyMaterialBytes(dek)!) + DekRegistryClient.setKeyMaterial(dek, rawDek) + } + + return dek + } + + async retrieveDekFromRegistry(key: DekId): Promise { + try { + let dek: Dek + let version = key.version + if (version == null || version === 0) { + version = 1 + } + dek = await this.executor.client!.getDek(key.kekName, key.subject, key.algorithm, version, key.deleted) + return dek != null && dek.encryptedKeyMaterial != null ? 
dek : null + } catch (err) { + if (err instanceof RestError && err.status === 404) { + return null + } + throw new RuleError(`could not get dek for kek ${key.kekName}, subject ${key.subject}: ${err}`) + } + } + + async storeDekToRegistry(key: DekId, encryptedDek: Buffer | null): Promise { + try { + let dek: Dek + let encryptedDekStr: string | undefined = undefined + if (encryptedDek != null) { + encryptedDekStr = encryptedDek.toString('base64') + } + let version = key.version + if (version == null || version === 0) { + version = 1 + } + dek = await this.executor.client!.registerDek(key.kekName, key.subject, key.algorithm, version, encryptedDekStr) + return dek + } catch (err) { + if (err instanceof RestError && err.status === 409) { + return null + } + throw new RuleError(`could not register dek for kek ${key.kekName}, subject ${key.subject}: ${err}`) + } + } + + isExpired(ctx: RuleContext, dek: Dek | null): boolean { + const now = this.executor.clock.now() + return ctx.ruleMode !== RuleMode.READ && + this.dekExpiryDays > 0 && + dek != null && + (now - dek.ts!) / MILLIS_IN_DAY >= this.dekExpiryDays + } + + async transform(ctx: RuleContext, fieldCtx: FieldContext, fieldValue: any): Promise { + if (fieldValue == null) { + return null + } + switch (ctx.ruleMode) { + case RuleMode.WRITE: { + let plaintext = this.toBytes(fieldCtx.type, fieldValue) + if (plaintext == null) { + throw new RuleError(`type ${fieldCtx.type} not supported for encryption`) + } + let version: number | null = null + if (this.isDekRotated()) { + version = -1 + } + let dek = await this.getOrCreateDek(ctx, version) + let keyMaterialBytes = DekRegistryClient.getKeyMaterialBytes(dek)! + let ciphertext = await this.cryptor.encrypt(keyMaterialBytes, plaintext) + if (this.isDekRotated()) { + ciphertext = this.prefixVersion(dek.version!, ciphertext) + } + if (fieldCtx.type === FieldType.STRING) { + return ciphertext.toString('base64') + } else { + return this.toObject(fieldCtx.type, ciphertext) + } + } + case RuleMode.READ: { + let ciphertext + if (fieldCtx.type === FieldType.STRING) { + ciphertext = Buffer.from(fieldValue, 'base64') + } else { + ciphertext = this.toBytes(fieldCtx.type, fieldValue) + } + if (ciphertext == null) { + return fieldValue + } + let version: number | null = null + if (this.isDekRotated()) { + version = this.extractVersion(ciphertext) + if (version == null) { + throw new RuleError('no version found in ciphertext') + } + ciphertext = ciphertext.subarray(5) + } + let dek = await this.getOrCreateDek(ctx, version) + let keyMaterialBytes = DekRegistryClient.getKeyMaterialBytes(dek)! 
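+        // Envelope decryption: the DEK key material (unwrapped via the KEK's
+        // KMS when needed) decrypts the field-level ciphertext.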
+ let plaintext = await this.cryptor.decrypt(keyMaterialBytes, ciphertext) + return this.toObject(fieldCtx.type, plaintext) + } + default: + throw new RuleError(`unsupported rule mode ${ctx.ruleMode}`) + } + } + + prefixVersion(version: number, ciphertext: Buffer): Buffer { + const versionBuf = Buffer.alloc(4) + versionBuf.writeInt32BE(version) + return Buffer.concat([MAGIC_BYTE, versionBuf, ciphertext]) + } + + extractVersion(ciphertext: Buffer): number | null { + let magicByte = ciphertext.subarray(0, 1) + if (!magicByte.equals(MAGIC_BYTE)) { + throw new RuleError( + `Message encoded with magic byte ${JSON.stringify(magicByte)}, expected ${JSON.stringify( + MAGIC_BYTE, + )}`, + ) + } + return ciphertext.subarray(1, 5).readInt32BE(0) + } + + toBytes(type: FieldType, value: any): Buffer | null { + switch (type) { + case FieldType.BYTES: + return value as Buffer + case FieldType.STRING: + return Buffer.from(value as string) + default: + return null + } + } + + toObject(type: FieldType, value: Buffer): any { + switch (type) { + case FieldType.BYTES: + return value + case FieldType.STRING: + return value.toString() + default: + return null + } + } +} + +function getKmsClient(config: Map, kek: Kek): KmsClient { + let keyUrl = kek.kmsType + '://' + kek.kmsKeyId + let kmsClient = Registry.getKmsClient(keyUrl) + if (kmsClient == null) { + let kmsDriver = Registry.getKmsDriver(keyUrl) + kmsClient = kmsDriver.newKmsClient(config, keyUrl) + Registry.registerKmsClient(kmsClient) + } + return kmsClient +} diff --git a/schemaregistry/rules/encryption/gcpkms/gcp-client.ts b/schemaregistry/rules/encryption/gcpkms/gcp-client.ts new file mode 100644 index 00000000..1ef561e8 --- /dev/null +++ b/schemaregistry/rules/encryption/gcpkms/gcp-client.ts @@ -0,0 +1,39 @@ +import {KmsClient} from "../kms-registry"; +import {GcpCredentials, GcpKmsDriver} from "./gcp-driver"; +import {KeyManagementServiceClient} from "@google-cloud/kms"; + +export class GcpKmsClient implements KmsClient { + + private kmsClient: KeyManagementServiceClient + private keyId: string + + constructor(keyUri: string, creds?: GcpCredentials) { + if (!keyUri.startsWith(GcpKmsDriver.PREFIX)) { + throw new Error(`key uri must start with ${GcpKmsDriver.PREFIX}`) + } + this.keyId = keyUri.substring(GcpKmsDriver.PREFIX.length) + this.kmsClient = creds != null + ? 
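+      // Explicit service-account credentials when configured; otherwise the
+      // client library falls back to Application Default Credentials.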
new KeyManagementServiceClient({credentials: creds}) + : new KeyManagementServiceClient() + } + + supported(keyUri: string): boolean { + return keyUri.startsWith(GcpKmsDriver.PREFIX) + } + + async encrypt(plaintext: Buffer): Promise { + const [result] = await this.kmsClient.encrypt({ + name: this.keyId, + plaintext: plaintext + }) + return Buffer.from(result.ciphertext as string) + } + + async decrypt(ciphertext: Buffer): Promise { + const [result] = await this.kmsClient.decrypt({ + name: this.keyId, + ciphertext: ciphertext + }) + return Buffer.from(result.plaintext as string) + } +} diff --git a/schemaregistry/rules/encryption/gcpkms/gcp-driver.ts b/schemaregistry/rules/encryption/gcpkms/gcp-driver.ts new file mode 100644 index 00000000..60ee47dc --- /dev/null +++ b/schemaregistry/rules/encryption/gcpkms/gcp-driver.ts @@ -0,0 +1,54 @@ +import {KmsClient, KmsDriver, registerKmsDriver} from "../kms-registry"; +import {GcpKmsClient} from "./gcp-client"; + +export class GcpKmsDriver implements KmsDriver { + + static PREFIX = 'gcp-kms://' + static ACCOUNT_TYPE = "account.type"; + static CLIENT_ID= "client.id"; + static CLIENT_EMAIL = "client.email"; + static PRIVATE_KEY_ID = "private.key.id"; + static PRIVATE_KEY= "private.key"; + + /** + * Register the GCP KMS driver with the KMS registry. + */ + static register(): void { + registerKmsDriver(new GcpKmsDriver()) + } + + getKeyUrlPrefix(): string { + return GcpKmsDriver.PREFIX + } + + newKmsClient(config: Map, keyUrl?: string): KmsClient { + const uriPrefix = keyUrl != null ? keyUrl : GcpKmsDriver.PREFIX + let accountType = config.get(GcpKmsDriver.ACCOUNT_TYPE) + const clientId = config.get(GcpKmsDriver.CLIENT_ID) + const clientEmail = config.get(GcpKmsDriver.CLIENT_EMAIL) + const privateKeyId = config.get(GcpKmsDriver.PRIVATE_KEY_ID) + const privateKey = config.get(GcpKmsDriver.PRIVATE_KEY) + let creds: GcpCredentials | undefined + if (clientId != null && clientEmail != null && privateKeyId != null && privateKey != null) { + if (accountType == null) { + accountType = "service_account" + } + creds = { + ...accountType && {type: accountType}, + private_key_id: privateKeyId, + private_key: privateKey, + client_email: clientEmail, + client_id: clientId, + } + } + return new GcpKmsClient(uriPrefix, creds) + } +} + +export interface GcpCredentials { + type?: string + private_key_id?: string + private_key?: string + client_email?: string + client_id?: string +} diff --git a/schemaregistry/rules/encryption/hcvault/hcvault-client.ts b/schemaregistry/rules/encryption/hcvault/hcvault-client.ts new file mode 100644 index 00000000..ea13aa09 --- /dev/null +++ b/schemaregistry/rules/encryption/hcvault/hcvault-client.ts @@ -0,0 +1,49 @@ +import {KmsClient} from "../kms-registry"; +import {HcVaultDriver} from "./hcvault-driver"; +import NodeVault from "node-vault"; + +export class HcVaultClient implements KmsClient { + + private kmsClient: NodeVault.client + private keyId: string + private keyName: string + + constructor(keyUri: string, namespace?: string, token?: string) { + if (token == null) + { + namespace = process.env["VAULT_NAMESPACE"] + } + if (!keyUri.startsWith(HcVaultDriver.PREFIX)) { + throw new Error(`key uri must start with ${HcVaultDriver.PREFIX}`) + } + this.keyId = keyUri.substring(HcVaultDriver.PREFIX.length) + let url = new URL(this.keyId) + let parts = url.pathname.split('/') + if (parts.length === 0) { + throw new Error('key uri must contain a key name') + } + this.keyName = parts.pop()! 
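+    // Everything before the key name identifies the Vault server itself:
+    // connect to protocol://host and address the transit key by the name parsed above.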
+ this.kmsClient = NodeVault({ + endpoint: url.protocol + '//' + url.host, + ...namespace && { namespace }, + ...token && { token }, + apiVersion: 'v1', + }) + } + + supported(keyUri: string): boolean { + return keyUri.startsWith(HcVaultDriver.PREFIX) + } + + async encrypt(plaintext: Buffer): Promise { + const response = await this.kmsClient.encryptData({name: this.keyName, plaintext: plaintext.toString('base64') }) + let data = response.data.ciphertext + return Buffer.from(data, 'utf8') + } + + async decrypt(ciphertext: Buffer): Promise { + const response = await this.kmsClient.decryptData({name: this.keyName, ciphertext: ciphertext.toString('utf8') }) + let data = response.data.plaintext + return Buffer.from(data, 'base64'); + } +} diff --git a/schemaregistry/rules/encryption/hcvault/hcvault-driver.ts b/schemaregistry/rules/encryption/hcvault/hcvault-driver.ts new file mode 100644 index 00000000..de9ccdfc --- /dev/null +++ b/schemaregistry/rules/encryption/hcvault/hcvault-driver.ts @@ -0,0 +1,27 @@ +import {KmsClient, KmsDriver, registerKmsDriver} from "../kms-registry"; +import {HcVaultClient} from "./hcvault-client"; + +export class HcVaultDriver implements KmsDriver { + + static PREFIX = 'hcvault://' + static TOKEN_ID = 'token.id' + static NAMESPACE = 'namespace' + + /** + * Register the HashiCorp Vault driver with the KMS registry. + */ + static register(): void { + registerKmsDriver(new HcVaultDriver()) + } + + getKeyUrlPrefix(): string { + return HcVaultDriver.PREFIX + } + + newKmsClient(config: Map, keyUrl?: string): KmsClient { + const uriPrefix = keyUrl != null ? keyUrl : HcVaultDriver.PREFIX + const tokenId = config.get(HcVaultDriver.TOKEN_ID) + const ns = config.get(HcVaultDriver.NAMESPACE) + return new HcVaultClient(uriPrefix, ns, tokenId) + } +} diff --git a/schemaregistry/rules/encryption/kms-registry.ts b/schemaregistry/rules/encryption/kms-registry.ts new file mode 100644 index 00000000..c23e9a5d --- /dev/null +++ b/schemaregistry/rules/encryption/kms-registry.ts @@ -0,0 +1,74 @@ +import {SecurityException} from "./tink/exception/security_exception"; + +/** + * Key management service (KMS) driver. + */ +export interface KmsDriver { + getKeyUrlPrefix(): string + newKmsClient(config: Map, keyUrl: string): KmsClient +} + +/** + * Key management service (KMS) client. + */ +export interface KmsClient { + supported(keyUri: string): boolean + encrypt(plaintext: Buffer): Promise + decrypt(ciphertext: Buffer): Promise +} + +const kmsDrivers: KmsDriver[] = [] + +const kmsClients: KmsClient[] = [] + + +/** + * Register a KMS driver. + * @param kmsDriver - the KMS driver to register + */ +export function registerKmsDriver(kmsDriver: KmsDriver): void { + kmsDrivers.push(kmsDriver) +} + +/** + * Get the KMS driver for the given key URL. + * @param keyUrl - the key URL + */ +export function getKmsDriver(keyUrl: string): KmsDriver { + for (let driver of kmsDrivers) { + if (keyUrl.startsWith(driver.getKeyUrlPrefix())) { + return driver + } + } + throw new SecurityException('no KMS driver found for key URL: ' + keyUrl) +} + +/** + * Register a KMS client. + * @param kmsClient - the KMS client to register + */ +export function registerKmsClient(kmsClient: KmsClient): void { + kmsClients.push(kmsClient) +} + +/** + * Get the KMS client for the given key URL. 
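+ *
+ * Resolution sketch (mirrors how the encryption executor looks up clients;
+ * the AWS driver and key ARN below are placeholders):
+ *
+ * @example
+ * AwsKmsDriver.register()                      // once, at startup
+ * const keyUrl = 'aws-kms://arn:aws:kms:us-east-1:000000000000:key/...'
+ * let client = getKmsClient(keyUrl)
+ * if (client == null) {
+ *   client = getKmsDriver(keyUrl).newKmsClient(new Map(), keyUrl)
+ *   registerKmsClient(client)
+ * }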
+ * @param keyUrl - the key URL + */ +export function getKmsClient(keyUrl: string): KmsClient | null { + for (let client of kmsClients) { + if (client.supported(keyUrl)) { + return client + } + } + return null +} + +/** + * Clear the KMS clients. + */ +export function clearKmsClients(): void { + kmsClients.length = 0 +} + + diff --git a/schemaregistry/rules/encryption/localkms/local-client.ts b/schemaregistry/rules/encryption/localkms/local-client.ts new file mode 100644 index 00000000..c65cc089 --- /dev/null +++ b/schemaregistry/rules/encryption/localkms/local-client.ts @@ -0,0 +1,44 @@ +import {KmsClient} from "../kms-registry"; +import {Cryptor, DekFormat} from "../encrypt-executor"; +import * as Hkdf from '../tink/hkdf'; +import {LocalKmsDriver} from "./local-driver"; +import {AesGcmKey, AesGcmKeySchema} from "../tink/proto/aes_gcm_pb"; +import {create, toBinary} from "@bufbuild/protobuf"; + +export class LocalKmsClient implements KmsClient { + + private secret: string + private cryptor: Cryptor + + constructor(secret?: string) { + if (secret == null) { + secret = process.env['LOCAL_SECRET'] + } + if (secret == null) { + throw new Error('cannot load secret') + } + this.secret = secret + this.cryptor = new Cryptor(DekFormat.AES128_GCM) + } + + async getKey(): Promise { + const rawKey = await Hkdf.compute(16, 'SHA-256', Buffer.from(this.secret, 'utf8'), new Uint8Array(0)); + const aesGcmKey: AesGcmKey = create(AesGcmKeySchema, { + version: 0, + keyValue: rawKey + }); + return Buffer.from(toBinary(AesGcmKeySchema, aesGcmKey)) + } + + supported(keyUri: string): boolean { + return keyUri.startsWith(LocalKmsDriver.PREFIX) + } + + async encrypt(plaintext: Buffer): Promise { + return this.cryptor.encrypt(await this.getKey(), plaintext) + } + + async decrypt(ciphertext: Buffer): Promise { + return this.cryptor.decrypt(await this.getKey(), ciphertext) + } +} diff --git a/schemaregistry/rules/encryption/localkms/local-driver.ts b/schemaregistry/rules/encryption/localkms/local-driver.ts new file mode 100644 index 00000000..5e1b9184 --- /dev/null +++ b/schemaregistry/rules/encryption/localkms/local-driver.ts @@ -0,0 +1,24 @@ +import {KmsClient, KmsDriver, registerKmsDriver} from "../kms-registry"; +import {LocalKmsClient} from "./local-client"; + +export class LocalKmsDriver implements KmsDriver { + + static PREFIX: string = 'local-kms://' + static SECRET: string = 'secret' + + /** + * Register the local KMS driver with the KMS registry. + */ + static register(): void { + registerKmsDriver(new LocalKmsDriver()) + } + + getKeyUrlPrefix(): string { + return LocalKmsDriver.PREFIX + } + + newKmsClient(config: Map, keyUrl: string): KmsClient { + const secret = config.get(LocalKmsDriver.SECRET) + return new LocalKmsClient(secret) + } +} diff --git a/schemaregistry/rules/encryption/tink/aead.ts b/schemaregistry/rules/encryption/tink/aead.ts new file mode 100644 index 00000000..df85ca73 --- /dev/null +++ b/schemaregistry/rules/encryption/tink/aead.ts @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * Interface for Authenticated Encryption with Associated Data (AEAD). + * + * Security guarantees: Implementations of this interface are secure against + * adaptive chosen ciphertext attacks. Encryption with associated data ensures + * authenticity (who the sender is) and integrity (the data has not been + * tampered with) of that data, but not its secrecy. 
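+ *
+ * Usage sketch (`fromRawKey` and `Random.randBytes` come from the sibling
+ * aes_gcm.ts and random.ts modules; `plaintextBytes`/`aadBytes` stand in for
+ * arbitrary Uint8Arrays). The same associated data passed to `encrypt` must
+ * be passed to `decrypt`, or verification fails.
+ *
+ * @example
+ * const aead = await fromRawKey(Random.randBytes(32))   // AES-256-GCM
+ * const ciphertext = await aead.encrypt(plaintextBytes, aadBytes)
+ * const recovered = await aead.decrypt(ciphertext, aadBytes)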
+ * + * @see https://tools.ietf.org/html/rfc5116 + */ +export abstract class Aead { + /** + * Encrypts `plaintext` with `opt_associatedData` as associated authenticated + * data. The resulting ciphertext allows for checking authenticity and + * integrity of associated data, but does not guarantee its secrecy. + * + * @param plaintext - the plaintext to be encrypted. It must be + * non-null, but can also be an empty (zero-length) byte array. + * @param opt_associatedData - optional associated data to be + * authenticated, but not encrypted. A null value is equivalent to an + * empty (zero-length) byte array. For successful decryption the same + * associated data must be provided along with the ciphertext. + * @returns resulting ciphertext + * + */ + abstract encrypt(plaintext: Uint8Array, opt_associatedData?: Uint8Array|null): + Promise; + + /** + * Decrypts ciphertext with associated authenticated data. + * The decryption verifies the authenticity and integrity of the associated + * data, but there are no guarantees wrt. secrecy of that data. + * + * @param ciphertext - the ciphertext to be decrypted, must be + * non-null. + * @param opt_associatedData - optional associated data to be + * authenticated. A null value is equivalent to an empty (zero-length) + * byte array. For successful decryption the same associated data must be + * provided along with the ciphertext. + * @returns resulting plaintext + */ + abstract decrypt( + ciphertext: Uint8Array, + opt_associatedData?: Uint8Array|null): Promise; +} diff --git a/schemaregistry/rules/encryption/tink/aes_gcm.ts b/schemaregistry/rules/encryption/tink/aes_gcm.ts new file mode 100644 index 00000000..0035759f --- /dev/null +++ b/schemaregistry/rules/encryption/tink/aes_gcm.ts @@ -0,0 +1,106 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {Aead} from './aead'; +import {SecurityException} from './exception/security_exception'; + +import * as Bytes from './bytes'; +import * as Random from './random'; +import * as Validators from './validators'; +import * as crypto from 'crypto'; + +/** + * The only supported IV size. + * + */ +const IV_SIZE_IN_BYTES: number = 12; + +/** + * The only supported tag size. + * + */ +const TAG_SIZE_IN_BITS: number = 128; + +/** + * Implementation of AES-GCM. 
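+ *
+ * Ciphertext layout produced by `encrypt`: a fresh 12-byte IV, followed by
+ * the AES-GCM ciphertext with its 16-byte (128-bit) authentication tag as
+ * appended by WebCrypto; `decrypt` expects the same layout.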
+ * + */ +export class AesGcm extends Aead { + constructor(private readonly key: CryptoKey) { + super(); + } + + /** + */ + async encrypt(plaintext: Uint8Array, associatedData?: Uint8Array): + Promise { + Validators.requireUint8Array(plaintext); + if (associatedData != null) { + Validators.requireUint8Array(associatedData); + } + const iv = Random.randBytes(IV_SIZE_IN_BYTES); + const alg: AesGcmParams = { + 'name': 'AES-GCM', + 'iv': iv, + 'tagLength': TAG_SIZE_IN_BITS + }; + if (associatedData) { + alg['additionalData'] = associatedData; + } + const ciphertext = + await crypto.subtle.encrypt(alg, this.key, plaintext); + return Bytes.concat(iv, new Uint8Array(ciphertext)); + } + + /** + */ + async decrypt(ciphertext: Uint8Array, associatedData?: Uint8Array): + Promise { + Validators.requireUint8Array(ciphertext); + if (ciphertext.length < IV_SIZE_IN_BYTES + TAG_SIZE_IN_BITS / 8) { + throw new SecurityException('ciphertext too short'); + } + if (associatedData != null) { + Validators.requireUint8Array(associatedData); + } + const iv = new Uint8Array(IV_SIZE_IN_BYTES); + iv.set(ciphertext.subarray(0, IV_SIZE_IN_BYTES)); + const alg: AesGcmParams = { + 'name': 'AES-GCM', + 'iv': iv, + 'tagLength': TAG_SIZE_IN_BITS + }; + if (associatedData) { + alg['additionalData'] = associatedData; + } + try { + return new Uint8Array(await crypto.subtle.decrypt( + alg, this.key, + new Uint8Array(ciphertext.subarray(IV_SIZE_IN_BYTES)))); + // Preserving old behavior when moving to + // https://www.typescriptlang.org/tsconfig#useUnknownInCatchVariables + // tslint:disable-next-line:no-any + } catch (e: any) { + throw new SecurityException(e.toString()); + } + } +} + +export async function fromRawKey(key: Uint8Array): Promise { + Validators.requireUint8Array(key); + Validators.validateAesKeySize(key.length); + const webCryptoKey = await crypto.subtle.importKey( + /* format */ + 'raw', key, + /* keyData */ + {'name': 'AES-GCM', 'length': key.length}, + /* algo */ + false, + /* extractable*/ + ['encrypt', 'decrypt']); + + /* usage */ + return new AesGcm(webCryptoKey); +} diff --git a/schemaregistry/rules/encryption/tink/aes_siv.ts b/schemaregistry/rules/encryption/tink/aes_siv.ts new file mode 100644 index 00000000..1e26583c --- /dev/null +++ b/schemaregistry/rules/encryption/tink/aes_siv.ts @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {Aead} from './aead'; + +// @ts-expect-error miscreant does not have types +import {SIV, WebCryptoProvider} from "@hackbg/miscreant-esm"; +import * as crypto from 'crypto'; + +/** + * Implementation of AES-SIV. 
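+ *
+ * Deterministic AEAD: for a given 64-byte key (two 256-bit halves, as in
+ * AesSivKey), the same plaintext and associated data always yield the same
+ * ciphertext. Backed by the miscreant AES-CMAC-SIV implementation.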
+ * + */ +export class AesSiv extends Aead { + constructor(private readonly key: Uint8Array) { + super(); + } + + /** + */ + async encrypt(plaintext: Uint8Array, associatedData?: Uint8Array): + Promise { + let key = await SIV.importKey(this.key, "AES-CMAC-SIV", new WebCryptoProvider(crypto)); + return key.seal(plaintext, [associatedData]); + } + + /** + */ + async decrypt(ciphertext: Uint8Array, associatedData?: Uint8Array): + Promise { + let key = await SIV.importKey(this.key, "AES-CMAC-SIV", new WebCryptoProvider(crypto)); + return key.open(ciphertext, [associatedData]); + } +} + +export async function fromRawKey(key: Uint8Array): Promise { + return new AesSiv(key); +} diff --git a/schemaregistry/rules/encryption/tink/bytes.ts b/schemaregistry/rules/encryption/tink/bytes.ts new file mode 100644 index 00000000..f2aafdaf --- /dev/null +++ b/schemaregistry/rules/encryption/tink/bytes.ts @@ -0,0 +1,179 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {InvalidArgumentsException} from './exception/invalid_arguments_exception'; + +/** + * Does near constant time byte array comparison. + * @param ba1 - The first bytearray to check. + * @param ba2 - The second bytearray to check. + * @returns If the array are equal. + */ +export function isEqual(ba1: Uint8Array, ba2: Uint8Array): boolean { + if (ba1.length !== ba2.length) { + return false; + } + let result = 0; + for (let i = 0; i < ba1.length; i++) { + result |= ba1[i] ^ ba2[i]; + } + return result == 0; +} + +/** + * Returns a new array that is the result of joining the arguments. + */ +export function concat(...var_args: Uint8Array[]): Uint8Array { + let length = 0; + for (let i = 0; i < arguments.length; i++) { + // eslint-disable-next-line prefer-rest-params + length += arguments[i].length; + } + const result = new Uint8Array(length); + let curOffset = 0; + for (let i = 0; i < arguments.length; i++) { + // eslint-disable-next-line prefer-rest-params + result.set(arguments[i], curOffset); + // eslint-disable-next-line prefer-rest-params + curOffset += arguments[i].length; + } + return result; +} + +/** + * Converts a non-negative integer number to a 64-bit big-endian byte array. + * @param value - The number to convert. + * @returns The number as a big-endian byte array. + * @throws {@link InvalidArgumentsException} + */ +export function fromNumber(value: number): Uint8Array { + if (Number.isNaN(value) || value % 1 !== 0) { + throw new InvalidArgumentsException('cannot convert non-integer value'); + } + if (value < 0) { + throw new InvalidArgumentsException('cannot convert negative number'); + } + if (value > Number.MAX_SAFE_INTEGER) { + throw new InvalidArgumentsException( + 'cannot convert number larger than ' + Number.MAX_SAFE_INTEGER); + } + const twoPower32 = 2 ** 32; + let low = value % twoPower32; + let high = value / twoPower32; + const result = new Uint8Array(8); + for (let i = 7; i >= 4; i--) { + result[i] = low & 255; + low >>>= 8; + } + for (let i = 3; i >= 0; i--) { + result[i] = high & 255; + high >>>= 8; + } + return result; +} + +/** + * Converts the hex string to a byte array. 
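+ *
+ * @example
+ * fromHex('deadbeef')          // Uint8Array [0xde, 0xad, 0xbe, 0xef]
+ * toHex(fromHex('deadbeef'))   // 'deadbeef'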
+ * + * @param hex - the input + * @returns the byte array output + * @throws {@link InvalidArgumentsException} + */ +export function fromHex(hex: string): Uint8Array { + if (hex.length % 2 != 0) { + throw new InvalidArgumentsException( + 'Hex string length must be multiple of 2'); + } + const arr = new Uint8Array(hex.length / 2); + for (let i = 0; i < hex.length; i += 2) { + arr[i / 2] = parseInt(hex.substring(i, i + 2), 16); + } + return arr; +} + +/** + * Converts a byte array to hex. + * + * @param bytes - the byte array input + * @returns hex the output + */ +export function toHex(bytes: Uint8Array): string { + let result = ''; + for (let i = 0; i < bytes.length; i++) { + const hexByte = bytes[i].toString(16); + result += hexByte.length > 1 ? hexByte : '0' + hexByte; + } + return result; +} + +/** + * Converts the Base64 string to a byte array. + * + * @param encoded - the base64 string + * @param opt_webSafe - True indicates we should use the alternative + * alphabet, which does not require escaping for use in URLs. + * @returns the byte array output + */ +export function fromBase64(encoded: string, opt_webSafe?: boolean): Uint8Array { + if (opt_webSafe) { + const normalBase64 = encoded.replace(/-/g, '+').replace(/_/g, '/'); + return fromByteString(window.atob(normalBase64)); + } + return fromByteString(window.atob(encoded)); +} + +/** + * Base64 encode a byte array. + * + * @param bytes - the byte array input + * @param opt_webSafe - True indicates we should use the alternative + * alphabet, which does not require escaping for use in URLs. + * @returns base64 output + */ +export function toBase64(bytes: Uint8Array, opt_webSafe?: boolean): string { + const encoded = window + .btoa( + /* padding */ + toByteString(bytes)) + .replace(/=/g, ''); + if (opt_webSafe) { + return encoded.replace(/\+/g, '-').replace(/\//g, '_'); + } + return encoded; +} + +/** + * Converts a byte string to a byte array. Only support ASCII and Latin-1 + * strings, does not support multi-byte characters. + * + * @param str - the input + * @returns the byte array output + */ +export function fromByteString(str: string): Uint8Array { + const output = []; + let p = 0; + for (let i = 0; i < str.length; i++) { + const c = str.charCodeAt(i); + output[p++] = c; + } + return new Uint8Array(output); +} + +/** + * Turns a byte array into the string given by the concatenation of the + * characters to which the numbers correspond. Each byte is corresponding to a + * character. Does not support multi-byte characters. + * + * @param bytes - Array of numbers representing + * characters. + * @returns Stringification of the array. + */ +export function toByteString(bytes: Uint8Array): string { + let str = ''; + for (let i = 0; i < bytes.length; i += 1) { + str += String.fromCharCode(bytes[i]); + } + return str; +} diff --git a/schemaregistry/rules/encryption/tink/exception/invalid_arguments_exception.ts b/schemaregistry/rules/encryption/tink/exception/invalid_arguments_exception.ts new file mode 100644 index 00000000..26481153 --- /dev/null +++ b/schemaregistry/rules/encryption/tink/exception/invalid_arguments_exception.ts @@ -0,0 +1,16 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * Exception used when a function receives an invalid argument. 
+ */ +export class InvalidArgumentsException extends Error { + constructor(message?: string) { + super(message); + Object.setPrototypeOf(this, InvalidArgumentsException.prototype); + } +} +InvalidArgumentsException.prototype.name = 'InvalidArgumentsException'; diff --git a/schemaregistry/rules/encryption/tink/exception/security_exception.ts b/schemaregistry/rules/encryption/tink/exception/security_exception.ts new file mode 100644 index 00000000..25d81b75 --- /dev/null +++ b/schemaregistry/rules/encryption/tink/exception/security_exception.ts @@ -0,0 +1,16 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * The base class for all security exceptions. + */ +export class SecurityException extends Error { + constructor(message?: string) { + super(message); + Object.setPrototypeOf(this, SecurityException.prototype); + } +} +SecurityException.prototype.name = 'SecurityException'; diff --git a/schemaregistry/rules/encryption/tink/hkdf.ts b/schemaregistry/rules/encryption/tink/hkdf.ts new file mode 100644 index 00000000..2f0406df --- /dev/null +++ b/schemaregistry/rules/encryption/tink/hkdf.ts @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * An implementation of HKDF, RFC 5869. + */ +import {InvalidArgumentsException} from './exception/invalid_arguments_exception'; + +import {fromRawKey as hmacFromRawKey} from './hmac'; +import * as Validators from './validators'; + +/** + * Computes an HKDF. + * + * @param size - The length of the generated pseudorandom string in + * bytes. The maximal size is 255 * DigestSize, where DigestSize is the size + * of the underlying HMAC. + * @param hash - the name of the hash function. Accepted names are SHA-1, + * SHA-256 and SHA-512 + * @param ikm - Input keying material. + * @param info - Context and application specific + * information (can be a zero-length array). + * @param opt_salt - Salt value (a non-secret random + * value). If not provided, it is set to a string of hash length zeros. + * @returns Output keying material (okm). + */ +export async function compute( + size: number, hash: string, ikm: Uint8Array, info: Uint8Array, + opt_salt?: Uint8Array): Promise { + let digestSize; + if (!Number.isInteger(size)) { + throw new InvalidArgumentsException('size must be an integer'); + } + if (size <= 0) { + throw new InvalidArgumentsException('size must be positive'); + } + switch (hash) { + case 'SHA-1': + digestSize = 20; + if (size > 255 * 20) { + throw new InvalidArgumentsException('size too large'); + } + break; + case 'SHA-256': + digestSize = 32; + if (size > 255 * 32) { + throw new InvalidArgumentsException('size too large'); + } + break; + case 'SHA-512': + digestSize = 64; + if (size > 255 * 64) { + throw new InvalidArgumentsException('size too large'); + } + break; + default: + throw new InvalidArgumentsException(hash + ' is not supported'); + } + Validators.requireUint8Array(ikm); + Validators.requireUint8Array(info); + let salt = opt_salt; + if (opt_salt == null || salt === undefined || salt.length == 0) { + salt = new Uint8Array(digestSize); + } + Validators.requireUint8Array(salt); + + // Extract. 
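+  // RFC 5869 step 1: PRK = HMAC-Hash(salt, IKM); a zero-filled salt of digest
+  // length is used when the caller supplied none (handled above).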
+ let hmac = await hmacFromRawKey(hash, salt, digestSize); + const prk = await hmac.computeMac( + // Pseudorandom Key + ikm); + + // Expand + hmac = await hmacFromRawKey(hash, prk, digestSize); + let ctr = 1; + let pos = 0; + let digest = new Uint8Array(0); + const result = new Uint8Array(size); + while (true) { + const input = new Uint8Array(digest.length + info.length + 1); + input.set(digest, 0); + input.set(info, digest.length); + input[input.length - 1] = ctr; + digest = await hmac.computeMac(input); + if (pos + digest.length < size) { + result.set(digest, pos); + pos += digest.length; + ctr++; + } else { + result.set(digest.subarray(0, size - pos), pos); + break; + } + } + return result; +} diff --git a/schemaregistry/rules/encryption/tink/hmac.ts b/schemaregistry/rules/encryption/tink/hmac.ts new file mode 100644 index 00000000..c83ccfff --- /dev/null +++ b/schemaregistry/rules/encryption/tink/hmac.ts @@ -0,0 +1,96 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {InvalidArgumentsException} from './exception/invalid_arguments_exception'; +import {Mac} from './mac'; + +import * as Bytes from './bytes'; +import * as Validators from './validators'; +import * as crypto from 'crypto'; + +/** + * The minimum tag size. + * + */ +const MIN_TAG_SIZE_IN_BYTES: number = 10; + +/** + * Implementation of HMAC. + * + */ +export class Hmac extends Mac { + /** + * @param hash - accepted names are SHA-1, SHA-256 and SHA-512 + * @param tagSize - the size of the tag + */ + constructor( + private readonly hash: string, private readonly key: CryptoKey, + private readonly tagSize: number) { + super(); + } + + /** + */ + async computeMac(data: Uint8Array): Promise { + Validators.requireUint8Array(data); + const tag = await crypto.subtle.sign( + {'name': 'HMAC', 'hash': {'name': this.hash}}, this.key, data); + return new Uint8Array(tag.slice(0, this.tagSize)); + } + + /** + */ + async verifyMac(tag: Uint8Array, data: Uint8Array): Promise { + Validators.requireUint8Array(tag); + Validators.requireUint8Array(data); + const computedTag = await this.computeMac(data); + return Bytes.isEqual(tag, computedTag); + } +} + +/** + * @param hash - accepted names are SHA-1, SHA-256 and SHA-512 + * @param tagSize - the size of the tag + */ +export async function fromRawKey( + hash: string, key: Uint8Array, tagSize: number): Promise { + Validators.requireUint8Array(key); + if (!Number.isInteger(tagSize)) { + throw new InvalidArgumentsException('invalid tag size, must be an integer'); + } + if (tagSize < MIN_TAG_SIZE_IN_BYTES) { + throw new InvalidArgumentsException( + 'tag too short, must be at least ' + MIN_TAG_SIZE_IN_BYTES + ' bytes'); + } + switch (hash) { + case 'SHA-1': + if (tagSize > 20) { + throw new InvalidArgumentsException( + 'tag too long, must not be larger than 20 bytes'); + } + break; + case 'SHA-256': + if (tagSize > 32) { + throw new InvalidArgumentsException( + 'tag too long, must not be larger than 32 bytes'); + } + break; + case 'SHA-512': + if (tagSize > 64) { + throw new InvalidArgumentsException( + 'tag too long, must not be larger than 64 bytes'); + } + break; + default: + throw new InvalidArgumentsException(hash + ' is not supported'); + } + + // TODO(b/115974209): Add check that key.length > 16. 
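+  // Import the raw bytes as a non-extractable WebCrypto HMAC key; computeMac
+  // later truncates the full digest to `tagSize` bytes.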
+ const cryptoKey = await crypto.subtle.importKey( + 'raw', key, + {'name': 'HMAC', 'hash': {'name': hash}, 'length': key.length * 8}, false, + ['sign', 'verify']); + return new Hmac(hash, cryptoKey, tagSize); +} diff --git a/schemaregistry/rules/encryption/tink/mac.ts b/schemaregistry/rules/encryption/tink/mac.ts new file mode 100644 index 00000000..034259da --- /dev/null +++ b/schemaregistry/rules/encryption/tink/mac.ts @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * Interface for Message Authentication Codes (MAC). + * + * Security guarantees: Message Authentication Codes provide symmetric message + * authentication. Instances implementing this interface are secure against + * existential forgery under chosen plaintext attack, and can be deterministic + * or randomized. This interface should be used for authentication only, and not + * for other purposes like generation of pseudorandom bytes. + * + */ +export abstract class Mac { + /** + * Computes message authentication code (MAC) for `data`. + * + * @param data - the data to compute MAC + * @returns the MAC tag + */ + abstract computeMac(data: Uint8Array): Promise; + + /** + * Verifies whether `tag` is a correct authentication code for `data`. + * + * @param tag - the MAC tag + * @param data - the data to compute MAC + */ + abstract verifyMac(tag: Uint8Array, data: Uint8Array): Promise; +} diff --git a/schemaregistry/rules/encryption/tink/proto/aes_gcm_pb.ts b/schemaregistry/rules/encryption/tink/proto/aes_gcm_pb.ts new file mode 100644 index 00000000..f774cf14 --- /dev/null +++ b/schemaregistry/rules/encryption/tink/proto/aes_gcm_pb.ts @@ -0,0 +1,74 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//////////////////////////////////////////////////////////////////////////////// + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file rules/encryption/tink/proto/aes_gcm.proto (package google.crypto.tink, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file rules/encryption/tink/proto/aes_gcm.proto. 
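+ *
+ * Serialization sketch (this is how the encryption executor wraps raw DEK
+ * bytes; `rawKeyBytes` is a placeholder Uint8Array):
+ *
+ * @example
+ * const key = create(AesGcmKeySchema, { version: 0, keyValue: rawKeyBytes })
+ * const bytes = toBinary(AesGcmKeySchema, key)
+ * const parsed = fromBinary(AesGcmKeySchema, bytes)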
+ */ +export const file_rules_encryption_tink_proto_aes_gcm: GenFile = /*@__PURE__*/ + fileDesc("CilydWxlcy9lbmNyeXB0aW9uL3RpbmsvcHJvdG8vYWVzX2djbS5wcm90bxISZ29vZ2xlLmNyeXB0by50aW5rIjQKD0Flc0djbUtleUZvcm1hdBIQCghrZXlfc2l6ZRgCIAEoDRIPCgd2ZXJzaW9uGAMgASgNIi8KCUFlc0djbUtleRIPCgd2ZXJzaW9uGAEgASgNEhEKCWtleV92YWx1ZRgDIAEoDEJYChxjb20uZ29vZ2xlLmNyeXB0by50aW5rLnByb3RvUAFaLWdpdGh1Yi5jb20vZ29vZ2xlL3RpbmsvcHJvdG8vYWVzX2djbV9nb19wcm90b6ICBlRJTktQQmIGcHJvdG8z"); + +/** + * @generated from message google.crypto.tink.AesGcmKeyFormat + */ +export type AesGcmKeyFormat = Message<"google.crypto.tink.AesGcmKeyFormat"> & { + /** + * @generated from field: uint32 key_size = 2; + */ + keySize: number; + + /** + * @generated from field: uint32 version = 3; + */ + version: number; +}; + +/** + * Describes the message google.crypto.tink.AesGcmKeyFormat. + * Use `create(AesGcmKeyFormatSchema)` to create a new message. + */ +export const AesGcmKeyFormatSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_rules_encryption_tink_proto_aes_gcm, 0); + +/** + * @generated from message google.crypto.tink.AesGcmKey + */ +export type AesGcmKey = Message<"google.crypto.tink.AesGcmKey"> & { + /** + * @generated from field: uint32 version = 1; + */ + version: number; + + /** + * @generated from field: bytes key_value = 3; + */ + keyValue: Uint8Array; +}; + +/** + * Describes the message google.crypto.tink.AesGcmKey. + * Use `create(AesGcmKeySchema)` to create a new message. + */ +export const AesGcmKeySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_rules_encryption_tink_proto_aes_gcm, 1); + diff --git a/schemaregistry/rules/encryption/tink/proto/aes_siv_pb.ts b/schemaregistry/rules/encryption/tink/proto/aes_siv_pb.ts new file mode 100644 index 00000000..95d871bb --- /dev/null +++ b/schemaregistry/rules/encryption/tink/proto/aes_siv_pb.ts @@ -0,0 +1,80 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//////////////////////////////////////////////////////////////////////////////// + +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file rules/encryption/tink/proto/aes_siv.proto (package google.crypto.tink, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file rules/encryption/tink/proto/aes_siv.proto. 
+ */ +export const file_rules_encryption_tink_proto_aes_siv: GenFile = /*@__PURE__*/ + fileDesc("CilydWxlcy9lbmNyeXB0aW9uL3RpbmsvcHJvdG8vYWVzX3Npdi5wcm90bxISZ29vZ2xlLmNyeXB0by50aW5rIjQKD0Flc1NpdktleUZvcm1hdBIQCghrZXlfc2l6ZRgBIAEoDRIPCgd2ZXJzaW9uGAIgASgNIi8KCUFlc1NpdktleRIPCgd2ZXJzaW9uGAEgASgNEhEKCWtleV92YWx1ZRgCIAEoDEJPChxjb20uZ29vZ2xlLmNyeXB0by50aW5rLnByb3RvUAFaLWdpdGh1Yi5jb20vZ29vZ2xlL3RpbmsvcHJvdG8vYWVzX3Npdl9nb19wcm90b2IGcHJvdG8z"); + +/** + * @generated from message google.crypto.tink.AesSivKeyFormat + */ +export type AesSivKeyFormat = Message<"google.crypto.tink.AesSivKeyFormat"> & { + /** + * Only valid value is: 64. + * + * @generated from field: uint32 key_size = 1; + */ + keySize: number; + + /** + * @generated from field: uint32 version = 2; + */ + version: number; +}; + +/** + * Describes the message google.crypto.tink.AesSivKeyFormat. + * Use `create(AesSivKeyFormatSchema)` to create a new message. + */ +export const AesSivKeyFormatSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_rules_encryption_tink_proto_aes_siv, 0); + +/** + * key_type: type.googleapis.com/google.crypto.tink.AesSivKey + * + * @generated from message google.crypto.tink.AesSivKey + */ +export type AesSivKey = Message<"google.crypto.tink.AesSivKey"> & { + /** + * @generated from field: uint32 version = 1; + */ + version: number; + + /** + * First half is AES-CTR key, second is AES-SIV. + * + * @generated from field: bytes key_value = 2; + */ + keyValue: Uint8Array; +}; + +/** + * Describes the message google.crypto.tink.AesSivKey. + * Use `create(AesSivKeySchema)` to create a new message. + */ +export const AesSivKeySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_rules_encryption_tink_proto_aes_siv, 1); + diff --git a/schemaregistry/rules/encryption/tink/random.ts b/schemaregistry/rules/encryption/tink/random.ts new file mode 100644 index 00000000..7ec2dbdc --- /dev/null +++ b/schemaregistry/rules/encryption/tink/random.ts @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + + +/** + * Several simple wrappers of crypto.getRandomValues. + */ +import {InvalidArgumentsException} from './exception/invalid_arguments_exception'; +import * as crypto from 'crypto'; + +/** + * Randomly generates `n` bytes. + * + * @param n - number of bytes to generate + * @returns the random bytes + */ +export function randBytes(n: number): Uint8Array { + if (!Number.isInteger(n) || n < 0) { + throw new InvalidArgumentsException('n must be a nonnegative integer'); + } + const result = new Uint8Array(n); + crypto.getRandomValues(result); + return result; +} diff --git a/schemaregistry/rules/encryption/tink/validators.ts b/schemaregistry/rules/encryption/tink/validators.ts new file mode 100644 index 00000000..6b73ae55 --- /dev/null +++ b/schemaregistry/rules/encryption/tink/validators.ts @@ -0,0 +1,79 @@ +/** + * Copyright 2020 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import {InvalidArgumentsException} from './exception/invalid_arguments_exception'; +import {SecurityException} from './exception/security_exception'; +const SUPPORTED_AES_KEY_SIZES: number[] = [16, 32]; + +/** + * Validates AES key sizes, at the moment only 128-bit and 256-bit keys are + * supported. 
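+ * For example, `validateAesKeySize(16)` and `validateAesKeySize(32)` are
+ * accepted, while `validateAesKeySize(24)` throws an InvalidArgumentsException.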
+ * + * @param n - the key size in bytes + * @throws {@link InvalidArgumentsException} + */ +export function validateAesKeySize(n: number) { + if (!SUPPORTED_AES_KEY_SIZES.includes(n)) { + throw new InvalidArgumentsException('unsupported AES key size: ' + n); + } +} + +/** + * Validates that the input is a non null Uint8Array. + * + * @throws {@link InvalidArgumentsException} + */ +export function requireUint8Array(input: Uint8Array) { + if (input == null || !(input instanceof Uint8Array)) { + throw new InvalidArgumentsException('input must be a non null Uint8Array'); + } +} + +/** + * Validates version, throws exception if candidate version is negative or + * bigger than expected. + * + * @param candidate - version to be validated + * @param maxVersion - upper bound on version + * @throws {@link SecurityException} + */ +export function validateVersion(candidate: number, maxVersion: number) { + if (candidate < 0 || candidate > maxVersion) { + throw new SecurityException( + 'Version is out of bound, must be ' + + 'between 0 and ' + maxVersion + '.'); + } +} + +/** + * Validates ECDSA parameters. + * + * @throws {@link SecurityException} + */ +export function validateEcdsaParams(curve: string, hash: string) { + switch (curve) { + case 'P-256': + if (hash != 'SHA-256') { + throw new SecurityException( + 'expected SHA-256 (because curve is P-256) but got ' + hash); + } + break; + case 'P-384': + if (hash != 'SHA-384' && hash != 'SHA-512') { + throw new SecurityException( + 'expected SHA-384 or SHA-512 (because curve is P-384) but got ' + + hash); + } + break; + case 'P-521': + if (hash != 'SHA-512') { + throw new SecurityException( + 'expected SHA-512 (because curve is P-521) but got ' + hash); + } + break; + default: + throw new SecurityException('unsupported curve: ' + curve); + } +} diff --git a/schemaregistry/rules/jsonata/jsonata-executor.ts b/schemaregistry/rules/jsonata/jsonata-executor.ts new file mode 100644 index 00000000..556d4a98 --- /dev/null +++ b/schemaregistry/rules/jsonata/jsonata-executor.ts @@ -0,0 +1,43 @@ +import {RuleRegistry} from "../../serde/rule-registry"; +import {RuleContext, RuleExecutor} from "../../serde/serde"; +import {ClientConfig} from "../../rest-service"; +import {LRUCache} from "lru-cache"; +import jsonata, {Expression} from "jsonata"; + +export class JsonataExecutor implements RuleExecutor { + config: Map | null = null + cache: LRUCache = new LRUCache({max: 1000}) + + /** + * Register the JSONata rule executor with the rule registry. 
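+ *
+ * A minimal sketch (rule contents are illustrative): call
+ * `JsonataExecutor.register()` once at startup so that rules of type
+ * "JSONATA", such as a migration rule whose `expr` is the JSONata
+ * expression `$merge([$, {"region": "EMEA"}])`, can be resolved by
+ * serializers and deserializers created afterwards.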
+ */ + static register(): JsonataExecutor { + const executor = new JsonataExecutor() + RuleRegistry.registerRuleExecutor(executor) + return executor + } + + configure(clientConfig: ClientConfig, config: Map) { + this.config = config + } + + type(): string { + return "JSONATA" + } + + async transform(ctx: RuleContext, msg: any): Promise { + let expr = ctx.rule.expr + if (expr == null) { + return msg + } + let jsonataExpr = this.cache.get(expr) + if (jsonataExpr == null) { + jsonataExpr = jsonata(expr) + this.cache.set(expr, jsonataExpr) + } + return jsonataExpr.evaluate(msg) + } + + async close(): Promise { + } +} diff --git a/schemaregistry/run_docker_schemaregistry.sh b/schemaregistry/run_docker_schemaregistry.sh new file mode 100755 index 00000000..d3338c07 --- /dev/null +++ b/schemaregistry/run_docker_schemaregistry.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +COMPOSE_VERSION=$(docker-compose --version) +DOCKER_VERSION=$(docker --version) +JEST=${JEST:-../node_modules/.bin/jest} +INTEG_DIR=../e2e/schemaregistry + +# Start the docker compose file +echo "Running docker compose up. Docker version $DOCKER_VERSION. Compose version $COMPOSE_VERSION. " + +docker-compose -f docker-compose.schemaregistry.yml up -d + +if [ "$?" == "1" ]; then + echo "Failed to start docker images." + exit 1 +fi + +echo "Running schema registry e2e tests" + +# Waiting for Zookeeper and Kafka to start +sleep 10 + +$JEST $INTEG_DIR diff --git a/schemaregistry/schemaregistry-client.ts b/schemaregistry/schemaregistry-client.ts new file mode 100644 index 00000000..3c7c4b0e --- /dev/null +++ b/schemaregistry/schemaregistry-client.ts @@ -0,0 +1,785 @@ +import { RestService, ClientConfig } from './rest-service'; +import { AxiosResponse } from 'axios'; +import stringify from "json-stringify-deterministic"; +import { LRUCache } from 'lru-cache'; +import { Mutex } from 'async-mutex'; +import { MockClient } from "./mock-schemaregistry-client"; + +/* + * Confluent-Schema-Registry-TypeScript - Node.js wrapper for Confluent Schema Registry + * + * Copyright (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. 
+ */ + +export enum Compatibility { + NONE = "NONE", + BACKWARD = "BACKWARD", + FORWARD = "FORWARD", + FULL = "FULL", + BACKWARD_TRANSITIVE = "BACKWARD_TRANSITIVE", + FORWARD_TRANSITIVE = "FORWARD_TRANSITIVE", + FULL_TRANSITIVE = "FULL_TRANSITIVE" +} + +export interface CompatibilityLevel { + compatibility?: Compatibility; + compatibilityLevel?: Compatibility; +} + +/** + * Rule represents a data contract rule + */ +export interface Rule { + name: string + doc?: string + kind?: string + mode?: RuleMode + type: string + tags?: string[] + params?: { [key: string]: string } + expr?: string + onSuccess?: string + onFailure?: string + disabled?: boolean +} + +export enum RuleMode { + UPGRADE = 'UPGRADE', + DOWNGRADE = 'DOWNGRADE', + UPDOWN = 'UPDOWN', + WRITE = 'WRITE', + READ = 'READ', + WRITEREAD = 'WRITEREAD', +} + +/** + * SchemaInfo represents a schema and its associated information + */ +export interface SchemaInfo { + schema: string; + schemaType?: string; + references?: Reference[]; + metadata?: Metadata; + ruleSet?: RuleSet; +} + +// Ensure that SchemaMetadata fields are removed from the SchemaInfo +export function minimize(info: SchemaInfo): SchemaInfo { + return { + schemaType: info.schemaType, + schema: info.schema, + references: info.references, + metadata: info.metadata, + ruleSet: info.ruleSet + } +} + +/** + * SchemaMetadata extends SchemaInfo with additional metadata + */ +export interface SchemaMetadata extends SchemaInfo { + id: number; + subject?: string; + version?: number; +} + +/** + * Reference represents a schema reference + */ +export interface Reference { + name: string; + subject: string; + version: number; +} + +/** + * Metadata represents user-defined metadata + */ +export interface Metadata { + tags?: { [key: string]: string[] }; + properties?: { [key: string]: string }; + sensitive?: string[]; +} + +/** + * RuleSet represents a data contract rule set + */ +export interface RuleSet { + migrationRules?: Rule[]; + domainRules?: Rule[]; +} + +/** + * ServerConfig represents config params for Schema Registry + */ +export interface ServerConfig { + alias?: string; + normalize?: boolean; + compatibility?: Compatibility; + compatibilityLevel?: Compatibility; + compatibilityGroup?: string; + defaultMetadata?: Metadata; + overrideMetadata?: Metadata; + defaultRuleSet?: RuleSet; + overrideRuleSet?: RuleSet; +} + +export interface isCompatibleResponse { + is_compatible: boolean; +} + +/** + * Client is an interface for clients interacting with the Confluent Schema Registry. 
+ * The Schema Registry's REST interface is further explained in Confluent's Schema Registry API documentation + * https://github.com/confluentinc/schema-registry/blob/master/client/src/main/java/io/confluent/kafka/schemaregistry/client/SchemaRegistryClient.java + */ +export interface Client { + config(): ClientConfig; + register(subject: string, schema: SchemaInfo, normalize: boolean): Promise; + registerFullResponse(subject: string, schema: SchemaInfo, normalize: boolean): Promise; + getBySubjectAndId(subject: string, id: number, format?: string): Promise; + getId(subject: string, schema: SchemaInfo, normalize: boolean): Promise; + getLatestSchemaMetadata(subject: string, format?: string): Promise; + getSchemaMetadata(subject: string, version: number, deleted: boolean, format?: string): Promise; + getLatestWithMetadata(subject: string, metadata: { [key: string]: string }, + deleted: boolean, format?: string): Promise; + getAllVersions(subject: string): Promise; + getVersion(subject: string, schema: SchemaInfo, normalize: boolean): Promise; + getAllSubjects(): Promise; + deleteSubject(subject: string, permanent: boolean): Promise; + deleteSubjectVersion(subject: string, version: number, permanent: boolean): Promise; + testSubjectCompatibility(subject: string, schema: SchemaInfo): Promise; + testCompatibility(subject: string, version: number, schema: SchemaInfo): Promise; + getCompatibility(subject: string): Promise; + updateCompatibility(subject: string, update: Compatibility): Promise; + getDefaultCompatibility(): Promise; + updateDefaultCompatibility(update: Compatibility): Promise; + getConfig(subject: string): Promise; + updateConfig(subject: string, update: ServerConfig): Promise; + getDefaultConfig(): Promise; + updateDefaultConfig(update: ServerConfig): Promise; + clearLatestCaches(): void; + clearCaches(): void; + close(): void; +} + +/** + * SchemaRegistryClient is a client for interacting with the Confluent Schema Registry. + * This client will cache responses from Schema Registry to reduce network requests. + */ +export class SchemaRegistryClient implements Client { + private clientConfig: ClientConfig; + private restService: RestService; + + private schemaToIdCache: LRUCache; + private idToSchemaInfoCache: LRUCache; + private infoToSchemaCache: LRUCache; + private latestToSchemaCache: LRUCache; + private schemaToVersionCache: LRUCache; + private versionToSchemaCache: LRUCache; + private metadataToSchemaCache: LRUCache; + + private schemaToIdMutex: Mutex; + private idToSchemaInfoMutex: Mutex; + private infoToSchemaMutex: Mutex; + private latestToSchemaMutex: Mutex; + private schemaToVersionMutex: Mutex; + private versionToSchemaMutex: Mutex; + private metadataToSchemaMutex: Mutex; + + /** + * Create a new Schema Registry client. + * @param config - The client configuration. + */ + constructor(config: ClientConfig) { + this.clientConfig = config + const cacheOptions = { + max: config.cacheCapacity !== undefined ? 
config.cacheCapacity : 1000, + ...(config.cacheLatestTtlSecs !== undefined && { maxAge: config.cacheLatestTtlSecs * 1000 }) + }; + + this.restService = new RestService(config.baseURLs, config.isForward, config.createAxiosDefaults, + config.basicAuthCredentials, config.bearerAuthCredentials); + + this.schemaToIdCache = new LRUCache(cacheOptions); + this.idToSchemaInfoCache = new LRUCache(cacheOptions); + this.infoToSchemaCache = new LRUCache(cacheOptions); + this.latestToSchemaCache = new LRUCache(cacheOptions); + this.schemaToVersionCache = new LRUCache(cacheOptions); + this.versionToSchemaCache = new LRUCache(cacheOptions); + this.metadataToSchemaCache = new LRUCache(cacheOptions); + this.schemaToIdMutex = new Mutex(); + this.idToSchemaInfoMutex = new Mutex(); + this.infoToSchemaMutex = new Mutex(); + this.latestToSchemaMutex = new Mutex(); + this.schemaToVersionMutex = new Mutex(); + this.versionToSchemaMutex = new Mutex(); + this.metadataToSchemaMutex = new Mutex(); + } + + static newClient(config: ClientConfig): Client { + let url = config.baseURLs[0] + if (url.startsWith("mock://")) { + return new MockClient(config) + } + return new SchemaRegistryClient(config) + } + + config(): ClientConfig { + return this.clientConfig + } + + /** + * Register a schema with the Schema Registry and return the schema ID. + * @param subject - The subject under which to register the schema. + * @param schema - The schema to register. + * @param normalize - Whether to normalize the schema before registering. + */ + async register(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const metadataResult = await this.registerFullResponse(subject, schema, normalize); + + return metadataResult.id; + } + + /** + * Register a schema with the Schema Registry and return the full response. + * @param subject - The subject under which to register the schema. + * @param schema - The schema to register. + * @param normalize - Whether to normalize the schema before registering. + */ + async registerFullResponse(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + + return await this.infoToSchemaMutex.runExclusive(async () => { + const cachedSchemaMetadata: SchemaMetadata | undefined = this.infoToSchemaCache.get(cacheKey); + if (cachedSchemaMetadata) { + return cachedSchemaMetadata; + } + + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/versions?normalize=${normalize}`, + 'POST', + schema + ); + this.infoToSchemaCache.set(cacheKey, response.data); + return response.data; + }); + } + + /** + * Get a schema by subject and ID. + * @param subject - The subject under which the schema is registered. + * @param id - The schema ID. + * @param format - The format of the schema. + */ + async getBySubjectAndId(subject: string, id: number, format?: string): Promise { + const cacheKey = stringify({ subject, id }); + return await this.idToSchemaInfoMutex.runExclusive(async () => { + const cachedSchema: SchemaInfo | undefined = this.idToSchemaInfoCache.get(cacheKey); + if (cachedSchema) { + return cachedSchema; + } + + subject = encodeURIComponent(subject); + + let formatStr = format != null ? 
`&format=${format}` : ''; + + const response: AxiosResponse = await this.restService.handleRequest( + `/schemas/ids/${id}?subject=${subject}${formatStr}`, + 'GET' + ); + this.idToSchemaInfoCache.set(cacheKey, response.data); + return response.data; + }); + } + + /** + * Get the ID for a schema. + * @param subject - The subject under which the schema is registered. + * @param schema - The schema whose ID to get. + * @param normalize - Whether to normalize the schema before getting the ID. + */ + async getId(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + + return await this.schemaToIdMutex.runExclusive(async () => { + const cachedId: number | undefined = this.schemaToIdCache.get(cacheKey); + if (cachedId) { + return cachedId; + } + + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}?normalize=${normalize}`, + 'POST', + schema + ); + this.schemaToIdCache.set(cacheKey, response.data.id); + return response.data.id; + }); + } + + /** + * Get the latest schema metadata for a subject. + * @param subject - The subject for which to get the latest schema metadata. + * @param format - The format of the schema. + */ + async getLatestSchemaMetadata(subject: string, format?: string): Promise { + return await this.latestToSchemaMutex.runExclusive(async () => { + const cachedSchema: SchemaMetadata | undefined = this.latestToSchemaCache.get(subject); + if (cachedSchema) { + return cachedSchema; + } + + subject = encodeURIComponent(subject); + + let formatStr = format != null ? `?format=${format}` : ''; + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/versions/latest${formatStr}`, + 'GET' + ); + this.latestToSchemaCache.set(subject, response.data); + return response.data; + }); + } + + /** + * Get the schema metadata for a subject and version. + * @param subject - The subject for which to get the schema metadata. + * @param version - The version of the schema. + * @param deleted - Whether to include deleted schemas. + * @param format - The format of the schema. + */ + async getSchemaMetadata(subject: string, version: number, deleted: boolean = false, format?: string): Promise { + const cacheKey = stringify({ subject, version, deleted }); + + return await this.versionToSchemaMutex.runExclusive(async () => { + const cachedSchemaMetadata: SchemaMetadata | undefined = this.versionToSchemaCache.get(cacheKey); + if (cachedSchemaMetadata) { + return cachedSchemaMetadata; + } + + subject = encodeURIComponent(subject); + + let formatStr = format != null ? `&format=${format}` : ''; + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/versions/${version}?deleted=${deleted}${formatStr}`, + 'GET' + ); + this.versionToSchemaCache.set(cacheKey, response.data); + return response.data; + }); + } + + /** + * Get the latest schema metadata for a subject with the given metadata. + * @param subject - The subject for which to get the latest schema metadata. + * @param metadata - The metadata to match. + * @param deleted - Whether to include deleted schemas. + * @param format - The format of the schema. 
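+ *
+ * For example (hypothetical subject and metadata key, `client` being a
+ * SchemaRegistryClient instance):
+ *
+ *   const latest = await client.getLatestWithMetadata(
+ *     'orders-value', { 'application.version': 'v2' });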
+ */ + async getLatestWithMetadata(subject: string, metadata: { [key: string]: string }, + deleted: boolean = false, format?: string): Promise { + const cacheKey = stringify({ subject, metadata, deleted }); + + return await this.metadataToSchemaMutex.runExclusive(async () => { + const cachedSchemaMetadata: SchemaMetadata | undefined = this.metadataToSchemaCache.get(cacheKey); + if (cachedSchemaMetadata) { + return cachedSchemaMetadata; + } + + subject = encodeURIComponent(subject); + + let metadataStr = ''; + + for (const key in metadata) { + const encodedKey = encodeURIComponent(key); + const encodedValue = encodeURIComponent(metadata[key]); + metadataStr += `&key=${encodedKey}&value=${encodedValue}`; + } + + let formatStr = format != null ? `&format=${format}` : ''; + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/metadata?deleted=${deleted}&${metadataStr}${formatStr}`, + 'GET' + ); + this.metadataToSchemaCache.set(cacheKey, response.data); + return response.data; + }); + } + + /** + * Get all versions of a schema for a subject. + * @param subject - The subject for which to get all versions. + */ + async getAllVersions(subject: string): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/versions`, + 'GET' + ); + return response.data; + } + + /** + * Get the version of a schema for a subject. + * @param subject - The subject for which to get the version. + * @param schema - The schema for which to get the version. + * @param normalize - Whether to normalize the schema before getting the version. + */ + async getVersion(subject: string, schema: SchemaInfo, normalize: boolean = false): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + + return await this.schemaToVersionMutex.runExclusive(async () => { + const cachedVersion: number | undefined = this.schemaToVersionCache.get(cacheKey); + if (cachedVersion) { + return cachedVersion; + } + + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}?normalize=${normalize}`, + 'POST', + schema + ); + this.schemaToVersionCache.set(cacheKey, response.data.version); + return response.data.version!; + }); + } + + /** + * Get all subjects in the Schema Registry. + */ + async getAllSubjects(): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects`, + 'GET' + ); + return response.data; + } + + /** + * Delete a subject from the Schema Registry. + * @param subject - The subject to delete. + * @param permanent - Whether to permanently delete the subject. 
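+ *
+ * For example (hypothetical subject name), a soft delete followed by a
+ * permanent delete:
+ *
+ *   await client.deleteSubject('orders-value');
+ *   await client.deleteSubject('orders-value', true);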
+ */ + async deleteSubject(subject: string, permanent: boolean = false): Promise { + await this.infoToSchemaMutex.runExclusive(async () => { + this.infoToSchemaCache.forEach((_, key) => { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject) { + this.infoToSchemaCache.delete(key); + } + }); + }); + + await this.schemaToVersionMutex.runExclusive(async () => { + this.schemaToVersionCache.forEach((_, key) => { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject) { + this.schemaToVersionCache.delete(key); + } + }); + }); + + await this.versionToSchemaMutex.runExclusive(async () => { + this.versionToSchemaCache.forEach((_, key) => { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject) { + this.versionToSchemaCache.delete(key); + } + }); + }); + + await this.idToSchemaInfoMutex.runExclusive(async () => { + this.idToSchemaInfoCache.forEach((_, key) => { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject) { + this.idToSchemaInfoCache.delete(key); + } + }); + }); + + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}?permanent=${permanent}`, + 'DELETE' + ); + return response.data; + } + + /** + * Delete a version of a subject from the Schema Registry. + * @param subject - The subject to delete. + * @param version - The version to delete. + * @param permanent - Whether to permanently delete the version. + */ + async deleteSubjectVersion(subject: string, version: number, permanent: boolean = false): Promise { + return await this.schemaToVersionMutex.runExclusive(async () => { + let metadataValue: SchemaMetadata | undefined; + + this.schemaToVersionCache.forEach((value, key) => { + const parsedKey = JSON.parse(key); + if (parsedKey.subject === subject && value === version) { + this.schemaToVersionCache.delete(key); + const infoToSchemaCacheKey = stringify({ subject: subject, schema: minimize(parsedKey.schema) }); + + this.infoToSchemaMutex.runExclusive(async () => { + metadataValue = this.infoToSchemaCache.get(infoToSchemaCacheKey); + if (metadataValue) { + this.infoToSchemaCache.delete(infoToSchemaCacheKey); + const cacheKeyID = stringify({ subject: subject, id: metadataValue.id }); + + this.idToSchemaInfoMutex.runExclusive(async () => { + this.idToSchemaInfoCache.delete(cacheKeyID); + }); + } + }); + } + }); + + const cacheKey = stringify({ subject: subject, version: version }); + this.versionToSchemaMutex.runExclusive(async () => { + this.versionToSchemaCache.delete(cacheKey); + }); + + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/subjects/${subject}/versions/${version}?permanent=${permanent}`, + 'DELETE' + ); + return response.data; + }); + } + + /** + * Test the compatibility of a schema with the latest schema for a subject. + * @param subject - The subject for which to test compatibility. + * @param schema - The schema to test compatibility. + */ + async testSubjectCompatibility(subject: string, schema: SchemaInfo): Promise { + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/compatibility/subjects/${subject}/versions/latest`, + 'POST', + schema + ); + return response.data.is_compatible; + } + + /** + * Test the compatibility of a schema with a specific version of a subject. + * @param subject - The subject for which to test compatibility. 
+ * @param version - The version of the schema for which to test compatibility. + * @param schema - The schema to test compatibility. + */ + async testCompatibility(subject: string, version: number, schema: SchemaInfo): Promise { + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/compatibility/subjects/${subject}/versions/${version}`, + 'POST', + schema + ); + return response.data.is_compatible; + } + + /** + * Get the compatibility level for a subject. + * @param subject - The subject for which to get the compatibility level. + */ + async getCompatibility(subject: string): Promise { + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/config/${subject}`, + 'GET' + ); + return response.data.compatibilityLevel!; + } + + /** + * Update the compatibility level for a subject. + * @param subject - The subject for which to update the compatibility level. + * @param update - The compatibility level to update to. + */ + async updateCompatibility(subject: string, update: Compatibility): Promise { + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/config/${subject}`, + 'PUT', + { compatibility: update } + ); + return response.data.compatibility!; + } + + /** + * Get the default/global compatibility level. + */ + async getDefaultCompatibility(): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/config`, + 'GET' + ); + return response.data.compatibilityLevel!; + } + + /** + * Update the default/global compatibility level. + * @param update - The compatibility level to update to. + */ + async updateDefaultCompatibility(update: Compatibility): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/config`, + 'PUT', + { compatibility: update } + ); + return response.data.compatibility!; + } + + /** + * Get the config for a subject. + * @param subject - The subject for which to get the config. + */ + async getConfig(subject: string): Promise { + subject = encodeURIComponent(subject); + + const response: AxiosResponse = await this.restService.handleRequest( + `/config/${subject}`, + 'GET' + ); + return response.data; + } + + /** + * Update the config for a subject. + * @param subject - The subject for which to update the config. + * @param update - The config to update to. + */ + async updateConfig(subject: string, update: ServerConfig): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/config/${subject}`, + 'PUT', + update + ); + return response.data; + } + + /** + * Get the default/global config. + */ + async getDefaultConfig(): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/config`, + 'GET' + ); + return response.data; + } + + /** + * Update the default/global config. + * @param update - The config to update to. + */ + async updateDefaultConfig(update: ServerConfig): Promise { + const response: AxiosResponse = await this.restService.handleRequest( + `/config`, + 'PUT', + update + ); + return response.data; + } + + /** + * Clear the latest caches. + */ + clearLatestCaches(): void { + this.latestToSchemaCache.clear(); + this.metadataToSchemaCache.clear(); + } + + /** + * Clear all caches. 
+ */ + clearCaches(): void { + this.schemaToIdCache.clear(); + this.idToSchemaInfoCache.clear(); + this.infoToSchemaCache.clear(); + this.latestToSchemaCache.clear(); + this.schemaToVersionCache.clear(); + this.versionToSchemaCache.clear(); + this.metadataToSchemaCache.clear(); + } + + /** + * Close the client. + */ + async close(): Promise { + this.clearCaches(); + } + + // Cache methods for testing + async addToInfoToSchemaCache(subject: string, schema: SchemaInfo, metadata: SchemaMetadata): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + await this.infoToSchemaMutex.runExclusive(async () => { + this.infoToSchemaCache.set(cacheKey, metadata); + }); + } + + async addToSchemaToVersionCache(subject: string, schema: SchemaInfo, version: number): Promise { + const cacheKey = stringify({ subject, schema: minimize(schema) }); + await this.schemaToVersionMutex.runExclusive(async () => { + this.schemaToVersionCache.set(cacheKey, version); + }); + } + + async addToVersionToSchemaCache(subject: string, version: number, metadata: SchemaMetadata): Promise { + const cacheKey = stringify({ subject, version }); + await this.versionToSchemaMutex.runExclusive(async () => { + this.versionToSchemaCache.set(cacheKey, metadata); + }); + } + + async addToIdToSchemaInfoCache(subject: string, id: number, schema: SchemaInfo): Promise { + const cacheKey = stringify({ subject, id }); + await this.idToSchemaInfoMutex.runExclusive(async () => { + this.idToSchemaInfoCache.set(cacheKey, schema); + }); + } + + async getInfoToSchemaCacheSize(): Promise { + return await this.infoToSchemaMutex.runExclusive(async () => { + return this.infoToSchemaCache.size; + }); + } + + async getSchemaToVersionCacheSize(): Promise { + return await this.schemaToVersionMutex.runExclusive(async () => { + return this.schemaToVersionCache.size; + }); + } + + async getVersionToSchemaCacheSize(): Promise { + return await this.versionToSchemaMutex.runExclusive(async () => { + return this.versionToSchemaCache.size; + }); + } + + async getIdToSchemaInfoCacheSize(): Promise { + return await this.idToSchemaInfoMutex.runExclusive(async () => { + return this.idToSchemaInfoCache.size; + }); + } +} diff --git a/schemaregistry/serde/avro.ts b/schemaregistry/serde/avro.ts new file mode 100644 index 00000000..055fabca --- /dev/null +++ b/schemaregistry/serde/avro.ts @@ -0,0 +1,449 @@ +import { + Deserializer, DeserializerConfig, + FieldTransform, + FieldType, Migration, RefResolver, + RuleConditionError, + RuleContext, SerdeType, + Serializer, SerializerConfig +} from "./serde"; +import { + Client, RuleMode, + SchemaInfo +} from "../schemaregistry-client"; +import avro, {ForSchemaOptions, Type, types} from "avsc"; +import UnwrappedUnionType = types.UnwrappedUnionType +import WrappedUnionType = types.WrappedUnionType +import ArrayType = types.ArrayType +import MapType = types.MapType +import RecordType = types.RecordType +import Field = types.Field +import { LRUCache } from 'lru-cache' +import {RuleRegistry} from "./rule-registry"; +import stringify from "json-stringify-deterministic"; + +type TypeHook = (schema: avro.Schema, opts: ForSchemaOptions) => Type | undefined + +export type AvroSerdeConfig = Partial + +export interface AvroSerde { + schemaToTypeCache: LRUCache]> +} + +/** + * AvroSerializerConfig is used to configure the AvroSerializer. + */ +export type AvroSerializerConfig = SerializerConfig & AvroSerdeConfig + +/** + * AvroSerializer is used to serialize messages using Avro. 
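+ *
+ * A minimal usage sketch (the URL, topic, message and empty config object are
+ * placeholders, not a complete configuration):
+ *
+ *   const client = SchemaRegistryClient.newClient({ baseURLs: ['http://localhost:8081'] });
+ *   const serializer = new AvroSerializer(client, SerdeType.VALUE, {});
+ *   const payload = await serializer.serialize('orders', { id: 1, amount: 9.99 });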
+ */ +export class AvroSerializer extends Serializer implements AvroSerde { + schemaToTypeCache: LRUCache]> + + /** + * Create a new AvroSerializer. + * @param client - the schema registry client + * @param serdeType - the type of the serializer + * @param conf - the serializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: AvroSerializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.schemaToTypeCache = new LRUCache]>({ max: this.conf.cacheCapacity ?? 1000 }) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + /** + * serialize is used to serialize a message using Avro. + * @param topic - the topic to serialize the message for + * @param msg - the message to serialize + */ + override async serialize(topic: string, msg: any): Promise { + if (this.client == null) { + throw new Error('client is not initialized') + } + if (msg == null) { + throw new Error('message is empty') + } + + let avroSchema = AvroSerializer.messageToSchema(msg) + const schema: SchemaInfo = { + schemaType: 'AVRO', + schema: JSON.stringify(avroSchema), + } + const [id, info] = await this.getId(topic, msg, schema) + let deps: Map + [avroSchema, deps] = await this.toType(info) + const subject = this.subjectName(topic, info) + msg = await this.executeRules( + subject, topic, RuleMode.WRITE, null, info, msg, getInlineTags(info, deps)) + const msgBytes = avroSchema.toBuffer(msg) + return this.writeBytes(id, msgBytes) + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const [schema, ] = await this.toType(ctx.target) + return await transform(ctx, schema, msg, fieldTransform) + } + + async toType(info: SchemaInfo): Promise<[Type, Map]> { + return toType(this.client, this.conf as AvroDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }) + } + + static messageToSchema(msg: any): avro.Type { + let enumIndex = 1 + let fixedIndex = 1 + let recordIndex = 1 + + const namingHook: TypeHook = ( + avroSchema: avro.Schema, + opts: ForSchemaOptions, + ) => { + let schema = avroSchema as any + switch (schema.type) { + case 'enum': + schema.name = `Enum${enumIndex++}`; + break; + case 'fixed': + schema.name = `Fixed${fixedIndex++}`; + break; + case 'record': + schema.name = `Record${recordIndex++}`; + break; + default: + } + return undefined + } + + return Type.forValue(msg, { typeHook: namingHook }) + } +} + +/** + * AvroDeserializerConfig is used to configure the AvroDeserializer. + */ +export type AvroDeserializerConfig = DeserializerConfig & AvroSerdeConfig + +/** + * AvroDeserializer is used to deserialize messages using Avro. + */ +export class AvroDeserializer extends Deserializer implements AvroSerde { + schemaToTypeCache: LRUCache]> + + /** + * Create a new AvroDeserializer. 
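+ *
+ * For example (mirroring the serializer sketch above, with `payload` being a
+ * Buffer produced by an AvroSerializer for the same subject):
+ *
+ *   const deserializer = new AvroDeserializer(client, SerdeType.VALUE, {});
+ *   const value = await deserializer.deserialize('orders', payload);
+ *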
+ * @param client - the schema registry client + * @param serdeType - the type of the deserializer + * @param conf - the deserializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: AvroDeserializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.schemaToTypeCache = new LRUCache]>({ max: this.conf.cacheCapacity ?? 1000 }) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + override async deserialize(topic: string, payload: Buffer): Promise { + if (!Buffer.isBuffer(payload)) { + throw new Error('Invalid buffer') + } + if (payload.length === 0) { + return null + } + + const info = await this.getSchema(topic, payload) + const subject = this.subjectName(topic, info) + const readerMeta = await this.getReaderSchema(subject) + let migrations: Migration[] = [] + if (readerMeta != null) { + migrations = await this.getMigrations(subject, info, readerMeta) + } + const [writer, deps] = await this.toType(info) + + let msg: any + const msgBytes = payload.subarray(5) + if (migrations.length > 0) { + msg = writer.fromBuffer(msgBytes) + msg = await this.executeMigrations(migrations, subject, topic, msg) + } else { + if (readerMeta != null) { + const [reader, ] = await this.toType(readerMeta) + if (reader.equals(writer)) { + msg = reader.fromBuffer(msgBytes) + } else { + msg = reader.fromBuffer(msgBytes, reader.createResolver(writer)) + } + } else { + msg = writer.fromBuffer(msgBytes) + } + } + let target: SchemaInfo + if (readerMeta != null) { + target = readerMeta + } else { + target = info + } + msg = await this.executeRules( + subject, topic, RuleMode.READ, null, target, msg, getInlineTags(info, deps)) + return msg + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const [schema, ] = await this.toType(ctx.target) + return await transform(ctx, schema, msg, fieldTransform) + } + + async toType(info: SchemaInfo): Promise<[Type, Map]> { + return toType(this.client, this.conf as AvroDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }) + } +} + +async function toType( + client: Client, + conf: AvroSerdeConfig, + serde: AvroSerde, + info: SchemaInfo, + refResolver: RefResolver, +): Promise<[Type, Map]> { + let tuple = serde.schemaToTypeCache.get(stringify(info.schema)) + if (tuple != null) { + return tuple + } + + const deps = await refResolver(client, info) + + const addReferencedSchemas = (userHook?: TypeHook): TypeHook | undefined => ( + schema: avro.Schema, + opts: ForSchemaOptions, + ) => { + const avroOpts = opts as AvroSerdeConfig + deps.forEach((schema, _name) => { + avroOpts.typeHook = userHook + avro.Type.forSchema(JSON.parse(schema), avroOpts) + }) + if (userHook) { + return userHook(schema, opts) + } + return + } + + const avroOpts = conf + let type = avro.Type.forSchema(JSON.parse(info.schema), { + ...avroOpts, + typeHook: addReferencedSchemas(avroOpts?.typeHook), + }) + serde.schemaToTypeCache.set(stringify(info.schema), [type, deps]) + return [type, deps] +} + +async function transform(ctx: RuleContext, schema: Type, msg: any, fieldTransform: FieldTransform): Promise { + if 
(msg == null || schema == null) { + return msg + } + const fieldCtx = ctx.currentField() + if (fieldCtx != null) { + fieldCtx.type = getType(schema) + } + switch (schema.typeName) { + case 'union:unwrapped': + case 'union:wrapped': + const subschema = resolveUnion(schema, msg) + if (subschema == null) { + return null + } + return await transform(ctx, subschema, msg, fieldTransform) + case 'array': + const arraySchema = schema as ArrayType + const array = msg as any[] + return await Promise.all(array.map(item => transform(ctx, arraySchema.itemsType, item, fieldTransform))) + case 'map': + const mapSchema = schema as MapType + const map = msg as { [key: string]: any } + for (const key of Object.keys(map)) { + map[key] = await transform(ctx, mapSchema.valuesType, map[key], fieldTransform) + } + return map + case 'record': + const recordSchema = schema as RecordType + const record = msg as Record + for (const field of recordSchema.fields) { + await transformField(ctx, recordSchema, field, record, record[field.name], fieldTransform) + } + return record + default: + if (fieldCtx != null) { + const ruleTags = ctx.rule.tags ?? [] + if (ruleTags == null || ruleTags.length === 0 || !disjoint(new Set(ruleTags), fieldCtx.tags)) { + return await fieldTransform.transform(ctx, fieldCtx, msg) + } + } + return msg + } +} + +async function transformField( + ctx: RuleContext, + recordSchema: RecordType, + field: Field, + record: Record, + val: any, + fieldTransform: FieldTransform, +): Promise { + const fullName = recordSchema.name + '.' + field.name + try { + ctx.enterField( + val, + fullName, + field.name, + getType(field.type), + ctx.getInlineTags(fullName), + ) + const newVal = await transform(ctx, field.type, record[field.name], fieldTransform) + if (ctx.rule.kind === 'CONDITION') { + if (!newVal) { + throw new RuleConditionError(ctx.rule) + } + } else { + record[field.name] = newVal + } + } finally { + ctx.leaveField() + } +} + +function getType(schema: Type): FieldType { + switch (schema.typeName) { + case 'record': + return FieldType.RECORD + case 'enum': + return FieldType.ENUM + case 'array': + return FieldType.ARRAY + case 'map': + return FieldType.MAP + case 'union:unwrapped': + case 'union:wrapped': + return FieldType.COMBINED + case 'fixed': + return FieldType.FIXED + case 'string': + return FieldType.STRING + case 'bytes': + return FieldType.BYTES + case 'int': + return FieldType.INT + case 'abstract:long': + case 'long': + return FieldType.LONG + case 'float': + return FieldType.FLOAT + case 'double': + return FieldType.DOUBLE + case 'boolean': + return FieldType.BOOLEAN + case 'null': + return FieldType.NULL + default: + return FieldType.NULL + } +} + +function disjoint(slice1: Set, map1: Set): boolean { + for (const v of slice1) { + if (map1.has(v)) { + return false + } + } + return true +} + +function resolveUnion(schema: Type, msg: any): Type | null { + let unionTypes = null + if (schema.typeName === 'union:unwrapped') { + const union = schema as UnwrappedUnionType + unionTypes = union.types.slice() + } else if (schema.typeName === 'union:wrapped') { + const union = schema as WrappedUnionType + unionTypes = union.types.slice() + } + if (unionTypes != null) { + for (let i = 0; i < unionTypes.length; i++) { + if (unionTypes[i].isValid(msg)) { + return unionTypes[i] + } + } + } + return null +} + +function getInlineTags(info: SchemaInfo, deps: Map): Map> { + const inlineTags = new Map>() + getInlineTagsRecursively('', '', JSON.parse(info.schema), inlineTags) + for (const depSchema of 
deps.values()) { + getInlineTagsRecursively('', '', JSON.parse(depSchema), inlineTags) + } + return inlineTags +} + +// iterate over the object and get all properties named 'confluent:tags' +function getInlineTagsRecursively(ns: string, name: string, schema: any, tags: Map>): void { + if (schema == null || typeof schema === 'string') { + return + } else if (Array.isArray(schema)) { + for (let i = 0; i < schema.length; i++) { + getInlineTagsRecursively(ns, name, schema[i], tags) + } + } else if (typeof schema === 'object') { + const type = schema['type'] + if (type === 'record') { + let recordNs = schema['namespace'] + let recordName = schema['name'] + if (recordNs === undefined) { + recordNs = impliedNamespace(name) + } + if (recordNs == null) { + recordNs = ns + } + if (recordNs !== '' && !recordName.startsWith(recordNs)) { + recordName = recordNs + '.' + recordName + } + const fields = schema['fields'] + for (const field of fields) { + const fieldTags = field['confluent:tags'] + const fieldName = field['name'] + if (fieldTags !== undefined && fieldName !== undefined) { + tags.set(recordName + '.' + fieldName, new Set(fieldTags)) + } + const fieldType = field['type'] + if (fieldType !== undefined) { + getInlineTagsRecursively(recordNs, recordName, fieldType, tags) + } + } + } + } +} + +function impliedNamespace(name: string): string | null { + const match = /^(.*)\.[^.]+$/.exec(name) + return match ? match[1] : null +} + + diff --git a/schemaregistry/serde/buffer-wrapper.ts b/schemaregistry/serde/buffer-wrapper.ts new file mode 100644 index 00000000..98f2c1a8 --- /dev/null +++ b/schemaregistry/serde/buffer-wrapper.ts @@ -0,0 +1,63 @@ +export const MAX_VARINT_LEN_16 = 3 +export const MAX_VARINT_LEN_32 = 5 +export const MAX_VARINT_LEN_64 = 10 + +export class BufferWrapper { + buf: Buffer + pos: number + + constructor(buf: Buffer) { + this.buf = buf + this.pos = 0 + } + + // Adapted from avro-js + writeVarInt(n: number): void { + let f, m + + if (n >= -1073741824 && n < 1073741824) { + // Won't overflow, we can use integer arithmetic. + m = n >= 0 ? n << 1 : (~n << 1) | 1 + do { + this.buf[this.pos] = m & 0x7f + m >>= 7 + } while (m && (this.buf[this.pos++] |= 0x80)) + } else { + // We have to use slower floating arithmetic. + f = n >= 0 ? n * 2 : -n * 2 - 1 + do { + this.buf[this.pos] = f & 0x7f + f /= 128 + } while (f >= 1 && (this.buf[this.pos++] |= 0x80)) + } + this.pos++ + } + + // Adapted from avro-js + readVarInt(): number { + let n = 0 + let k = 0 + let b, h, f, fk + + do { + b = this.buf[this.pos++] + h = b & 0x80 + n |= (b & 0x7f) << k + k += 7 + } while (h && k < 28) + + if (h) { + // Switch to float arithmetic, otherwise we might overflow. + f = n + fk = 268435456 // 2 ** 28. + do { + b = this.buf[this.pos++] + f += (b & 0x7f) * fk + fk *= 128 + } while (b & 0x80) + return (f % 2 ? -(f + 1) : f) / 2 + } + + return (n >> 1) ^ -(n & 1) + } +} diff --git a/schemaregistry/serde/json-util.ts b/schemaregistry/serde/json-util.ts new file mode 100644 index 00000000..35a3daef --- /dev/null +++ b/schemaregistry/serde/json-util.ts @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2023 Menglin "Mark" Xu + * (c) 2024 Confluent, Inc. + * + * This software may be modified and distributed under the terms + * of the MIT license. See the LICENSE.txt file for details. + */ +import validator from 'validator'; +import { deepStrictEqual } from 'assert'; + +/** + * Generate JSON schema from value. + * + * @param value - Value. + * @returns JSON schema. 
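+ *
+ * For example, `generateSchema({ id: 1, email: 'a@b.com' })` returns
+ * `{ type: 'object', properties: { id: { type: 'integer' },
+ * email: { type: 'string', format: 'email' } } }`.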
+ */ +export function generateSchema(value: any): any { + switch (true) { + case value === undefined: + case typeof value === 'undefined': + case typeof value === 'function': + case typeof value === 'symbol': + case value instanceof Date: + throw new TypeError(`Invalid JSON value: ${String(value)}`); + + /** + * @see https://json-schema.org/understanding-json-schema/reference/null.html + */ + case value === null: + return { type: 'null' }; + + /** + * @see https://json-schema.org/understanding-json-schema/reference/numeric.html + */ + case typeof value === 'number': + return { type: Number.isInteger(value) ? 'integer' : 'number' }; + + /** + * @see https://json-schema.org/understanding-json-schema/reference/boolean.html + */ + case typeof value === 'boolean': + return { type: 'boolean' }; + + /** + * @see https://json-schema.org/understanding-json-schema/reference/string.html + */ + case typeof value === 'string': + if (validator.isISO8601(value)) { + return { + type: 'string', + format: value.includes('T') ? 'date-time' : 'date', + }; + } + + if (validator.isTime(value.split('+')[0], { mode: 'withSeconds' })) { + return { type: 'string', format: 'time' }; + } + + if (validator.isEmail(value)) { + return { type: 'string', format: 'email' }; + } + + return { type: 'string' }; + + /** + * @see https://json-schema.org/understanding-json-schema/reference/array.html + */ + case Array.isArray(value): + if (value.length === 1) { + return { type: 'array', items: generateSchema(value[0]) }; + } + + if (value.length > 1) { + const items = value.map(generateSchema); + if (deepEqual(...items)) { + return { type: 'array', items: items[0] }; + } + } + + return { type: 'array' }; + + /** + * @see https://json-schema.org/understanding-json-schema/reference/object.html + */ + case value instanceof Object: + if (!Object.keys(value).length) { + return { type: 'object' }; + } + + return { + type: 'object', + properties: Object.entries(value).reduce( + (accumulator, [key, value]) => { + accumulator[key] = generateSchema(value); + return accumulator; + }, + {} as Record, + ), + }; + + /* istanbul ignore next */ + default: + throw new TypeError(`Invalid JSON value: ${value}`); + } +} + +/** + * Tests for deep equality between the `actual` and `expected` parameters. 
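+ *
+ * For example, `deepEqual({ a: 1 }, { a: 1 })` returns true, while
+ * `deepEqual({ a: 1 }, { a: 2 })` returns false.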
+ */ +export function deepEqual(...args: unknown[]): boolean { + try { + for (let index = 0, count = args.length; index < count; index++) { + if (index + 1 === count) { + continue; + } + deepStrictEqual(args[index], args[index + 1]); + } + return true; + } catch (error) { + return false; + } +} diff --git a/schemaregistry/serde/json.ts b/schemaregistry/serde/json.ts new file mode 100644 index 00000000..77487a4f --- /dev/null +++ b/schemaregistry/serde/json.ts @@ -0,0 +1,456 @@ +import { + Deserializer, DeserializerConfig, + FieldTransform, + FieldType, Migration, RefResolver, RuleConditionError, + RuleContext, + SerdeType, SerializationError, + Serializer, SerializerConfig +} from "./serde"; +import { + Client, RuleMode, + SchemaInfo +} from "../schemaregistry-client"; +import Ajv, {ErrorObject} from "ajv"; +import Ajv2019 from "ajv/dist/2019"; +import Ajv2020 from "ajv/dist/2020"; +import * as draft6MetaSchema from 'ajv/dist/refs/json-schema-draft-06.json' +import * as draft7MetaSchema from 'ajv/dist/refs/json-schema-draft-07.json' +import { + DereferencedJSONSchemaDraft07, + DereferencedJSONSchemaDraft2020_12, +} from '@criteria/json-schema' +import { + dereferenceJSONSchema as dereferenceJSONSchemaDraft2020_12, +} from '@criteria/json-schema/draft-2020-12' +import { + dereferenceJSONSchema as dereferenceJSONSchemaDraft07, +} from '@criteria/json-schema/draft-07' +import { validateJSON } from '@criteria/json-schema-validation' +import { LRUCache } from "lru-cache"; +import { generateSchema } from "./json-util"; +import {RuleRegistry} from "./rule-registry"; +import stringify from "json-stringify-deterministic"; + +export interface ValidateFunction { + (this: any, data: any): boolean + errors?: null | ErrorObject[] +} + +export type DereferencedJSONSchema = DereferencedJSONSchemaDraft07 | DereferencedJSONSchemaDraft2020_12 + +export type JsonSerdeConfig = ConstructorParameters[0] & { + validate?: boolean +} + +export interface JsonSerde { + schemaToTypeCache: LRUCache + schemaToValidateCache: LRUCache +} + +/** + * JsonSerializerConfig is the configuration for the JsonSerializer. + */ +export type JsonSerializerConfig = SerializerConfig & JsonSerdeConfig + +/** + * JsonSerializer is a serializer for JSON messages. + */ +export class JsonSerializer extends Serializer implements JsonSerde { + schemaToTypeCache: LRUCache + schemaToValidateCache: LRUCache + + /** + * Creates a new JsonSerializer. + * @param client - the schema registry client + * @param serdeType - the serializer type + * @param conf - the serializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: JsonSerializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.schemaToTypeCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 }) + this.schemaToValidateCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 }) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + /** + * Serializes a message. 
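+ *
+ * The returned Buffer uses the Confluent wire format: a magic byte and a
+ * 4-byte schema id, followed by the JSON-encoded message (which is why
+ * `deserialize` skips the first 5 bytes of the payload).
+ *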
+ * @param topic - the topic + * @param msg - the message + */ + override async serialize(topic: string, msg: any): Promise { + if (this.client == null) { + throw new Error('client is not initialized') + } + if (msg == null) { + throw new Error('message is empty') + } + + const jsonSchema = JsonSerializer.messageToSchema(msg) + const schema: SchemaInfo = { + schemaType: 'JSON', + schema: JSON.stringify(jsonSchema), + } + const [id, info] = await this.getId(topic, msg, schema) + const subject = this.subjectName(topic, info) + msg = await this.executeRules(subject, topic, RuleMode.WRITE, null, info, msg, null) + const msgBytes = Buffer.from(JSON.stringify(msg)) + if ((this.conf as JsonSerdeConfig).validate) { + const validate = await this.toValidateFunction(info) + if (validate != null && !validate(msg)) { + throw new SerializationError('Invalid message') + } + } + return this.writeBytes(id, msgBytes) + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const schema = await this.toType(ctx.target) + if (typeof schema === 'boolean') { + return msg + } + return await transform(ctx, schema, '$', msg, fieldTransform) + } + + async toType(info: SchemaInfo): Promise { + return toType(this.client, this.conf as JsonDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }) + } + + async toValidateFunction(info: SchemaInfo): Promise { + return await toValidateFunction(this.client, this.conf as JsonDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }, + ) + } + + static messageToSchema(msg: any): DereferencedJSONSchema { + return generateSchema(msg) + } +} + +/** + * JsonDeserializerConfig is the configuration for the JsonDeserializer. + */ +export type JsonDeserializerConfig = DeserializerConfig & JsonSerdeConfig + +/** + * JsonDeserializer is a deserializer for JSON messages. + */ +export class JsonDeserializer extends Deserializer implements JsonSerde { + schemaToTypeCache: LRUCache + schemaToValidateCache: LRUCache + + /** + * Creates a new JsonDeserializer. + * @param client - the schema registry client + * @param serdeType - the deserializer type + * @param conf - the deserializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: JsonDeserializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.schemaToTypeCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 }) + this.schemaToValidateCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 }) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + /** + * Deserializes a message. 
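+ *
+ * When `validate` is enabled in the configuration, the payload is checked
+ * against the writer schema before any migrations or READ rules are applied.
+ *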
+ * @param topic - the topic + * @param payload - the message payload + */ + override async deserialize(topic: string, payload: Buffer): Promise { + if (!Buffer.isBuffer(payload)) { + throw new Error('Invalid buffer') + } + if (payload.length === 0) { + return null + } + + const info = await this.getSchema(topic, payload) + if ((this.conf as JsonSerdeConfig).validate) { + const validate = await this.toValidateFunction(info) + if (validate != null && !validate(JSON.parse(payload.subarray(5).toString()))) { + throw new SerializationError('Invalid message') + } + + } + const subject = this.subjectName(topic, info) + const readerMeta = await this.getReaderSchema(subject) + let migrations: Migration[] = [] + if (readerMeta != null) { + migrations = await this.getMigrations(subject, info, readerMeta) + } + const msgBytes = payload.subarray(5) + let msg = JSON.parse(msgBytes.toString()) + if (migrations.length > 0) { + msg = await this.executeMigrations(migrations, subject, topic, msg) + } + let target: SchemaInfo + if (readerMeta != null) { + target = readerMeta + } else { + target = info + } + msg = this.executeRules(subject, topic, RuleMode.READ, null, target, msg, null) + return msg + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const schema = await this.toType(ctx.target) + return await transform(ctx, schema, '$', msg, fieldTransform) + } + + toType(info: SchemaInfo): DereferencedJSONSchema { + return toType(this.client, this.conf as JsonDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }) + } + + async toValidateFunction(info: SchemaInfo): Promise { + return await toValidateFunction(this.client, this.conf as JsonDeserializerConfig, this, info, async (client, info) => { + const deps = new Map() + await this.resolveReferences(client, info, deps) + return deps + }, + ) + } +} + +async function toValidateFunction( + client: Client, + conf: JsonSerdeConfig, + serde: JsonSerde, + info: SchemaInfo, + refResolver: RefResolver, +): Promise { + let fn = serde.schemaToValidateCache.get(stringify(info.schema)) + if (fn != null) { + return fn + } + + const deps = await refResolver(client, info) + + const json = JSON.parse(info.schema) + const spec = json.$schema + if (spec === 'http://json-schema.org/draft/2020-12/schema' + || spec === 'https://json-schema.org/draft/2020-12/schema') { + const ajv2020 = new Ajv2020(conf as JsonSerdeConfig) + ajv2020.addKeyword("confluent:tags") + deps.forEach((schema, name) => { + ajv2020.addSchema(JSON.parse(schema), name) + }) + fn = ajv2020.compile(json) + } else { + const ajv = new Ajv2019(conf as JsonSerdeConfig) + ajv.addKeyword("confluent:tags") + ajv.addMetaSchema(draft6MetaSchema) + ajv.addMetaSchema(draft7MetaSchema) + deps.forEach((schema, name) => { + ajv.addSchema(JSON.parse(schema), name) + }) + fn = ajv.compile(json) + } + serde.schemaToValidateCache.set(stringify(info.schema), fn) + return fn +} + +async function toType( + client: Client, + conf: JsonSerdeConfig, + serde: JsonSerde, + info: SchemaInfo, + refResolver: RefResolver, +): Promise { + let type = serde.schemaToTypeCache.get(stringify(info.schema)) + if (type != null) { + return type + } + + const deps = await refResolver(client, info) + + const retrieve = (uri: string) => { + const data = deps.get(uri) + if (data == null) { + throw new SerializationError(`Schema not found: ${uri}`) + } + return JSON.parse(data) + } + + const json = 
JSON.parse(info.schema) + const spec = json.$schema + let schema + if (spec === 'http://json-schema.org/draft/2020-12/schema' + || spec === 'https://json-schema.org/draft/2020-12/schema') { + schema = await dereferenceJSONSchemaDraft2020_12(json, { retrieve }) + } else { + schema = await dereferenceJSONSchemaDraft07(json, { retrieve }) + } + serde.schemaToTypeCache.set(stringify(info.schema), schema) + return schema +} + +async function transform(ctx: RuleContext, schema: DereferencedJSONSchema, path:string, msg: any, fieldTransform: FieldTransform): Promise { + if (msg == null || schema == null || typeof schema === 'boolean') { + return msg + } + let fieldCtx = ctx.currentField() + if (fieldCtx != null) { + fieldCtx.type = getType(schema) + } + if (schema.allOf != null && schema.allOf.length > 0) { + let subschema = validateSubschemas(schema.allOf, msg) + if (subschema != null) { + return await transform(ctx, subschema, path, msg, fieldTransform) + } + } + if (schema.anyOf != null && schema.anyOf.length > 0) { + let subschema = validateSubschemas(schema.anyOf, msg) + if (subschema != null) { + return await transform(ctx, subschema, path, msg, fieldTransform) + } + } + if (schema.oneOf != null && schema.oneOf.length > 0) { + let subschema = validateSubschemas(schema.oneOf, msg) + if (subschema != null) { + return await transform(ctx, subschema, path, msg, fieldTransform) + } + } + if (schema.items != null) { + if (Array.isArray(msg)) { + for (let i = 0; i < msg.length; i++) { + msg[i] = await transform(ctx, schema.items, path, msg[i], fieldTransform) + } + return msg + } + } + if (schema.$ref != null) { + return await transform(ctx, schema.$ref, path, msg, fieldTransform) + } + let type = getType(schema) + switch (type) { + case FieldType.RECORD: + if (schema.properties != null) { + for (let [propName, propSchema] of Object.entries(schema.properties)) { + let value = msg[propName] + await transformField(ctx, path, propName, msg, value, propSchema, fieldTransform) + } + } + return msg + case FieldType.ENUM: + case FieldType.STRING: + case FieldType.INT: + case FieldType.DOUBLE: + case FieldType.BOOLEAN: + if (fieldCtx != null) { + const ruleTags = ctx.rule.tags + if (ruleTags == null || ruleTags.length === 0 || !disjoint(new Set(ruleTags), fieldCtx.tags)) { + return await fieldTransform.transform(ctx, fieldCtx, msg) + } + } + } + + return msg +} + +async function transformField(ctx: RuleContext, path: string, propName: string, msg: any, value: any, + propSchema: DereferencedJSONSchema, + fieldTransform: FieldTransform): Promise { + const fullName = path + '.' 
+ propName + try { + ctx.enterField(msg, fullName, propName, getType(propSchema), getInlineTags(propSchema)) + const newVal = await transform(ctx, propSchema, fullName, value, fieldTransform) + if (ctx.rule.kind === 'CONDITION') { + if (newVal === false) { + throw new RuleConditionError(ctx.rule) + } + } else { + msg[propName] = newVal + } + } finally { + ctx.leaveField() + } +} + +function validateSubschemas(subschemas: DereferencedJSONSchema[], msg: any): DereferencedJSONSchema | null { + for (let subschema of subschemas) { + try { + validateJSON(msg, subschema) + return subschema + } catch (error) { + // ignore + } + } + return null +} + +function getType(schema: DereferencedJSONSchema): FieldType { + if (typeof schema === 'boolean') { + return FieldType.NULL + } + if (schema.type == null) { + return FieldType.NULL + } + if (Array.isArray(schema.type)) { + return FieldType.COMBINED + } + if (schema.const != null || schema.enum != null) { + return FieldType.ENUM + } + switch (schema.type) { + case 'object': + if (schema.properties == null || Object.keys(schema.properties).length === 0) { + return FieldType.MAP + } + return FieldType.RECORD + case 'array': + return FieldType.ARRAY + case 'string': + return FieldType.STRING + case 'integer': + return FieldType.INT + case 'number': + return FieldType.DOUBLE + case 'boolean': + return FieldType.BOOLEAN + case 'null': + return FieldType.NULL + default: + return FieldType.NULL + } +} + +function getInlineTags(schema: DereferencedJSONSchema): Set { + let tagsKey = 'confluent:tags' as keyof DereferencedJSONSchema + return new Set(schema[tagsKey]) +} + +function disjoint(tags1: Set, tags2: Set): boolean { + for (let tag of tags1) { + if (tags2.has(tag)) { + return false + } + } + return true +} + + + diff --git a/schemaregistry/serde/protobuf.ts b/schemaregistry/serde/protobuf.ts new file mode 100644 index 00000000..b545b8bb --- /dev/null +++ b/schemaregistry/serde/protobuf.ts @@ -0,0 +1,557 @@ +import { + Deserializer, + DeserializerConfig, + FieldTransform, + FieldType, RuleConditionError, + RuleContext, + SerdeType, SerializationError, + Serializer, + SerializerConfig +} from "./serde"; +import { + Client, Reference, RuleMode, + SchemaInfo, + SchemaMetadata +} from "../schemaregistry-client"; +import { + createFileRegistry, createMutableRegistry, + DescField, + DescFile, + DescMessage, + FileRegistry, + fromBinary, getExtension, hasExtension, MutableRegistry, + ScalarType, + toBinary, +} from "@bufbuild/protobuf"; +import { + file_google_protobuf_any, + file_google_protobuf_api, + file_google_protobuf_descriptor, + file_google_protobuf_duration, + file_google_protobuf_empty, + file_google_protobuf_field_mask, + file_google_protobuf_source_context, + file_google_protobuf_struct, + file_google_protobuf_timestamp, file_google_protobuf_type, file_google_protobuf_wrappers, + FileDescriptorProtoSchema +} from "@bufbuild/protobuf/wkt"; +import { BufferWrapper, MAX_VARINT_LEN_64 } from "./buffer-wrapper"; +import { LRUCache } from "lru-cache"; +import {field_meta, file_confluent_meta, Meta} from "../confluent/meta_pb"; +import {RuleRegistry} from "./rule-registry"; +import stringify from "json-stringify-deterministic"; +import {file_confluent_types_decimal} from "../confluent/types/decimal_pb"; +import {file_google_type_calendar_period} from "../google/type/calendar_period_pb"; +import {file_google_type_color} from "../google/type/color_pb"; +import {file_google_type_date} from "../google/type/date_pb"; +import {file_google_type_datetime} from 
"../google/type/datetime_pb"; +import {file_google_type_dayofweek} from "../google/type/dayofweek_pb"; +import {file_google_type_fraction} from "../google/type/fraction_pb"; +import {file_google_type_expr} from "../google/type/expr_pb"; +import {file_google_type_latlng} from "../google/type/latlng_pb"; +import {file_google_type_money} from "../google/type/money_pb"; +import {file_google_type_postal_address} from "../google/type/postal_address_pb"; +import {file_google_type_quaternion} from "../google/type/quaternion_pb"; +import {file_google_type_timeofday} from "../google/type/timeofday_pb"; +import {file_google_type_month} from "../google/type/month_pb"; + +const builtinDeps = new Map([ + ['confluent/meta.proto', file_confluent_meta], + ['confluent/type/decimal.proto', file_confluent_types_decimal], + ['google/type/calendar_period.proto', file_google_type_calendar_period], + ['google/type/color.proto', file_google_type_color], + ['google/type/date.proto', file_google_type_date], + ['google/type/datetime.proto', file_google_type_datetime], + ['google/type/dayofweek.proto', file_google_type_dayofweek], + ['google/type/expr.proto', file_google_type_expr], + ['google/type/fraction.proto', file_google_type_fraction], + ['google/type/latlng.proto', file_google_type_latlng], + ['google/type/money.proto', file_google_type_money], + ['google/type/month.proto', file_google_type_month], + ['google/type/postal_address.proto', file_google_type_postal_address], + ['google/type/quaternion.proto', file_google_type_quaternion], + ['google/type/timeofday.proto', file_google_type_timeofday], + ['google/protobuf/any.proto', file_google_protobuf_any], + ['google/protobuf/api.proto', file_google_protobuf_api], + ['google/protobuf/descriptor.proto', file_google_protobuf_descriptor], + ['google/protobuf/duration.proto', file_google_protobuf_duration], + ['google/protobuf/empty.proto', file_google_protobuf_empty], + ['google/protobuf/field_mask.proto', file_google_protobuf_field_mask], + ['google/protobuf/source_context.proto', file_google_protobuf_source_context], + ['google/protobuf/struct.proto', file_google_protobuf_struct], + ['google/protobuf/timestamp.proto', file_google_protobuf_timestamp], + ['google/protobuf/type.proto', file_google_protobuf_type], + ['google/protobuf/wrappers.proto', file_google_protobuf_wrappers], +]) + +export interface ProtobufSerde { + schemaToDescCache: LRUCache +} + +/** + * ProtobufSerializerConfig is the configuration for ProtobufSerializer. + */ +export type ProtobufSerializerConfig = SerializerConfig & { + registry?: MutableRegistry +} + +/** + * ProtobufSerializer is a serializer for Protobuf messages. + */ +export class ProtobufSerializer extends Serializer implements ProtobufSerde { + registry: MutableRegistry + schemaToDescCache: LRUCache + descToSchemaCache: LRUCache + + /** + * Creates a new ProtobufSerializer. + * @param client - the schema registry client + * @param serdeType - the serializer type + * @param conf - the serializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: ProtobufSerializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.registry = conf.registry ?? createMutableRegistry() + this.schemaToDescCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 } ) + this.descToSchemaCache = new LRUCache({ max: this.config().cacheCapacity ?? 
1000 } ) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + /** + * Serializes a message. + * @param topic - the topic + * @param msg - the message + */ + override async serialize(topic: string, msg: any): Promise { + if (this.client == null) { + throw new Error('client is not initialized') + } + if (msg == null) { + throw new Error('message is empty') + } + + const typeName = msg.$typeName + if (typeName == null) { + throw new SerializationError('message type name is empty') + } + const messageDesc = this.registry.getMessage(typeName) + if (messageDesc == null) { + throw new SerializationError('message descriptor not in registry') + } + const fileDesc = messageDesc.file + const schema = await this.getSchemaInfo(fileDesc) + const [id, info] = await this.getId(topic, msg, schema, 'serialized') + const subject = this.subjectName(topic, info) + msg = await this.executeRules(subject, topic, RuleMode.WRITE, null, info, msg, null) + const msgIndexBytes = this.toMessageIndexBytes(messageDesc) + const msgBytes = Buffer.from(toBinary(messageDesc, msg)) + return this.writeBytes(id, Buffer.concat([msgIndexBytes, msgBytes])) + } + + async getSchemaInfo(fileDesc: DescFile): Promise { + const value = this.descToSchemaCache.get(fileDesc.name) + if (value != null) { + return value + } + const deps = this.toProtobufSchema(fileDesc) + const autoRegister = this.config().autoRegisterSchemas + const normalize = this.config().normalizeSchemas + const metadata = await this.resolveDependencies( + fileDesc, deps, "", Boolean(autoRegister), Boolean(normalize)) + const info = { + schema: metadata.schema, + schemaType: metadata.schemaType, + references: metadata.references, + metadata: metadata.metadata, + ruleSet: metadata.ruleSet, + } + this.descToSchemaCache.set(fileDesc.name, info) + return info + } + + toProtobufSchema(fileDesc: DescFile): Map { + const deps = new Map() + this.toDependencies(fileDesc, deps) + return deps + } + + toDependencies(fileDesc: DescFile, deps: Map) { + deps.set(fileDesc.name, Buffer.from(toBinary(FileDescriptorProtoSchema, fileDesc.proto)).toString('base64')) + fileDesc.dependencies.forEach((dep) => { + if (!isBuiltin(dep.name)) { + this.toDependencies(dep, deps) + } + }) + } + + async resolveDependencies(fileDesc: DescFile, deps: Map, subject: string, + autoRegister: boolean, normalize: boolean): Promise { + const refs: Reference[] = [] + for (let i = 0; i < fileDesc.dependencies.length; i++) { + const dep = fileDesc.dependencies[i] + const depName = dep.name + '.proto' + if (isBuiltin(depName)) { + continue + } + const ref = await this.resolveDependencies(dep, deps, depName, autoRegister, normalize) + if (ref == null) { + throw new SerializationError('dependency not found') + } + refs.push({name: depName, subject: ref.subject!, version: ref.version!}) + } + const info: SchemaInfo = { + schema: deps.get(fileDesc.name)!, + schemaType: 'PROTOBUF', + references: refs + } + let id = -1 + let version = 0 + if (subject !== '') { + if (autoRegister) { + id = await this.client.register(subject, info, normalize) + } else { + id = await this.client.getId(subject, info, normalize) + + } + version = await this.client.getVersion(subject, info, normalize) + } + return { + id: id, + subject: subject, + version: version, + schema: info.schema, 
+ schemaType: info.schemaType, + references: info.references, + metadata: info.metadata, + ruleSet: info.ruleSet, + } + } + + toMessageIndexBytes(messageDesc: DescMessage): Buffer { + const msgIndexes: number[] = this.toMessageIndexes(messageDesc, 0) + const buffer = Buffer.alloc((1 + msgIndexes.length) * MAX_VARINT_LEN_64) + const bw = new BufferWrapper(buffer) + bw.writeVarInt(msgIndexes.length) + for (let i = 0; i < msgIndexes.length; i++) { + bw.writeVarInt(msgIndexes[i]) + } + return buffer.subarray(0, bw.pos) + } + + toMessageIndexes(messageDesc: DescMessage, count: number): number[] { + const index = this.toIndex(messageDesc) + const parent = messageDesc.parent + if (parent == null) { + // parent is FileDescriptor, we reached the top of the stack, so we are + // done. Allocate an array large enough to hold count+1 entries and + // populate first value with index + const msgIndexes: number[] = [] + msgIndexes.push(index) + return msgIndexes + } else { + const msgIndexes = this.toMessageIndexes(parent, count + 1) + msgIndexes.push(index) + return msgIndexes + } + } + + toIndex(messageDesc: DescMessage) { + const parent = messageDesc.parent + if (parent == null) { + const fileDesc = messageDesc.file + for (let i = 0; i < fileDesc.messages.length; i++) { + if (fileDesc.messages[i] === messageDesc) { + return i + } + } + } else { + for (let i = 0; i < parent.nestedMessages.length; i++) { + if (parent.nestedMessages[i] === messageDesc) { + return i + } + } + } + throw new SerializationError('message descriptor not found in file descriptor'); + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const typeName = msg.$typeName + if (typeName == null) { + throw new SerializationError('message type name is empty') + } + const messageDesc = this.registry.getMessage(typeName) + if (messageDesc == null) { + throw new SerializationError('message descriptor not in registry') + } + return await transform(ctx, messageDesc, msg, fieldTransform) + } +} + +/** + * ProtobufDeserializerConfig is the configuration for ProtobufDeserializer. + */ +export type ProtobufDeserializerConfig = DeserializerConfig + +/** + * ProtobufDeserializer is a deserializer for Protobuf messages. + */ +export class ProtobufDeserializer extends Deserializer implements ProtobufSerde { + registry: FileRegistry + schemaToDescCache: LRUCache + + /** + * Creates a new ProtobufDeserializer. + * @param client - the schema registry client + * @param serdeType - the deserializer type + * @param conf - the deserializer configuration + * @param ruleRegistry - the rule registry + */ + constructor(client: Client, serdeType: SerdeType, conf: ProtobufDeserializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + this.registry = createFileRegistry() + this.schemaToDescCache = new LRUCache({ max: this.config().cacheCapacity ?? 1000 } ) + this.fieldTransformer = async (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => { + return await this.fieldTransform(ctx, fieldTransform, msg) + } + for (const rule of this.ruleRegistry.getExecutors()) { + rule.configure(client.config(), new Map(Object.entries(conf.ruleConfig ?? {}))) + } + } + + /** + * Deserializes a message. 
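+ * Resolves the writer schema to a file descriptor, reads the varint-encoded message indexes that
+ * follow the 5-byte header to locate the message descriptor, decodes the bytes with fromBinary,
+ * and then applies READ rules (migration rules are not yet supported for Protobuf).
+ *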
+ * @param topic - the topic + * @param payload - the message payload + */ + override async deserialize(topic: string, payload: Buffer): Promise { + if (!Buffer.isBuffer(payload)) { + throw new Error('Invalid buffer') + } + if (payload.length === 0) { + return null + } + + const info = await this.getSchema(topic, payload, 'serialized') + const fd = await this.toFileDesc(this.client, info) + const [bytesRead, msgIndexes] = this.readMessageIndexes(payload.subarray(5)) + const messageDesc = this.toMessageDesc(fd, msgIndexes) + + const subject = this.subjectName(topic, info) + const readerMeta = await this.getReaderSchema(subject, 'serialized') + + const msgBytes = payload.subarray(5 + bytesRead) + let msg = fromBinary(messageDesc, msgBytes) + + // Currently JavaScript does not support migration rules + // because of lack of support for DynamicMessage + let target: SchemaInfo + if (readerMeta != null) { + target = readerMeta + } else { + target = info + } + msg = await this.executeRules(subject, topic, RuleMode.READ, null, target, msg, null) + return msg + } + + async fieldTransform(ctx: RuleContext, fieldTransform: FieldTransform, msg: any): Promise { + const typeName = msg.$typeName + if (typeName == null) { + throw new SerializationError('message type name is empty') + } + const messageDesc = this.registry.getMessage(typeName) + if (messageDesc == null) { + throw new SerializationError('message descriptor not in registry') + } + return await transform(ctx, messageDesc, msg, fieldTransform) + } + + async toFileDesc(client: Client, info: SchemaInfo): Promise { + const value = this.schemaToDescCache.get(stringify(info.schema)) + if (value != null) { + return value + } + const fileDesc = await this.parseFileDesc(client, info) + if (fileDesc == null) { + throw new SerializationError('file descriptor not found') + } + this.schemaToDescCache.set(stringify(info.schema), fileDesc) + return fileDesc + } + + async parseFileDesc(client: Client, info: SchemaInfo): Promise { + const deps = new Map() + await this.resolveReferences(client, info, deps, 'serialized') + const fileDesc = fromBinary(FileDescriptorProtoSchema, Buffer.from(info.schema, 'base64')) + const resolve = (depName: string) => { + if (isBuiltin(depName)) { + const dep = builtinDeps.get(depName) + if (dep == null) { + throw new SerializationError(`dependency ${depName} not found`) + } + return dep + } else { + const dep = deps.get(depName) + if (dep == null) { + throw new SerializationError(`dependency ${depName} not found`) + } + const fileDesc = fromBinary(FileDescriptorProtoSchema, Buffer.from(dep, 'base64')) + fileDesc.name = depName + return fileDesc + } + } + const fileRegistry = createFileRegistry(fileDesc, resolve) + this.registry = createFileRegistry(this.registry, fileRegistry) + return this.registry.getFile(fileDesc.name) + } + + readMessageIndexes(payload: Buffer): [number, number[]] { + const bw = new BufferWrapper(payload) + const count = bw.readVarInt() + const msgIndexes = [] + for (let i = 0; i < count; i++) { + msgIndexes.push(bw.readVarInt()) + } + return [bw.pos, msgIndexes] + } + + toMessageDesc(fd: DescFile, msgIndexes: number[]): DescMessage { + let index = msgIndexes[0] + if (msgIndexes.length === 1) { + return fd.messages[index] + } + return this.toNestedMessageDesc(fd.messages[index], msgIndexes.slice(1)) + } + + toNestedMessageDesc(parent: DescMessage, msgIndexes: number[]): DescMessage { + let index = msgIndexes[0] + if (msgIndexes.length === 1) { + return parent.nestedMessages[index] + } + return 
this.toNestedMessageDesc(parent.nestedMessages[index], msgIndexes.slice(1)) + } +} + +async function transform(ctx: RuleContext, descriptor: DescMessage, msg: any, fieldTransform: FieldTransform): Promise { + if (msg == null || descriptor == null) { + return msg + } + if (Array.isArray(msg)) { + for (let i = 0; i < msg.length; i++) { + msg[i] = await transform(ctx, descriptor, msg[i], fieldTransform) + } + } + if (msg instanceof Map) { + return msg + } + const typeName = msg.$typeName + if (typeName != null) { + const fields = descriptor.fields + for (let i = 0; i < fields.length; i++) { + const fd = fields[i] + await transformField(ctx, fd, descriptor, msg, fieldTransform) + } + return msg + } + const fieldCtx = ctx.currentField() + if (fieldCtx != null) { + const ruleTags = ctx.rule.tags ?? [] + if (ruleTags == null || ruleTags.length === 0 || !disjoint(new Set(ruleTags), fieldCtx.tags)) { + return await fieldTransform.transform(ctx, fieldCtx, msg) + } + } + return msg +} + +async function transformField(ctx: RuleContext, fd: DescField, desc: DescMessage, + msg: any, fieldTransform: FieldTransform) { + try { + ctx.enterField( + msg, + desc.name + '.' + fd.name, + fd.name, + getType(fd), + getInlineTags(fd) + ) + const value = msg[fd.name] + const newValue = await transform(ctx, desc, value, fieldTransform) + if (ctx.rule.kind === 'CONDITION') { + if (newValue === false) { + throw new RuleConditionError(ctx.rule) + } + } else { + msg[fd.name] = newValue + } + } finally { + ctx.leaveField() + } +} + +function getType(fd: DescField): FieldType { + switch (fd.fieldKind) { + case 'map': + return FieldType.MAP + case 'list': + return FieldType.ARRAY + case 'message': + return FieldType.RECORD + case 'enum': + return FieldType.ENUM + case 'scalar': + switch (fd.scalar) { + case ScalarType.STRING: + return FieldType.STRING + case ScalarType.BYTES: + return FieldType.BYTES + case ScalarType.INT32: + case ScalarType.SINT32: + case ScalarType.UINT32: + case ScalarType.FIXED32: + case ScalarType.SFIXED32: + return FieldType.INT + case ScalarType.INT64: + case ScalarType.SINT64: + case ScalarType.UINT64: + case ScalarType.FIXED64: + case ScalarType.SFIXED64: + return FieldType.LONG + case ScalarType.FLOAT: + case ScalarType.DOUBLE: + return FieldType.DOUBLE + case ScalarType.BOOL: + return FieldType.BOOLEAN + } + default: + return FieldType.NULL + } +} + +function getInlineTags(fd: DescField): Set { + const options = fd.proto.options + if (options != null && hasExtension(options, field_meta)) { + const option: Meta = getExtension(options, field_meta) + return new Set(option.tags) + } + return new Set() +} + +function disjoint(tags1: Set, tags2: Set): boolean { + for (let tag of tags1) { + if (tags2.has(tag)) { + return false + } + } + return true +} + +function isBuiltin(name: string): boolean { + return name.startsWith('confluent/') || + name.startsWith('google/protobuf/') || + name.startsWith('google/type/') +} diff --git a/schemaregistry/serde/rule-registry.ts b/schemaregistry/serde/rule-registry.ts new file mode 100644 index 00000000..2c6e9278 --- /dev/null +++ b/schemaregistry/serde/rule-registry.ts @@ -0,0 +1,88 @@ +import {RuleAction, RuleExecutor} from "./serde"; + +/** + * RuleRegistry is used to register and fetch rule executors and actions. + */ +export class RuleRegistry { + private ruleExecutors: Map = new Map() + private ruleActions: Map = new Map() + + private static globalInstance: RuleRegistry = new RuleRegistry() + + /** + * registerExecutor is used to register a new rule executor. 
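+ * Executors are keyed by their type() and looked up by that name when rules are executed.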
+ * @param ruleExecutor - the rule executor to register + */ + public registerExecutor(ruleExecutor: RuleExecutor): void { + this.ruleExecutors.set(ruleExecutor.type(), ruleExecutor) + } + + /** + * getExecutor fetches a rule executor by a given name. + * @param name - the name of the rule executor to fetch + */ + public getExecutor(name: string): RuleExecutor | undefined { + return this.ruleExecutors.get(name) + } + + /** + * getExecutors fetches all rule executors + */ + public getExecutors(): RuleExecutor[] { + return Array.from(this.ruleExecutors.values()) + } + + /** + * registerAction is used to register a new rule action. + * @param ruleAction - the rule action to register + */ + public registerAction(ruleAction: RuleAction): void { + this.ruleActions.set(ruleAction.type(), ruleAction) + } + + /** + * getAction fetches a rule action by a given name. + * @param name - the name of the rule action to fetch + */ + public getAction(name: string): RuleAction | undefined { + return this.ruleActions.get(name) + } + + /** + * getActions fetches all rule actions + */ + public getActions(): RuleAction[] { + return Array.from(this.ruleActions.values()) + } + + /** + * clear clears all registered rules + */ + public clear(): void { + this.ruleExecutors.clear() + this.ruleActions.clear() + } + + /** + * getGlobalInstance fetches the global instance of the rule registry + */ + public static getGlobalInstance(): RuleRegistry { + return RuleRegistry.globalInstance + } + + /** + * registerRuleExecutor is used to register a new rule executor globally. + * @param ruleExecutor - the rule executor to register + */ + public static registerRuleExecutor(ruleExecutor: RuleExecutor): void { + RuleRegistry.globalInstance.registerExecutor(ruleExecutor) + } + + /** + * registerRuleAction is used to register a new rule action globally. 
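+ * Shorthand for registering the action on the instance returned by getGlobalInstance().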
+ * @param ruleAction - the rule action to register + */ + public static registerRuleAction(ruleAction: RuleAction): void { + RuleRegistry.globalInstance.registerAction(ruleAction) + } +} diff --git a/schemaregistry/serde/serde.ts b/schemaregistry/serde/serde.ts new file mode 100644 index 00000000..52c6eec2 --- /dev/null +++ b/schemaregistry/serde/serde.ts @@ -0,0 +1,796 @@ +import {match} from './wildcard-matcher'; +import { + Client, + Rule, + RuleMode, + RuleSet, + SchemaInfo, + SchemaMetadata +} from "../schemaregistry-client"; +import {RuleRegistry} from "./rule-registry"; +import {ClientConfig} from "../rest-service"; + +export enum SerdeType { + KEY = 'KEY', + VALUE = 'VALUE' +} + +export const MAGIC_BYTE = Buffer.alloc(1) + +/** + * SerializationError represents a serialization error + */ +export class SerializationError extends Error { + + constructor(message?: string) { + super(message) + } +} + +export interface SerdeConfig { + // useLatestVersion specifies whether to use the latest schema version + useLatestVersion?: boolean + // useLatestWithMetadata specifies whether to use the latest schema with metadata + useLatestWithMetadata?: { [key: string]: string }; + // cacheCapacity specifies the cache capacity + cacheCapacity?: number, + // cacheLatestTtlSecs specifies the cache latest TTL in seconds + cacheLatestTtlSecs?: number + // ruleConfig specifies configuration options to the rules + ruleConfig?: { [key: string]: string }; + // subjectNameStrategy specifies a function to generate a subject name + subjectNameStrategy?: SubjectNameStrategyFunc +} + +export type RefResolver = (client: Client, info: SchemaInfo) => Promise> + +/** + * Serde represents a serializer/deserializer + */ +export abstract class Serde { + client: Client + serdeType: SerdeType + conf: SerdeConfig + fieldTransformer: FieldTransformer | null = null + ruleRegistry: RuleRegistry + + protected constructor(client: Client, serdeType: SerdeType, conf: SerdeConfig, ruleRegistry?: RuleRegistry) { + this.client = client + this.serdeType = serdeType + this.conf = conf + this.ruleRegistry = ruleRegistry ?? RuleRegistry.getGlobalInstance() + } + + abstract config(): SerdeConfig + + close(): void { + return + } + + subjectName(topic: string, info?: SchemaInfo): string { + const strategy = this.conf.subjectNameStrategy ?? 
TopicNameStrategy + return strategy(topic, this.serdeType, info) + } + + async resolveReferences(client: Client, schema: SchemaInfo, deps: Map, format?: string): Promise { + let references = schema.references + if (references == null) { + return + } + for (let ref of references) { + let metadata = await client.getSchemaMetadata(ref.subject, ref.version, true, format) + deps.set(ref.name, metadata.schema) + await this.resolveReferences(client, metadata, deps) + } + } + + async executeRules(subject: string, topic: string, ruleMode: RuleMode, + source: SchemaInfo | null, target: SchemaInfo | null, msg: any, + inlineTags: Map> | null): Promise { + if (msg == null || target == null) { + return msg + } + let rules: Rule[] | undefined + switch (ruleMode) { + case RuleMode.UPGRADE: + rules = target.ruleSet?.migrationRules + break + case RuleMode.DOWNGRADE: + rules = source?.ruleSet?.migrationRules?.map(x => x).reverse() + break + default: + rules = target.ruleSet?.domainRules + if (ruleMode === RuleMode.READ) { + // Execute read rules in reverse order for symmetry + rules = rules?.map(x => x).reverse() + } + break + } + if (rules == null) { + return msg + } + for (let i = 0; i < rules.length; i++ ) { + let rule = rules[i] + if (rule.disabled) { + continue + } + let mode = rule.mode + switch (mode) { + case RuleMode.WRITEREAD: + if (ruleMode !== RuleMode.WRITE && ruleMode !== RuleMode.READ) { + continue + } + break + case RuleMode.UPDOWN: + if (ruleMode !== RuleMode.UPGRADE && ruleMode !== RuleMode.DOWNGRADE) { + continue + } + break + default: + if (mode !== ruleMode) { + continue + } + break + } + let ctx = new RuleContext(source, target, subject, topic, + this.serdeType === SerdeType.KEY, ruleMode, rule, i, rules, inlineTags, this.fieldTransformer!) + let ruleExecutor = this.ruleRegistry.getExecutor(rule.type) + if (ruleExecutor == null) { + await this.runAction(ctx, ruleMode, rule, rule.onFailure, msg, + new Error(`could not find rule executor of type ${rule.type}`), 'ERROR') + return msg + } + try { + let result = await ruleExecutor.transform(ctx, msg) + switch (rule.kind) { + case 'CONDITION': + if (result === false) { + throw new RuleConditionError(rule) + } + break + case 'TRANSFORM': + msg = result + break + } + await this.runAction(ctx, ruleMode, rule, msg != null ? rule.onSuccess : rule.onFailure, + msg, null, msg != null ? 
'NONE' : 'ERROR') + } catch (error) { + if (error instanceof SerializationError) { + throw error + } + await this.runAction(ctx, ruleMode, rule, rule.onFailure, msg, error as Error, 'ERROR') + } + } + return msg + } + + async runAction(ctx: RuleContext, ruleMode: RuleMode, rule: Rule, action: string | undefined, + msg: any, err: Error | null, defaultAction: string): Promise { + let actionName = this.getRuleActionName(rule, ruleMode, action) + if (actionName == null) { + actionName = defaultAction + } + let ruleAction = this.getRuleAction(ctx, actionName) + if (ruleAction == null) { + throw new RuleError(`Could not find rule action of type ${actionName}`) + } + try { + await ruleAction.run(ctx, msg, err) + } catch (error) { + if (error instanceof SerializationError) { + throw error + } + console.warn("could not run post-rule action %s: %s", actionName, error) + } + } + + getRuleActionName(rule: Rule, ruleMode: RuleMode, actionName: string | undefined): string | null { + if (actionName == null || actionName === '') { + return null + } + if ((rule.mode === RuleMode.WRITEREAD || rule.mode === RuleMode.UPDOWN) && actionName.includes(',')) { + let parts = actionName.split(',') + switch (ruleMode) { + case RuleMode.WRITE: + case RuleMode.UPGRADE: + return parts[0] + case RuleMode.READ: + case RuleMode.DOWNGRADE: + return parts[1] + } + } + return actionName + } + + getRuleAction(ctx: RuleContext, actionName: string): RuleAction | undefined { + if (actionName === 'ERROR') { + return new ErrorAction(); + } else if (actionName === 'NONE') { + return new NoneAction() + } + return this.ruleRegistry.getAction(actionName) + } +} + +/** + * SerializerConfig represents a serializer configuration + */ +export interface SerializerConfig extends SerdeConfig { + // autoRegisterSchemas determines whether to automatically register schemas + autoRegisterSchemas?: boolean + // useSchemaID specifies a schema ID to use + useSchemaId?: number + // normalizeSchemas determines whether to normalize schemas + normalizeSchemas?: boolean +} + +/** + * Serializer represents a serializer + */ +export abstract class Serializer extends Serde { + protected constructor(client: Client, serdeType: SerdeType, conf: SerializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + } + + override config(): SerializerConfig { + return this.conf as SerializerConfig + } + + /** + * Serialize serializes a message + * @param topic - the topic + * @param msg - the message + */ + abstract serialize(topic: string, msg: any): Promise + + // GetID returns a schema ID for the given schema + async getId(topic: string, msg: any, info: SchemaInfo, format?: string): Promise<[number, SchemaInfo]> { + let autoRegister = this.config().autoRegisterSchemas + let useSchemaId = this.config().useSchemaId + let useLatestWithMetadata = this.conf.useLatestWithMetadata + let useLatest = this.config().useLatestVersion + let normalizeSchema = this.config().normalizeSchemas + + let id = -1 + let subject = this.subjectName(topic, info) + if (autoRegister) { + id = await this.client.register(subject, info, Boolean(normalizeSchema)) + } else if (useSchemaId != null && useSchemaId >= 0) { + info = await this.client.getBySubjectAndId(subject, useSchemaId, format) + id = await this.client.getId(subject, info, false) + if (id !== useSchemaId) { + throw new SerializationError(`failed to match schema ID (${id} != ${useSchemaId})`) + } + } else if (useLatestWithMetadata != null && Object.keys(useLatestWithMetadata).length !== 0) { + info = 
await this.client.getLatestWithMetadata(subject, useLatestWithMetadata, true, format) + id = await this.client.getId(subject, info, false) + } else if (useLatest) { + info = await this.client.getLatestSchemaMetadata(subject, format) + id = await this.client.getId(subject, info, false) + } else { + id = await this.client.getId(subject, info, Boolean(normalizeSchema)) + } + return [id, info] + } + + writeBytes(id: number, msgBytes: Buffer): Buffer { + const idBuffer = Buffer.alloc(4) + idBuffer.writeInt32BE(id, 0) + return Buffer.concat([MAGIC_BYTE, idBuffer, msgBytes]) + } +} + +/** + * DeserializerConfig represents a deserializer configuration + */ +export type DeserializerConfig = SerdeConfig + +/** + * Migration represents a migration + */ +export interface Migration { + ruleMode: RuleMode + source: SchemaMetadata | null + target: SchemaMetadata | null +} + +/** + * Deserializer represents a deserializer + */ +export abstract class Deserializer extends Serde { + protected constructor(client: Client, serdeType: SerdeType, conf: DeserializerConfig, ruleRegistry?: RuleRegistry) { + super(client, serdeType, conf, ruleRegistry) + } + + override config(): DeserializerConfig { + return this.conf as DeserializerConfig + } + + /** + * Deserialize deserializes a message + * @param topic - the topic + * @param payload - the payload + */ + abstract deserialize(topic: string, payload: Buffer): Promise + + async getSchema(topic: string, payload: Buffer, format?: string): Promise { + const magicByte = payload.subarray(0, 1) + if (!magicByte.equals(MAGIC_BYTE)) { + throw new SerializationError( + `Message encoded with magic byte ${JSON.stringify(magicByte)}, expected ${JSON.stringify( + MAGIC_BYTE, + )}`, + ) + } + const id = payload.subarray(1, 5).readInt32BE(0) + let subject = this.subjectName(topic) + return await this.client.getBySubjectAndId(subject, id, format) + } + + async getReaderSchema(subject: string, format?: string): Promise { + let useLatestWithMetadata = this.config().useLatestWithMetadata + let useLatest = this.config().useLatestVersion + if (useLatestWithMetadata != null && Object.keys(useLatestWithMetadata).length !== 0) { + return await this.client.getLatestWithMetadata(subject, useLatestWithMetadata, true, format) + } + if (useLatest) { + return await this.client.getLatestSchemaMetadata(subject, format) + } + return null + } + + hasRules(ruleSet: RuleSet, mode: RuleMode): boolean { + switch (mode) { + case RuleMode.UPGRADE: + case RuleMode.DOWNGRADE: + return this.checkRules(ruleSet?.migrationRules, (ruleMode: RuleMode): boolean => + ruleMode === mode || ruleMode === RuleMode.UPDOWN) + case RuleMode.UPDOWN: + return this.checkRules(ruleSet?.migrationRules, (ruleMode: RuleMode): boolean => + ruleMode === mode) + case RuleMode.WRITE: + case RuleMode.READ: + return this.checkRules(ruleSet?.domainRules, (ruleMode: RuleMode): boolean => + ruleMode === mode || ruleMode === RuleMode.WRITEREAD) + case RuleMode.WRITEREAD: + return this.checkRules(ruleSet?.domainRules, (ruleMode: RuleMode): boolean => + ruleMode === mode) + } + } + + checkRules(rules: Rule[] | undefined, filter: (ruleMode: RuleMode) => boolean): boolean { + if (rules == null) { + return false + } + for (let rule of rules) { + let ruleMode = rule.mode + if (ruleMode && filter(ruleMode)) { + return true + } + } + return false + } + + async getMigrations(subject: string, sourceInfo: SchemaInfo, + target: SchemaMetadata, format?: string): Promise { + let version = await this.client.getVersion(subject, sourceInfo, false) + let 
source: SchemaMetadata = { + id: 0, + version: version, + schema: sourceInfo.schema, + references: sourceInfo.references, + metadata: sourceInfo.metadata, + ruleSet: sourceInfo.ruleSet, + } + let migrationMode: RuleMode + let migrations: Migration[] = [] + let first: SchemaMetadata + let last: SchemaMetadata + if (source.version! < target.version!) { + migrationMode = RuleMode.UPGRADE + first = source + last = target + } else if (source.version! > target.version!) { + migrationMode = RuleMode.DOWNGRADE + first = target + last = source + } else { + return migrations + } + let previous: SchemaMetadata | null = null + let versions = await this.getSchemasBetween(subject, first, last, format) + for (let i = 0; i < versions.length; i++) { + let version = versions[i] + if (i === 0) { + previous = version + continue + } + if (version.ruleSet != null && this.hasRules(version.ruleSet, migrationMode)) { + let m: Migration + if (migrationMode === RuleMode.UPGRADE) { + m = { + ruleMode: migrationMode, + source: previous, + target: version, + } + } else { + m = { + ruleMode: migrationMode, + source: version, + target: previous, + } + } + migrations.push(m) + } + previous = version + } + if (migrationMode === RuleMode.DOWNGRADE) { + migrations = migrations.map(x => x).reverse() + } + return migrations + } + + async getSchemasBetween(subject: string, first: SchemaMetadata, + last: SchemaMetadata, format?: string): Promise { + if (last.version!-first.version! <= 1) { + return [first, last] + } + let version1 = first.version! + let version2 = last.version! + let result = [first] + for (let i = version1 + 1; i < version2; i++) { + let meta = await this.client.getSchemaMetadata(subject, i, true, format) + result.push(meta) + } + result.push(last) + return result + } + + async executeMigrations(migrations: Migration[], subject: string, topic: string, msg: any): Promise { + for (let migration of migrations) { + // TODO fix source, target? + msg = await this.executeRules(subject, topic, migration.ruleMode, migration.source, migration.target, msg, null) + } + return msg + } +} + +/** + * SubjectNameStrategyFunc determines the subject from the given parameters + */ +export type SubjectNameStrategyFunc = ( + topic: string, + serdeType: SerdeType, + schema?: SchemaInfo, +) => string + +/** + * TopicNameStrategy creates a subject name by appending -[key|value] to the topic name. 
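+ * For example, topic "orders" maps to subject "orders-key" for key serdes and "orders-value" for value serdes.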
+ * @param topic - the topic name + * @param serdeType - the serde type + */ +export const TopicNameStrategy: SubjectNameStrategyFunc = (topic: string, serdeType: SerdeType) => { + let suffix = '-value' + if (serdeType === SerdeType.KEY) { + suffix = '-key' + } + return topic + suffix +} + +/** + * RuleContext represents a rule context + */ +export class RuleContext { + source: SchemaInfo | null + target: SchemaInfo + subject: string + topic: string + isKey: boolean + ruleMode: RuleMode + rule: Rule + index: number + rules: Rule[] + inlineTags: Map> | null + fieldTransformer: FieldTransformer + private fieldContexts: FieldContext[] + + constructor(source: SchemaInfo | null, target: SchemaInfo, subject: string, topic: string, + isKey: boolean, ruleMode: RuleMode, rule: Rule, index: number, rules: Rule[], + inlineTags: Map> | null, fieldTransformer: FieldTransformer) { + this.source = source + this.target = target + this.subject = subject + this.topic = topic + this.isKey = isKey + this.ruleMode = ruleMode + this.rule = rule + this.index = index + this.rules = rules + this.inlineTags = inlineTags + this.fieldTransformer = fieldTransformer + this.fieldContexts = [] + } + + getParameter(name: string): string | null { + const params = this.rule.params + if (params == null) { + return null + } + let value = params[name] + if (value != null) { + return value + } + let metadata = this.target.metadata + if (metadata != null && metadata.properties != null) { + value = metadata.properties[name] + if (value != null) { + return value + } + } + return null + } + + getInlineTags(name: string): Set { + let tags = this.inlineTags?.get(name) + if (tags != null) { + return tags + } + return new Set() + } + + currentField(): FieldContext | null { + let size = this.fieldContexts.length + if (size === 0) { + return null + } + return this.fieldContexts[size - 1] + } + + enterField(containingMessage: any, fullName: string, name: string, fieldType: FieldType, tags: Set): FieldContext { + let allTags = new Set(tags) + for (let v of this.getTags(fullName)) { + allTags.add(v) + } + let fieldContext = new FieldContext( + containingMessage, + fullName, + name, + fieldType, + allTags + ) + this.fieldContexts.push(fieldContext) + return fieldContext + } + + getTags(fullName: string): Set { + let tags = new Set() + let metadata = this.target.metadata + if (metadata?.tags != null) { + for (let [k, v] of Object.entries(metadata.tags)) { + if (match(fullName, k)) { + for (let tag of v) { + tags.add(tag) + } + } + } + } + return tags + } + + leaveField(): void { + let size = this.fieldContexts.length - 1 + this.fieldContexts = this.fieldContexts.slice(0, size) + } +} + +export interface RuleBase { + configure(clientConfig: ClientConfig, config: Map): void + + type(): string; + + close(): void +} + +/** + * RuleExecutor represents a rule executor + */ +export interface RuleExecutor extends RuleBase { + transform(ctx: RuleContext, msg: any): Promise +} + +/** + * FieldTransformer represents a field transformer + */ +export type FieldTransformer = (ctx: RuleContext, fieldTransform: FieldTransform, msg: any) => any; + +/** + * FieldTransform represents a field transform + */ +export interface FieldTransform { + transform(ctx: RuleContext, fieldCtx: FieldContext, fieldValue: any): Promise; +} + +/** + * FieldRuleExecutor represents a field rule executor + */ +export abstract class FieldRuleExecutor implements RuleExecutor { + config: Map | null = null + + abstract configure(clientConfig: ClientConfig, config: Map): void + + 
abstract type(): string; + + abstract newTransform(ctx: RuleContext): FieldTransform; + + async transform(ctx: RuleContext, msg: any): Promise { + // TODO preserve source + switch (ctx.ruleMode) { + case RuleMode.WRITE: + case RuleMode.UPGRADE: + for (let i = 0; i < ctx.index; i++) { + let otherRule = ctx.rules[i] + if (areTransformsWithSameTag(ctx.rule, otherRule)) { + // ignore this transform if an earlier one has the same tag + return msg + } + } + break + case RuleMode.READ: + case RuleMode.DOWNGRADE: + for (let i = ctx.index + 1; i < ctx.rules.length; i++) { + let otherRule = ctx.rules[i] + if (areTransformsWithSameTag(ctx.rule, otherRule)) { + // ignore this transform if a later one has the same tag + return msg + } + } + break + } + let fieldTransform = this.newTransform(ctx) + return ctx.fieldTransformer(ctx, fieldTransform, msg) + } + + abstract close(): void +} + +function areTransformsWithSameTag(rule1: Rule, rule2: Rule): boolean { + return rule1.tags != null && rule1.tags.length > 0 + && rule1.kind === 'TRANSFORM' + && rule1.kind === rule2.kind + && rule1.mode === rule2.mode + && rule1.type === rule2.type + && rule1.tags === rule2.tags +} + +/** + * FieldContext represents a field context + */ +export class FieldContext { + containingMessage: any + fullName: string + name: string + type: FieldType + tags: Set + + constructor(containingMessage: any, fullName: string, name: string, fieldType: FieldType, tags: Set) { + this.containingMessage = containingMessage + this.fullName = fullName + this.name = name + this.type = fieldType + this.tags = new Set(tags) + } + + isPrimitive(): boolean { + let t = this.type + return t === FieldType.STRING || t === FieldType.BYTES || t === FieldType.INT + || t === FieldType.LONG || t === FieldType.FLOAT || t === FieldType.DOUBLE + || t === FieldType.BOOLEAN || t === FieldType.NULL + } + + typeName(): string { + return this.type.toString() + } +} + +export enum FieldType { + RECORD = 'RECORD', + ENUM = 'ENUM', + ARRAY = 'ARRAY', + MAP = 'MAP', + COMBINED = 'COMBINED', + FIXED = 'FIXED', + STRING = 'STRING', + BYTES = 'BYTES', + INT = 'INT', + LONG = 'LONG', + FLOAT = 'FLOAT', + DOUBLE = 'DOUBLE', + BOOLEAN = 'BOOLEAN', + NULL = 'NULL', +} + +/** + * RuleAction represents a rule action + */ +export interface RuleAction extends RuleBase { + run(ctx: RuleContext, msg: any, err: Error | null): Promise +} + +/** + * ErrorAction represents an error action + */ +export class ErrorAction implements RuleAction { + configure(clientConfig: ClientConfig, config: Map): void { + } + + type(): string { + return 'ERROR' + } + + async run(ctx: RuleContext, msg: any, err: Error): Promise { + throw new SerializationError(err.message) + } + + close(): void { + } +} + +/** + * NoneAction represents a no-op action + */ +export class NoneAction implements RuleAction { + configure(clientConfig: ClientConfig, config: Map): void { + } + + type(): string { + return 'NONE' + } + + async run(ctx: RuleContext, msg: any, err: Error): Promise { + return + } + + close(): void { + } +} + +/** + * RuleError represents a rule error + */ +export class RuleError extends Error { + + /** + * Creates a new rule error. + * @param message - The error message. + */ + constructor(message?: string) { + super(message) + } +} + +/** + * RuleConditionError represents a rule condition error + */ +export class RuleConditionError extends RuleError { + rule: Rule + + /** + * Creates a new rule condition error. + * @param rule - The rule. 
+ */ + constructor(rule: Rule) { + super(RuleConditionError.error(rule)) + this.rule = rule + } + + static error(rule: Rule): string { + let errMsg = rule.doc + if (!errMsg) { + if (rule.expr !== '') { + return `Expr failed: '${rule.expr}'` + } + return `Condition failed: '${rule.name}'` + } + return errMsg + } +} diff --git a/schemaregistry/serde/wildcard-matcher.ts b/schemaregistry/serde/wildcard-matcher.ts new file mode 100644 index 00000000..37a953b8 --- /dev/null +++ b/schemaregistry/serde/wildcard-matcher.ts @@ -0,0 +1,90 @@ +/** + * Matches fully-qualified names that use dot (.) as the name boundary. + * + *
A '?' matches a single character. + * A '*' matches one or more characters within a name boundary. + * A '**' matches one or more characters across name boundaries. + * + * Examples: + *
+ * wildcardMatch("eve", "eve*")                  --> true
+ * wildcardMatch("alice.bob.eve", "a*.bob.eve")  --> true
+ * wildcardMatch("alice.bob.eve", "a*.bob.e*")   --> true
+ * wildcardMatch("alice.bob.eve", "a*")          --> false
+ * wildcardMatch("alice.bob.eve", "a**")         --> true
+ * wildcardMatch("alice.bob.eve", "alice.bob*")  --> false
+ * wildcardMatch("alice.bob.eve", "alice.bob**") --> true
+ * 
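+ * Internally the pattern is compiled to a regular expression in which '**' becomes '.*',
+ * '*' becomes '[^.]*' and '?' becomes '[^.]' (see wildcardToRegexp below).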
+ * + * @param str - the string to match on + * @param wildcardMatcher - the wildcard string to match against + * @returns true if the string matches the wildcard string + */ +export function match(str: string, wildcardMatcher: string): boolean { + let re = wildcardToRegexp(wildcardMatcher, '.') + let pattern: RegExp + try { + pattern = new RegExp(re) + } catch (error) { + return false + } + let match = str.match(pattern) + return match != null && match[0] === str +} + +function wildcardToRegexp(globExp: string, separator: string): string { + let dst = '' + let src = globExp.replaceAll('**'+separator+'*', '**') + let i = 0; + let size = src.length; + while (i < size) { + let c = src[i] + i++ + switch (c) { + case '*': + // One char lookahead for ** + if (i < src.length && src[i] == '*') { + dst += '.*' + i++ + } else { + dst += '[^' + separator + ']*' + } + break + case '?': + dst += '[^' + separator + ']' + break + case '.': + case '+': + case '{': + case '}': + case '(': + case ')': + case '|': + case '^': + case '$': + // These need to be escaped in regular expressions + dst += '\\' + c + break + case '\\': + [dst, i] = doubleSlashes(dst, src, i) + break + default: + dst += c + break + } + } + return dst +} + +function doubleSlashes(dst: string, src: string, i: number): [string, number] { + // Emit the next character without special interpretation + dst += '\\' + if (i+1 < src.length) { + dst += '\\' + src[i] + i++ + } else { + // A backslash at the very end is treated like an escaped backslash + dst += '\\' + } + return [dst, i] +} diff --git a/schemaregistry/tsconfig-build.json b/schemaregistry/tsconfig-build.json new file mode 100644 index 00000000..5192d5f1 --- /dev/null +++ b/schemaregistry/tsconfig-build.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "exclude": [ + "../test/**/*", + "dist" + ] +} diff --git a/schemaregistry/tsconfig.json b/schemaregistry/tsconfig.json new file mode 100644 index 00000000..cc46dcf3 --- /dev/null +++ b/schemaregistry/tsconfig.json @@ -0,0 +1,34 @@ +{ + "compilerOptions": { + "baseUrl": ".", + "target": "es2021", + "lib": [ + "es2021", "dom" + ], + "declaration": true, + "outDir": "dist", + "types": ["../node_modules/@types/node"], + "esModuleInterop": true, + "strict": true, + "forceConsistentCasingInFileNames": true, + "allowUnusedLabels": false, + "allowUnreachableCode": false, + "noFallthroughCasesInSwitch": true, + "noImplicitOverride": true, + "noImplicitReturns": true, + "noPropertyAccessFromIndexSignature": true, + "noUnusedLocals": true, + "useUnknownInCatchVariables": true, + "resolveJsonModule": true, + "moduleResolution": "nodenext", + "module": "nodenext", + "skipLibCheck": true + }, + "include": [ + "**/*", + "../test/**/*" + ], + "exclude": [ + "dist" + ] +} diff --git a/service.yml b/service.yml index 760002d2..c63b80c2 100644 --- a/service.yml +++ b/service.yml @@ -1,10 +1,11 @@ -name: confluent-kafka-js +name: confluent-kafka-javascript lang: unknown lang_version: unknown git: enable: true github: enable: true + repo_name: confluentinc/confluent-kafka-javascript codeowners: enable: true semaphore: diff --git a/src/admin.cc b/src/admin.cc index a0e6a011..fccae5f4 100644 --- a/src/admin.cc +++ b/src/admin.cc @@ -1,18 +1,20 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. 
* * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include "src/admin.h" + +#include #include #include -#include #include "src/workers.h" -#include "src/admin.h" using Nan::FunctionCallbackInfo; @@ -38,22 +40,44 @@ AdminClient::~AdminClient() { } Baton AdminClient::Connect() { - std::string errstr; + if (IsConnected()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + Baton baton = setupSaslOAuthBearerConfig(); + if (baton.err() != RdKafka::ERR_NO_ERROR) { + return baton; + } + // Activate the dispatchers before the connection, as some callbacks may run + // on the background thread. + // We will deactivate them if the connection fails. + ActivateDispatchers(); + + std::string errstr; { scoped_shared_write_lock lock(m_connection_lock); m_client = RdKafka::Producer::create(m_gconfig, errstr); } if (!m_client || !errstr.empty()) { + DeactivateDispatchers(); return Baton(RdKafka::ERR__STATE, errstr); } + /* Set the client name at the first possible opportunity for logging. */ + m_event_cb.dispatcher.SetClientName(m_client->name()); + if (rkqu == NULL) { rkqu = rd_kafka_queue_new(m_client->c_ptr()); } - return Baton(RdKafka::ERR_NO_ERROR); + baton = setupSaslOAuthBearerBackgroundQueue(); + if (baton.err() != RdKafka::ERR_NO_ERROR) { + DeactivateDispatchers(); + } + + return baton; } Baton AdminClient::Disconnect() { @@ -65,6 +89,8 @@ Baton AdminClient::Disconnect() { rkqu = NULL; } + DeactivateDispatchers(); + delete m_client; m_client = NULL; } @@ -81,13 +107,27 @@ void AdminClient::Init(v8::Local exports) { tpl->SetClassName(Nan::New("AdminClient").ToLocalChecked()); tpl->InstanceTemplate()->SetInternalFieldCount(1); + // Inherited from NodeKafka::Connection + Nan::SetPrototypeMethod(tpl, "configureCallbacks", NodeConfigureCallbacks); + Nan::SetPrototypeMethod(tpl, "name", NodeName); + // Admin client operations Nan::SetPrototypeMethod(tpl, "createTopic", NodeCreateTopic); Nan::SetPrototypeMethod(tpl, "deleteTopic", NodeDeleteTopic); Nan::SetPrototypeMethod(tpl, "createPartitions", NodeCreatePartitions); + // Consumer group related operations + Nan::SetPrototypeMethod(tpl, "listGroups", NodeListGroups); + Nan::SetPrototypeMethod(tpl, "describeGroups", NodeDescribeGroups); + Nan::SetPrototypeMethod(tpl, "deleteGroups", NodeDeleteGroups); + Nan::SetPrototypeMethod(tpl, "connect", NodeConnect); Nan::SetPrototypeMethod(tpl, "disconnect", NodeDisconnect); + Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); + Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + NodeSetOAuthBearerTokenFailure); constructor.Reset( (tpl->GetFunction(Nan::GetCurrentContext())).ToLocalChecked()); @@ -177,6 +217,9 @@ rd_kafka_event_t* PollForEvent( rd_kafka_event_type(event_response) != event_type && attempts > 0); + // TODO: change this function so a type mismatch leads to an INVALID_TYPE + // error rather than a null event. A null event is treated as a timeout, which + // isn't true all the time. 
// If this isn't the type of response we want, or if we do not have a response // type, bail out with a null if (event_response == NULL || @@ -419,6 +462,214 @@ Baton AdminClient::CreatePartitions( } } +Baton AdminClient::ListGroups( + bool is_match_states_set, + std::vector &match_states, int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + { + scoped_shared_write_lock lock(m_connection_lock); + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + // Make admin options to establish that we are listing groups + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS); + + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( + options, timeout_ms, errstr, sizeof(errstr)); + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { + return Baton(static_cast(err), errstr); + } + + if (is_match_states_set) { + rd_kafka_error_t *error = + rd_kafka_AdminOptions_set_match_consumer_group_states( + options, &match_states[0], match_states.size()); + if (error) { + return Baton::BatonFromErrorAndDestroy(error); + } + } + + // Create queue just for this operation. + rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); + + rd_kafka_ListConsumerGroups(m_client->c_ptr(), options, rkqu); + + // Poll for an event by type in that queue + // DON'T destroy the event. It is the out parameter, and ownership is + // the caller's. + *event_response = PollForEvent( + rkqu, RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, timeout_ms); + + // Destroy the queue since we are done with it. + rd_kafka_queue_destroy(rkqu); + + // Destroy the options we just made because we polled already + rd_kafka_AdminOptions_destroy(options); + + // If we got no response from that operation, this is a failure + // likely due to time out + if (*event_response == NULL) { + return Baton(RdKafka::ERR__TIMED_OUT); + } + + // Now we can get the error code from the event + if (rd_kafka_event_error(*event_response)) { + // If we had a special error code, get out of here with it + const rd_kafka_resp_err_t errcode = rd_kafka_event_error(*event_response); + return Baton(static_cast(errcode)); + } + + // At this point, event_response contains the result, which needs + // to be parsed/converted by the caller. + return Baton(RdKafka::ERR_NO_ERROR); + } +} + +Baton AdminClient::DescribeGroups(std::vector &groups, + bool include_authorized_operations, + int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + { + scoped_shared_write_lock lock(m_connection_lock); + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + // Make admin options to establish that we are describing groups + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS); + + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( + options, timeout_ms, errstr, sizeof(errstr)); + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { + return Baton(static_cast(err), errstr); + } + + if (include_authorized_operations) { + rd_kafka_error_t *error = + rd_kafka_AdminOptions_set_include_authorized_operations( + options, include_authorized_operations); + if (error) { + return Baton::BatonFromErrorAndDestroy(error); + } + } + + // Create queue just for this operation. 
+ rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); + + // Construct a char** to pass to librdkafka. Avoid too many allocations. + std::vector c_groups(groups.size()); + for (size_t i = 0; i < groups.size(); i++) { + c_groups[i] = groups[i].c_str(); + } + + rd_kafka_DescribeConsumerGroups(m_client->c_ptr(), &c_groups[0], + groups.size(), options, rkqu); + + // Poll for an event by type in that queue + // DON'T destroy the event. It is the out parameter, and ownership is + // the caller's. + *event_response = PollForEvent( + rkqu, RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, timeout_ms); + + // Destroy the queue since we are done with it. + rd_kafka_queue_destroy(rkqu); + + // Destroy the options we just made because we polled already + rd_kafka_AdminOptions_destroy(options); + + // If we got no response from that operation, this is a failure + // likely due to time out + if (*event_response == NULL) { + return Baton(RdKafka::ERR__TIMED_OUT); + } + + // Now we can get the error code from the event + if (rd_kafka_event_error(*event_response)) { + // If we had a special error code, get out of here with it + const rd_kafka_resp_err_t errcode = rd_kafka_event_error(*event_response); + return Baton(static_cast(errcode)); + } + + // At this point, event_response contains the result, which needs + // to be parsed/converted by the caller. + return Baton(RdKafka::ERR_NO_ERROR); + } +} + +Baton AdminClient::DeleteGroups(rd_kafka_DeleteGroup_t **group_list, + size_t group_cnt, int timeout_ms, + /* out */ rd_kafka_event_t **event_response) { + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + { + scoped_shared_write_lock lock(m_connection_lock); + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } + + // Make admin options to establish that we are deleting groups + rd_kafka_AdminOptions_t *options = rd_kafka_AdminOptions_new( + m_client->c_ptr(), RD_KAFKA_ADMIN_OP_DELETEGROUPS); + + char errstr[512]; + rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout( + options, timeout_ms, errstr, sizeof(errstr)); + if (err != RD_KAFKA_RESP_ERR_NO_ERROR) { + return Baton(static_cast(err), errstr); + } + + // Create queue just for this operation. + rd_kafka_queue_t *rkqu = rd_kafka_queue_new(m_client->c_ptr()); + + rd_kafka_DeleteGroups(m_client->c_ptr(), group_list, group_cnt, options, + rkqu); + + // Poll for an event by type in that queue + // DON'T destroy the event. It is the out parameter, and ownership is + // the caller's. + *event_response = + PollForEvent(rkqu, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, timeout_ms); + + // Destroy the queue since we are done with it. + rd_kafka_queue_destroy(rkqu); + + // Destroy the options we just made because we polled already + rd_kafka_AdminOptions_destroy(options); + + // If we got no response from that operation, this is a failure + // likely due to time out + if (*event_response == NULL) { + return Baton(RdKafka::ERR__TIMED_OUT); + } + + // Now we can get the error code from the event + if (rd_kafka_event_error(*event_response)) { + // If we had a special error code, get out of here with it + const rd_kafka_resp_err_t errcode = rd_kafka_event_error(*event_response); + return Baton(static_cast(errcode)); + } + + // At this point, event_response contains the result, which needs + // to be parsed/converted by the caller. 
+ return Baton(RdKafka::ERR_NO_ERROR); + } +} + void AdminClient::ActivateDispatchers() { // Listen to global config m_gconfig->listen(); @@ -598,4 +849,148 @@ NAN_METHOD(AdminClient::NodeCreatePartitions) { return info.GetReturnValue().Set(Nan::Null()); } +/** + * List Consumer Groups. + */ +NAN_METHOD(AdminClient::NodeListGroups) { + Nan::HandleScope scope; + + if (info.Length() < 2 || !info[1]->IsFunction()) { + // Just throw an exception + return Nan::ThrowError("Need to specify a callback"); + } + + if (!info[0]->IsObject()) { + return Nan::ThrowError("Must provide options object"); + } + + v8::Local config = info[0].As(); + + // Create the final callback object + v8::Local cb = info[1].As(); + Nan::Callback *callback = new Nan::Callback(cb); + AdminClient *client = ObjectWrap::Unwrap(info.This()); + + // Get the timeout - default 5000. + int timeout_ms = GetParameter(config, "timeout", 5000); + + // Get the match states, or not if they are unset. + std::vector match_states; + v8::Local match_consumer_group_states_key = + Nan::New("matchConsumerGroupStates").ToLocalChecked(); + bool is_match_states_set = + Nan::Has(config, match_consumer_group_states_key).FromMaybe(false); + v8::Local match_states_array = Nan::New(); + + if (is_match_states_set) { + match_states_array = GetParameter>( + config, "matchConsumerGroupStates", match_states_array); + if (match_states_array->Length()) { + match_states = Conversion::Admin::FromV8GroupStateArray( + match_states_array); + } + } + + // Queue the work. + Nan::AsyncQueueWorker(new Workers::AdminClientListGroups( + callback, client, is_match_states_set, match_states, timeout_ms)); +} + +/** + * Describe Consumer Groups. + */ +NAN_METHOD(AdminClient::NodeDescribeGroups) { + Nan::HandleScope scope; + + if (info.Length() < 3 || !info[2]->IsFunction()) { + // Just throw an exception + return Nan::ThrowError("Need to specify a callback"); + } + + if (!info[0]->IsArray()) { + return Nan::ThrowError("Must provide group name array"); + } + + if (!info[1]->IsObject()) { + return Nan::ThrowError("Must provide options object"); + } + + // Get list of group names to describe. + v8::Local group_names = info[0].As(); + if (group_names->Length() == 0) { + return Nan::ThrowError("Must provide at least one group name"); + } + std::vector group_names_vector = + v8ArrayToStringVector(group_names); + + v8::Local config = info[1].As(); + + // Get the timeout - default 5000. + int timeout_ms = GetParameter(config, "timeout", 5000); + + // Get whether to include authorized operations - default false. + bool include_authorized_operations = + GetParameter(config, "includeAuthorizedOperations", false); + + // Create the final callback object + v8::Local cb = info[2].As(); + Nan::Callback *callback = new Nan::Callback(cb); + AdminClient *client = ObjectWrap::Unwrap(info.This()); + + // Queue the work. + Nan::AsyncQueueWorker(new Workers::AdminClientDescribeGroups( + callback, client, group_names_vector, include_authorized_operations, + timeout_ms)); +} + +/** + * Delete Consumer Groups. 
+ */ +NAN_METHOD(AdminClient::NodeDeleteGroups) { + Nan::HandleScope scope; + + if (info.Length() < 3 || !info[2]->IsFunction()) { + // Just throw an exception + return Nan::ThrowError("Need to specify a callback"); + } + + if (!info[0]->IsArray()) { + return Nan::ThrowError("Must provide group name array"); + } + + if (!info[1]->IsObject()) { + return Nan::ThrowError("Must provide options object"); + } + + // Get list of group names to delete, and convert it into an + // rd_kafka_DeleteGroup_t array. + v8::Local group_names = info[0].As(); + if (group_names->Length() == 0) { + return Nan::ThrowError("Must provide at least one group name"); + } + std::vector group_names_vector = + v8ArrayToStringVector(group_names); + + // The ownership of this array is transferred to the worker. + rd_kafka_DeleteGroup_t **group_list = static_cast( + malloc(sizeof(rd_kafka_DeleteGroup_t *) * group_names_vector.size())); + for (size_t i = 0; i < group_names_vector.size(); i++) { + group_list[i] = rd_kafka_DeleteGroup_new(group_names_vector[i].c_str()); + } + + v8::Local config = info[1].As(); + + // Get the timeout - default 5000. + int timeout_ms = GetParameter(config, "timeout", 5000); + + // Create the final callback object + v8::Local cb = info[2].As(); + Nan::Callback *callback = new Nan::Callback(cb); + AdminClient *client = ObjectWrap::Unwrap(info.This()); + + // Queue the work. + Nan::AsyncQueueWorker(new Workers::AdminClientDeleteGroups( + callback, client, group_list, group_names_vector.size(), timeout_ms)); +} + } // namespace NodeKafka diff --git a/src/admin.h b/src/admin.h index 6e597a2c..3af30f21 100644 --- a/src/admin.h +++ b/src/admin.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
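
The native entry points added above all validate their arguments the same way: an options object plus a trailing callback, with a group-name array where applicable, and the option keys timeout (default 5000 ms), matchConsumerGroupStates, and includeAuthorizedOperations read on the C++ side. A minimal TypeScript sketch of how a JavaScript wrapper might drive them; the `native` handle, the interface, and the wrapper function are hypothetical stand-ins, not the public API shipped by this package:

// Shapes inferred from the argument checks in NodeListGroups,
// NodeDescribeGroups and NodeDeleteGroups above; `NativeAdminClient` is a
// hypothetical description of the native binding, not an exported type.
interface NativeAdminClient {
  listGroups(
    options: { timeout?: number; matchConsumerGroupStates?: number[] },
    cb: (err: Error | null, result?: unknown) => void): void;
  describeGroups(
    groupNames: string[],
    options: { timeout?: number; includeAuthorizedOperations?: boolean },
    cb: (err: Error | null, result?: unknown) => void): void;
  deleteGroups(
    groupNames: string[],
    options: { timeout?: number },
    cb: (err: Error | null, result?: unknown) => void): void;
}

function describeThenDelete(native: NativeAdminClient, groups: string[]): void {
  // The timeout option defaults to 5000 ms on the C++ side when omitted.
  native.describeGroups(groups, { includeAuthorizedOperations: false }, (err, desc) => {
    if (err) return console.error('describeGroups failed', err);
    console.log('group descriptions', desc);
    native.deleteGroups(groups, { timeout: 5000 }, (err2, res) => {
      if (err2) return console.error('deleteGroups failed', err2);
      console.log('delete results', res);
    });
  });
}
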
@@ -16,7 +17,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "rdkafka.h" // NOLINT #include "src/common.h" @@ -51,6 +52,15 @@ class AdminClient : public Connection { Baton CreatePartitions(rd_kafka_NewPartitions_t* topic, int timeout_ms); // Baton AlterConfig(rd_kafka_NewTopic_t* topic, int timeout_ms); // Baton DescribeConfig(rd_kafka_NewTopic_t* topic, int timeout_ms); + Baton ListGroups(bool is_match_states_set, + std::vector& match_states, + int timeout_ms, + rd_kafka_event_t** event_response); + Baton DescribeGroups(std::vector& groups, + bool include_authorized_operations, int timeout_ms, + rd_kafka_event_t** event_response); + Baton DeleteGroups(rd_kafka_DeleteGroup_t** group_list, size_t group_cnt, + int timeout_ms, rd_kafka_event_t** event_response); protected: static Nan::Persistent constructor; @@ -68,6 +78,11 @@ class AdminClient : public Connection { static NAN_METHOD(NodeDeleteTopic); static NAN_METHOD(NodeCreatePartitions); + // Consumer group operations + static NAN_METHOD(NodeListGroups); + static NAN_METHOD(NodeDescribeGroups); + static NAN_METHOD(NodeDeleteGroups); + static NAN_METHOD(NodeConnect); static NAN_METHOD(NodeDisconnect); }; diff --git a/src/binding.cc b/src/binding.cc index 85baf129..7b3fe77c 100644 --- a/src/binding.cc +++ b/src/binding.cc @@ -1,12 +1,14 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include #include #include "src/binding.h" diff --git a/src/binding.h b/src/binding.h index bebb5bf8..0d656b10 100644 --- a/src/binding.h +++ b/src/binding.h @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -12,7 +12,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/errors.h" #include "src/config.h" diff --git a/src/callbacks.cc b/src/callbacks.cc index b5a1a7c7..faebcec4 100644 --- a/src/callbacks.cc +++ b/src/callbacks.cc @@ -1,17 +1,20 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
*/ +#include "src/callbacks.h" + +#include #include +#include #include -#include -#include "src/callbacks.h" #include "src/kafka-consumer.h" using v8::Local; @@ -73,10 +76,16 @@ void Dispatcher::Activate() { } } +void Dispatcher::AsyncHandleCloseCallback(uv_handle_t *handle) { + uv_async_t *a = reinterpret_cast(handle); + delete a; +} + // Should be able to run this regardless of whether it is active or not void Dispatcher::Deactivate() { if (async) { - uv_close(reinterpret_cast(async), NULL); + uv_close(reinterpret_cast(async), + Dispatcher::AsyncHandleCloseCallback); async = NULL; } } @@ -172,7 +181,7 @@ void Event::event_cb(RdKafka::Event &event) { dispatcher.Execute(); } -EventDispatcher::EventDispatcher() {} +EventDispatcher::EventDispatcher() : client_name("") {} EventDispatcher::~EventDispatcher() {} void EventDispatcher::Add(const event_t &e) { @@ -223,6 +232,8 @@ void EventDispatcher::Flush() { Nan::New(_events[i].fac.c_str()).ToLocalChecked()); Nan::Set(jsobj, Nan::New("message").ToLocalChecked(), Nan::New(_events[i].message.c_str()).ToLocalChecked()); + Nan::Set(jsobj, Nan::New("name").ToLocalChecked(), + Nan::New(this->client_name.c_str()).ToLocalChecked()); break; case RdKafka::Event::EVENT_THROTTLE: @@ -257,6 +268,10 @@ void EventDispatcher::Flush() { } } +void EventDispatcher::SetClientName(const std::string& client_name) { + this->client_name = client_name; +} + DeliveryReportDispatcher::DeliveryReportDispatcher() {} DeliveryReportDispatcher::~DeliveryReportDispatcher() {} @@ -291,7 +306,7 @@ void DeliveryReportDispatcher::Flush() { if (event.is_error) { // If it is an error we need the first argument to be set - argv[0] = Nan::Error(event.error_string.c_str()); + argv[0] = Nan::New(event.error_code); } else { argv[0] = Nan::Null(); } @@ -540,6 +555,37 @@ void OffsetCommit::offset_commit_cb(RdKafka::ErrorCode err, dispatcher.Execute(); } +// OAuthBearerTokenRefresh callback +void OAuthBearerTokenRefreshDispatcher::Add( + const std::string &oauthbearer_config) { + scoped_mutex_lock lock(async_lock); + m_oauthbearer_config = oauthbearer_config; +} + +void OAuthBearerTokenRefreshDispatcher::Flush() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + + std::string oauthbearer_config; + { + scoped_mutex_lock lock(async_lock); + oauthbearer_config = m_oauthbearer_config; + m_oauthbearer_config.clear(); + } + + v8::Local argv[argc] = {}; + argv[0] = Nan::New(oauthbearer_config.c_str()).ToLocalChecked(); + + Dispatch(argc, argv); +} + +void OAuthBearerTokenRefresh::oauthbearer_token_refresh_cb( + RdKafka::Handle *handle, const std::string &oauthbearer_config) { + dispatcher.Add(oauthbearer_config); + dispatcher.Execute(); +} + // Partitioner callback Partitioner::Partitioner() {} diff --git a/src/callbacks.h b/src/callbacks.h index 0944b61e..f69c0685 100644 --- a/src/callbacks.h +++ b/src/callbacks.h @@ -1,6 +1,7 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
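
The OAuthBearerTokenRefreshDispatcher added above flushes exactly one argument to the registered JavaScript callback: the sasl.oauthbearer.config string. The connection.cc changes further below expose setOAuthBearerToken and setOAuthBearerTokenFailure so that the JavaScript side can complete the refresh. A hedged sketch of that hand-off; how the callback is registered and how the token is obtained are application concerns and are only assumed here:

// `OAuthCapableClient` mirrors the native methods added in this patch; the
// interface itself is illustrative, not an exported type.
interface OAuthCapableClient {
  setOAuthBearerToken(token: string, lifetimeMs: number,
                      principal: string, extensions: string[] | null): void;
  setOAuthBearerTokenFailure(errstr: string): void;
}

async function onOAuthBearerTokenRefresh(
    client: OAuthCapableClient, oauthbearerConfig: string): Promise<void> {
  try {
    // fetchToken is a placeholder for the application's identity provider call.
    const { token, expiryMs, principal } = await fetchToken(oauthbearerConfig);
    // lifetime_ms is forwarded to oauthbearer_set_token; librdkafka documents
    // it as the token expiry in wall-clock milliseconds since the epoch.
    client.setOAuthBearerToken(token, expiryMs, principal, null);
  } catch (e) {
    // Reporting the failure lets librdkafka schedule another refresh attempt.
    client.setOAuthBearerTokenFailure(String(e));
  }
}

async function fetchToken(_config: string):
    Promise<{ token: string; expiryMs: number; principal: string }> {
  // Placeholder implementation for the sketch only.
  return { token: 'opaque-token', expiryMs: Date.now() + 60_000, principal: 'user' };
}
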
@@ -15,7 +16,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" typedef Nan::Persistent(async->data); dispatcher->Flush(); } + static void AsyncHandleCloseCallback(uv_handle_t *); uv_async_t *async; }; @@ -77,8 +79,10 @@ class EventDispatcher : public Dispatcher { ~EventDispatcher(); void Add(const event_t &); void Flush(); + void SetClientName(const std::string &); protected: std::vector events; + std::string client_name; }; class Event : public RdKafka::EventCb { @@ -118,7 +122,7 @@ class DeliveryReport { void* opaque; // Key. It is a pointer to avoid corrupted values - // https://github.com/confluentinc/confluent-kafka-js/issues/208 + // https://github.com/confluentinc/confluent-kafka-javascript/issues/208 void* key; size_t key_len; @@ -246,6 +250,23 @@ class OffsetCommit : public RdKafka::OffsetCommitCb { v8::Persistent m_cb; }; +class OAuthBearerTokenRefreshDispatcher : public Dispatcher { + public: + OAuthBearerTokenRefreshDispatcher() {} + ~OAuthBearerTokenRefreshDispatcher() {} + void Add(const std::string &oauthbearer_config); + void Flush(); + + private: + std::string m_oauthbearer_config; +}; + +class OAuthBearerTokenRefresh : public RdKafka::OAuthBearerTokenRefreshCb { + public: + void oauthbearer_token_refresh_cb(RdKafka::Handle *, const std::string &); + OAuthBearerTokenRefreshDispatcher dispatcher; +}; + class Partitioner : public RdKafka::PartitionerCb { public: Partitioner(); diff --git a/src/common.cc b/src/common.cc index 3d0a425d..d83525cf 100644 --- a/src/common.cc +++ b/src/common.cc @@ -1,17 +1,19 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include "src/common.h" +#include +#include #include #include -#include "src/common.h" - namespace NodeKafka { void Log(std::string str) { @@ -142,9 +144,46 @@ std::vector v8ArrayToStringVector(v8::Local parameter) { return newItem; } +std::list v8ArrayToStringList(v8::Local parameter) { + std::list newItem; + if (parameter->Length() >= 1) { + for (unsigned int i = 0; i < parameter->Length(); i++) { + v8::Local v; + if (!Nan::Get(parameter, i).ToLocal(&v)) { + continue; + } + Nan::MaybeLocal p = Nan::To(v); + if (p.IsEmpty()) { + continue; + } + Nan::Utf8String pVal(p.ToLocalChecked()); + std::string pString(*pVal); + newItem.push_back(pString); + } + } + return newItem; +} + +template<> v8::Local GetParameter >( + v8::Local object, + std::string field_name, + v8::Local def) { + v8::Local field = Nan::New(field_name.c_str()).ToLocalChecked(); + + if (Nan::Has(object, field).FromMaybe(false)) { + v8::Local maybeArray = Nan::Get(object, field).ToLocalChecked(); + if (maybeArray->IsArray()) { + v8::Local parameter = maybeArray.As(); + return parameter; + } + } + + return def; +} + namespace Conversion { -namespace Topic { +namespace Util { std::vector ToStringVector(v8::Local parameter) { std::vector newItem; @@ -185,14 +224,76 @@ v8::Local ToV8Array(std::vector parameter) { for (size_t i = 0; i < parameter.size(); i++) { std::string topic = parameter[i]; - Nan::Set(newItem, i, Nan::New(topic).ToLocalChecked()); } return newItem; } -} // namespace Topic +/** + * @brief Converts a list of rd_kafka_error_t* into a v8 array of RdKafkaError + * objects. 
+ */ +v8::Local ToV8Array(const rd_kafka_error_t** error_list, + size_t error_cnt) { + v8::Local errors = Nan::New(); + + for (size_t i = 0; i < error_cnt; i++) { + RdKafka::ErrorCode code = + static_cast(rd_kafka_error_code(error_list[i])); + std::string msg = std::string(rd_kafka_error_string(error_list[i])); + Nan::Set(errors, i, RdKafkaError(code, msg)); + } + + return errors; +} + +/** + * @brief Converts a rd_kafka_Node_t* into a v8 object. + */ +v8::Local ToV8Object(const rd_kafka_Node_t* node) { + /* Return object type + { + id: number + host: string + port: number + rack?: string + } + */ + v8::Local obj = Nan::New(); + + Nan::Set(obj, Nan::New("id").ToLocalChecked(), + Nan::New(rd_kafka_Node_id(node))); + Nan::Set(obj, Nan::New("host").ToLocalChecked(), + Nan::New(rd_kafka_Node_host(node)).ToLocalChecked()); + Nan::Set(obj, Nan::New("port").ToLocalChecked(), + Nan::New(rd_kafka_Node_port(node))); + + const char* rack = rd_kafka_Node_rack(node); + if (rack) { + Nan::Set(obj, Nan::New("rack").ToLocalChecked(), + Nan::New(rack).ToLocalChecked()); + } + + return obj; +} + +/** + * @brief Converts a list of rd_kafka_AclOperation_t into a v8 array. + */ +v8::Local ToV8Array( + const rd_kafka_AclOperation_t* authorized_operations, + size_t authorized_operations_cnt) { + v8::Local array = Nan::New(); + + for (size_t i = 0; i < authorized_operations_cnt; i++) { + Nan::Set(array, i, Nan::New(authorized_operations[i])); + } + + return array; +} + +} // namespace Util namespace TopicPartition { @@ -200,6 +301,11 @@ namespace TopicPartition { * @brief RdKafka::TopicPartition vector to a v8 Array * * @see v8ArrayToTopicPartitionVector + * @note This method returns a v8 array of a mix of topic partition + * objects and errors. For a more uniform return type of + * topic partitions (which have an internal error property), + * use `ToTopicPartitionV8Array(const rd_kafka_topic_partition_list_t*, + * bool)`. */ v8::Local ToV8Array( std::vector & topic_partition_list) { // NOLINT @@ -209,6 +315,8 @@ v8::Local ToV8Array( RdKafka::TopicPartition* topic_partition = topic_partition_list[topic_partition_i]; + // TODO: why do we set the entire array element to be an error rather adding + // an error field to TopicPartition? Or create a TopicPartitionError? if (topic_partition->err() != RdKafka::ErrorCode::ERR_NO_ERROR) { Nan::Set(array, topic_partition_i, Nan::Error(Nan::New(RdKafka::err2str(topic_partition->err())) @@ -221,12 +329,30 @@ v8::Local ToV8Array( Nan::Set(obj, Nan::New("offset").ToLocalChecked(), Nan::New(topic_partition->offset())); } + + // If present, size >= 1, since it will include at least the + // null terminator. + if (topic_partition->get_metadata().size() > 0) { + Nan::Set(obj, Nan::New("metadata").ToLocalChecked(), + Nan::New( + reinterpret_cast(topic_partition->get_metadata().data()), // NOLINT + // null terminator is not required by the constructor. 
+ topic_partition->get_metadata().size() - 1) + .ToLocalChecked()); + } + Nan::Set(obj, Nan::New("partition").ToLocalChecked(), Nan::New(topic_partition->partition())); Nan::Set(obj, Nan::New("topic").ToLocalChecked(), Nan::New(topic_partition->topic().c_str()) .ToLocalChecked()); + int leader_epoch = topic_partition->get_leader_epoch(); + if (leader_epoch >= 0) { + Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), + Nan::New(leader_epoch)); + } + Nan::Set(array, topic_partition_i, obj); } } @@ -234,7 +360,56 @@ v8::Local ToV8Array( return array; } +/** + * @brief Converts a rd_kafka_topic_partition_list_t* into a list of v8 objects. + * + * @param topic_partition_list The list of topic partitions to convert. + * @param include_offset Whether to include the offset in the output. + * @returns [{topic: string, partition: number, offset?: number, error?: + * LibrdKafkaError}] + * + * @note Contains error within the topic partitions object, and not as separate + * array elements, unlike the `ToV8Array(std::vector & + * topic_partition_list)`. + */ +v8::Local ToTopicPartitionV8Array( + const rd_kafka_topic_partition_list_t* topic_partition_list, + bool include_offset) { + v8::Local array = Nan::New(); + for (int topic_partition_i = 0; topic_partition_i < topic_partition_list->cnt; + topic_partition_i++) { + rd_kafka_topic_partition_t topic_partition = + topic_partition_list->elems[topic_partition_i]; + v8::Local obj = Nan::New(); + + Nan::Set(obj, Nan::New("partition").ToLocalChecked(), + Nan::New(topic_partition.partition)); + Nan::Set(obj, Nan::New("topic").ToLocalChecked(), + Nan::New(topic_partition.topic).ToLocalChecked()); + + if (topic_partition.err != RD_KAFKA_RESP_ERR_NO_ERROR) { + v8::Local error = NodeKafka::RdKafkaError( + static_cast(topic_partition.err)); + Nan::Set(obj, Nan::New("error").ToLocalChecked(), error); + } + + if (include_offset) { + Nan::Set(obj, Nan::New("offset").ToLocalChecked(), + Nan::New(topic_partition.offset)); + } + + int leader_epoch = + rd_kafka_topic_partition_get_leader_epoch(&topic_partition); + if (leader_epoch >= 0) { + Nan::Set(obj, Nan::New("leaderEpoch").ToLocalChecked(), + Nan::New(leader_epoch)); + } + + Nan::Set(array, topic_partition_i, obj); + } + return array; +} /** * @brief v8 Array of topic partitions to RdKafka::TopicPartition vector @@ -275,14 +450,46 @@ RdKafka::TopicPartition * FromV8Object(v8::Local topic_partition) { int64_t offset = GetParameter(topic_partition, "offset", 0); if (partition == -1) { - return NULL; +return NULL; } if (topic.empty()) { return NULL; } - return RdKafka::TopicPartition::create(topic, partition, offset); + RdKafka::TopicPartition *toppar = + RdKafka::TopicPartition::create(topic, partition, offset); + + v8::Local metadataKey = Nan::New("metadata").ToLocalChecked(); + if (Nan::Has(topic_partition, metadataKey).FromMaybe(false)) { + v8::Local metadataValue = + Nan::Get(topic_partition, metadataKey).ToLocalChecked(); + + if (metadataValue->IsString()) { + Nan::Utf8String metadataValueUtf8Str(metadataValue.As()); + std::string metadataValueStr(*metadataValueUtf8Str); + std::vector metadataVector(metadataValueStr.begin(), + metadataValueStr.end()); + metadataVector.push_back( + '\0'); // The null terminator is not included in the iterator. 
+ toppar->set_metadata(metadataVector); + } + } + + toppar->set_leader_epoch(-1); + v8::Local leaderEpochKey = + Nan::New("leaderEpoch").ToLocalChecked(); + if (Nan::Has(topic_partition, leaderEpochKey).FromMaybe(false)) { + v8::Local leaderEpochValue = + Nan::Get(topic_partition, leaderEpochKey).ToLocalChecked(); + + if (leaderEpochValue->IsNumber()) { + int32_t leaderEpoch = Nan::To(leaderEpochValue).FromJust(); + toppar->set_leader_epoch(leaderEpoch); + } + } + + return toppar; } } // namespace TopicPartition @@ -455,7 +662,7 @@ v8::Local ToV8Object(RdKafka::Message *message, if (key_payload) { // We want this to also be a buffer to avoid corruption - // https://github.com/confluentinc/confluent-kafka-js/issues/208 + // https://github.com/confluentinc/confluent-kafka-javascript/issues/208 Nan::Set(pack, Nan::New("key").ToLocalChecked(), Nan::Encode(key_payload, message->key_len(), Nan::Encoding::BUFFER)); } else { @@ -472,6 +679,12 @@ v8::Local ToV8Object(RdKafka::Message *message, Nan::Set(pack, Nan::New("timestamp").ToLocalChecked(), Nan::New(message->timestamp().timestamp)); + int32_t leader_epoch = message->leader_epoch(); + if (leader_epoch >= 0) { + Nan::Set(pack, Nan::New("leaderEpoch").ToLocalChecked(), + Nan::New(leader_epoch)); + } + return pack; } else { return RdKafkaError(message->err()); @@ -497,21 +710,17 @@ rd_kafka_NewTopic_t* FromV8TopicObject( int num_partitions = GetParameter(object, "num_partitions", 0); int replication_factor = GetParameter(object, "replication_factor", 0); - // Too slow to allocate this every call but admin api - // shouldn't be called that often - char* errbuf = reinterpret_cast(malloc(100)); - size_t errstr_size = 100; + char errbuf[512]; rd_kafka_NewTopic_t* new_topic = rd_kafka_NewTopic_new( topic_name.c_str(), num_partitions, replication_factor, errbuf, - errstr_size); + sizeof(errbuf)); if (new_topic == NULL) { - errstr = std::string(errbuf, errstr_size); - free(errbuf); + errstr = std::string(errbuf); return NULL; } @@ -559,8 +768,6 @@ rd_kafka_NewTopic_t* FromV8TopicObject( } } - // Free it again cuz we malloc'd it. - // free(errbuf); return new_topic; } @@ -568,6 +775,324 @@ rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local) { return NULL; } +/** + * @brief Converts a v8 array of group states into a vector of + * rd_kafka_consumer_group_state_t. + */ +std::vector FromV8GroupStateArray( + v8::Local array) { + v8::Local parameter = array.As(); + std::vector returnVec; + if (parameter->Length() >= 1) { + for (unsigned int i = 0; i < parameter->Length(); i++) { + v8::Local v; + if (!Nan::Get(parameter, i).ToLocal(&v)) { + continue; + } + Nan::Maybe maybeT = Nan::To(v); + if (maybeT.IsNothing()) { + continue; + } + int64_t state_number = maybeT.FromJust(); + if (state_number >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) { + continue; + } + returnVec.push_back( + static_cast(state_number)); + } + } + return returnVec; +} + +/** + * @brief Converts a rd_kafka_ListConsumerGroups_result_t* into a v8 object. 
+ */ +v8::Local FromListConsumerGroupsResult( + const rd_kafka_ListConsumerGroups_result_t* result) { + /* Return object type: + { + groups: { + groupId: string, + protocolType: string, + isSimpleConsumerGroup: boolean, + state: ConsumerGroupState (internally a number) + }[], + errors: LibrdKafkaError[] + } + */ + v8::Local returnObject = Nan::New(); + + size_t error_cnt; + const rd_kafka_error_t** error_list = + rd_kafka_ListConsumerGroups_result_errors(result, &error_cnt); + Nan::Set(returnObject, Nan::New("errors").ToLocalChecked(), + Conversion::Util::ToV8Array(error_list, error_cnt)); + + v8::Local groups = Nan::New(); + size_t groups_cnt; + const rd_kafka_ConsumerGroupListing_t** groups_list = + rd_kafka_ListConsumerGroups_result_valid(result, &groups_cnt); + + for (size_t i = 0; i < groups_cnt; i++) { + const rd_kafka_ConsumerGroupListing_t* group = groups_list[i]; + v8::Local groupObject = Nan::New(); + + Nan::Set(groupObject, Nan::New("groupId").ToLocalChecked(), + Nan::New(rd_kafka_ConsumerGroupListing_group_id(group)) + .ToLocalChecked()); + + bool is_simple = + rd_kafka_ConsumerGroupListing_is_simple_consumer_group(group); + Nan::Set(groupObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), + Nan::New(is_simple)); + + std::string protocol_type = is_simple ? "simple" : "consumer"; + Nan::Set(groupObject, Nan::New("protocolType").ToLocalChecked(), + Nan::New(protocol_type).ToLocalChecked()); + + Nan::Set(groupObject, Nan::New("state").ToLocalChecked(), + Nan::New(rd_kafka_ConsumerGroupListing_state(group))); + + Nan::Set(groups, i, groupObject); + } + + Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + return returnObject; +} + +/** + * @brief Converts a rd_kafka_MemberDescription_t* into a v8 object. + */ +v8::Local FromMemberDescription( + const rd_kafka_MemberDescription_t* member) { + /* Return object type: + { + clientHost: string + clientId: string + memberId: string + memberAssignment: Buffer // will be always null + memberMetadata: Buffer // will be always null + groupInstanceId: string + assignment: { + topicPartitions: TopicPartition[] + }, + } + */ + v8::Local returnObject = Nan::New(); + + // clientHost + Nan::Set(returnObject, Nan::New("clientHost").ToLocalChecked(), + Nan::New(rd_kafka_MemberDescription_host(member)) + .ToLocalChecked()); + + // clientId + Nan::Set(returnObject, Nan::New("clientId").ToLocalChecked(), + Nan::New(rd_kafka_MemberDescription_client_id(member)) + .ToLocalChecked()); + + // memberId + Nan::Set(returnObject, Nan::New("memberId").ToLocalChecked(), + Nan::New(rd_kafka_MemberDescription_consumer_id(member)) + .ToLocalChecked()); + + // memberAssignment - not passed to user, always null + Nan::Set(returnObject, Nan::New("memberAssignment").ToLocalChecked(), + Nan::Null()); + + // memberMetadata - not passed to user, always null + Nan::Set(returnObject, Nan::New("memberMetadata").ToLocalChecked(), + Nan::Null()); + + // groupInstanceId + const char* group_instance_id = + rd_kafka_MemberDescription_group_instance_id(member); + if (group_instance_id) { + Nan::Set(returnObject, Nan::New("groupInstanceId").ToLocalChecked(), + Nan::New(group_instance_id).ToLocalChecked()); + } + + // assignment + const rd_kafka_MemberAssignment_t* assignment = + rd_kafka_MemberDescription_assignment(member); + const rd_kafka_topic_partition_list_t* partitions = + rd_kafka_MemberAssignment_partitions(assignment); + v8::Local topicPartitions = + Conversion::TopicPartition::ToTopicPartitionV8Array(partitions, false); + v8::Local 
assignmentObject = Nan::New(); + Nan::Set(assignmentObject, Nan::New("topicPartitions").ToLocalChecked(), + topicPartitions); + Nan::Set(returnObject, Nan::New("assignment").ToLocalChecked(), + assignmentObject); + + return returnObject; +} + +/** + * @brief Converts a rd_kafka_ConsumerGroupDescription_t* into a v8 object. + */ +v8::Local FromConsumerGroupDescription( + const rd_kafka_ConsumerGroupDescription_t* desc) { + /* Return object type: + { + groupId: string, + error: LibrdKafkaError, + members: MemberDescription[], + protocol: string + isSimpleConsumerGroup: boolean + protocolType: string + partitionAssignor: string + state: ConsumerGroupState - internally a number + coordinator: Node + authorizedOperations: AclOperationType[] - internally numbers + } + */ + v8::Local returnObject = Nan::New(); + + // groupId + Nan::Set( + returnObject, Nan::New("groupId").ToLocalChecked(), + Nan::New(rd_kafka_ConsumerGroupDescription_group_id(desc)) + .ToLocalChecked()); + + // error + const rd_kafka_error_t* error = rd_kafka_ConsumerGroupDescription_error(desc); + if (error) { + RdKafka::ErrorCode code = + static_cast(rd_kafka_error_code(error)); + std::string msg = std::string(rd_kafka_error_string(error)); + Nan::Set(returnObject, Nan::New("error").ToLocalChecked(), + RdKafkaError(code, msg)); + } + + // members + v8::Local members = Nan::New(); + size_t member_cnt = rd_kafka_ConsumerGroupDescription_member_count(desc); + for (size_t i = 0; i < member_cnt; i++) { + const rd_kafka_MemberDescription_t* member = + rd_kafka_ConsumerGroupDescription_member(desc, i); + Nan::Set(members, i, FromMemberDescription(member)); + } + Nan::Set(returnObject, Nan::New("members").ToLocalChecked(), members); + + // isSimpleConsumerGroup + bool is_simple = + rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(desc); + Nan::Set(returnObject, Nan::New("isSimpleConsumerGroup").ToLocalChecked(), + Nan::New(is_simple)); + + // protocolType + std::string protocolType = is_simple ? "simple" : "consumer"; + Nan::Set(returnObject, Nan::New("protocolType").ToLocalChecked(), + Nan::New(protocolType).ToLocalChecked()); + + // protocol + Nan::Set(returnObject, Nan::New("protocol").ToLocalChecked(), + Nan::New( + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + .ToLocalChecked()); + + // partitionAssignor + Nan::Set(returnObject, Nan::New("partitionAssignor").ToLocalChecked(), + Nan::New( + rd_kafka_ConsumerGroupDescription_partition_assignor(desc)) + .ToLocalChecked()); + + // state + Nan::Set(returnObject, Nan::New("state").ToLocalChecked(), + Nan::New(rd_kafka_ConsumerGroupDescription_state(desc))); + + // coordinator + const rd_kafka_Node_t* coordinator = + rd_kafka_ConsumerGroupDescription_coordinator(desc); + if (coordinator) { + v8::Local coordinatorObject = + Conversion::Util::ToV8Object(coordinator); + Nan::Set(returnObject, Nan::New("coordinator").ToLocalChecked(), + coordinatorObject); + } + + // authorizedOperations + size_t authorized_operations_cnt; + const rd_kafka_AclOperation_t* authorized_operations = + rd_kafka_ConsumerGroupDescription_authorized_operations( + desc, &authorized_operations_cnt); + if (authorized_operations) { + Nan::Set(returnObject, Nan::New("authorizedOperations").ToLocalChecked(), + Conversion::Util::ToV8Array(authorized_operations, + authorized_operations_cnt)); + } + + return returnObject; +} + +/** + * @brief Converts a rd_kafka_DescribeConsumerGroups_result_t* into a v8 object. 
+ */ +v8::Local FromDescribeConsumerGroupsResult( + const rd_kafka_DescribeConsumerGroups_result_t* result) { + /* Return object type: + { groups: GroupDescription[] } + */ + v8::Local returnObject = Nan::New(); + v8::Local groups = Nan::New(); + size_t groups_cnt; + const rd_kafka_ConsumerGroupDescription_t** groups_list = + rd_kafka_DescribeConsumerGroups_result_groups(result, &groups_cnt); + + for (size_t i = 0; i < groups_cnt; i++) { + const rd_kafka_ConsumerGroupDescription_t* group = groups_list[i]; + Nan::Set(groups, i, FromConsumerGroupDescription(group)); + } + + Nan::Set(returnObject, Nan::New("groups").ToLocalChecked(), groups); + return returnObject; +} + +/** + * @brief Converts a rd_kafka_DeleteGroups_result_t* into a v8 array. +*/ +v8::Local FromDeleteGroupsResult( + const rd_kafka_DeleteGroups_result_t* result) { + /* Return object type: + [{ + groupId: string + errorCode?: number + error?: LibrdKafkaError + }] + */ + v8::Local returnArray = Nan::New(); + size_t result_cnt; + const rd_kafka_group_result_t** results = + rd_kafka_DeleteGroups_result_groups(result, &result_cnt); + + for (size_t i = 0; i < result_cnt; i++) { + const rd_kafka_group_result_t* group_result = results[i]; + v8::Local group_object = Nan::New(); + + Nan::Set(group_object, Nan::New("groupId").ToLocalChecked(), + Nan::New(rd_kafka_group_result_name(group_result)) + .ToLocalChecked()); + + const rd_kafka_error_t* error = rd_kafka_group_result_error(group_result); + if (!error) { + Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), + Nan::New(RD_KAFKA_RESP_ERR_NO_ERROR)); + } else { + RdKafka::ErrorCode code = + static_cast(rd_kafka_error_code(error)); + const char* msg = rd_kafka_error_string(error); + + Nan::Set(group_object, Nan::New("errorCode").ToLocalChecked(), + Nan::New(code)); + Nan::Set(group_object, Nan::New("error").ToLocalChecked(), + RdKafkaError(code, msg)); + } + Nan::Set(returnArray, i, group_object); + } + + return returnArray; +} + } // namespace Admin } // namespace Conversion diff --git a/src/common.h b/src/common.h index 785a6515..d98508e3 100644 --- a/src/common.h +++ b/src/common.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
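
Each conversion helper above documents its output in a "Return object type" comment; gathered into TypeScript they amount to the shapes below. This is a readability sketch only, not type declarations shipped by the package, and it follows the comments in treating consumer group states and ACL operations as plain numbers:

// Shapes mirrored from the "Return object type" comments in common.cc above.
interface LibrdKafkaErrorLike { code: number; message?: string }

interface NodeInfo { id: number; host: string; port: number; rack?: string }

interface GroupListing {
  groupId: string;
  protocolType: string;            // "simple" or "consumer"
  isSimpleConsumerGroup: boolean;
  state: number;                   // ConsumerGroupState, numeric internally
}

interface ListConsumerGroupsResult {
  groups: GroupListing[];
  errors: LibrdKafkaErrorLike[];
}

interface MemberDescription {
  clientHost: string;
  clientId: string;
  memberId: string;
  memberAssignment: null;          // always null, not passed through
  memberMetadata: null;            // always null, not passed through
  groupInstanceId?: string;
  assignment: {
    topicPartitions: Array<{ topic: string; partition: number;
                             error?: LibrdKafkaErrorLike;
                             leaderEpoch?: number }>;
  };
}

interface DeleteGroupResult {
  groupId: string;
  errorCode?: number;              // NO_ERROR when the deletion succeeded
  error?: LibrdKafkaErrorLike;
}
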
@@ -12,11 +13,12 @@ #include +#include #include #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "rdkafka.h" // NOLINT #include "src/errors.h" @@ -34,8 +36,11 @@ template<> std::string GetParameter( v8::Local, std::string, std::string); template<> std::vector GetParameter >( v8::Local, std::string, std::vector); +template<> v8::Local GetParameter >( + v8::Local, std::string, v8::Local); // template int GetParameter(v8::Local v8ArrayToStringVector(v8::Local); +std::list v8ArrayToStringList(v8::Local); class scoped_mutex_lock { public: @@ -90,22 +95,49 @@ class scoped_shared_read_lock { namespace Conversion { -namespace Admin { - // Topics from topic object, or topic object array - rd_kafka_NewTopic_t* FromV8TopicObject( - v8::Local, std::string &errstr); // NOLINT - rd_kafka_NewTopic_t** FromV8TopicObjectArray(v8::Local); -} +namespace Util { +std::vector ToStringVector(v8::Local); +v8::Local ToV8Array(std::vector); +v8::Local ToV8Array(const rd_kafka_error_t **error_list, + size_t error_cnt); +v8::Local ToV8Array(const rd_kafka_AclOperation_t *, size_t); -namespace Topic { - std::vector ToStringVector(v8::Local); - v8::Local ToV8Array(std::vector); -} // namespace Topic +v8::Local ToV8Object(const rd_kafka_Node_t *); +} // namespace Util + +namespace Admin { +// Topics from topic object, or topic object array +rd_kafka_NewTopic_t *FromV8TopicObject(v8::Local, + std::string &errstr); +rd_kafka_NewTopic_t **FromV8TopicObjectArray(v8::Local); + +// ListGroups: request +std::vector FromV8GroupStateArray( + v8::Local); + +// ListGroups: response +v8::Local FromListConsumerGroupsResult( + const rd_kafka_ListConsumerGroups_result_t *); + +// DescribeGroups: response +v8::Local FromMemberDescription( + const rd_kafka_MemberDescription_t *member); +v8::Local FromConsumerGroupDescription( + const rd_kafka_ConsumerGroupDescription_t *desc); +v8::Local FromDescribeConsumerGroupsResult( + const rd_kafka_DescribeConsumerGroups_result_t *); + +// DeleteGroups: Response +v8::Local FromDeleteGroupsResult( + const rd_kafka_DeleteGroups_result_t *); +} // namespace Admin namespace TopicPartition { -v8::Local ToV8Array(std::vector &); -RdKafka::TopicPartition * FromV8Object(v8::Local); +v8::Local ToV8Array(std::vector &); +v8::Local ToTopicPartitionV8Array( + const rd_kafka_topic_partition_list_t *, bool include_offset); +RdKafka::TopicPartition *FromV8Object(v8::Local); std::vector FromV8Array(const v8::Local &); // NOLINT } // namespace TopicPartition diff --git a/src/config.cc b/src/config.cc index c1584b48..5f66b2d8 100644 --- a/src/config.cc +++ b/src/config.cc @@ -1,18 +1,18 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
*/ +#include "src/config.h" +#include #include #include #include -#include "src/config.h" - using Nan::MaybeLocal; using Nan::Maybe; using v8::Local; @@ -81,75 +81,139 @@ Conf * Conf::create(RdKafka::Conf::ConfType type, v8::Local object, return NULL; } } else { - v8::Local cb = value.As(); - rdconf->ConfigureCallback(string_key, cb, true, errstr); - if (!errstr.empty()) { - delete rdconf; - return NULL; - } - rdconf->ConfigureCallback(string_key, cb, false, errstr); - if (!errstr.empty()) { - delete rdconf; - return NULL; - } + // Do nothing - NodeConfigureCallbacks will handle this for each + // of the three client types, called from within JavaScript. } } return rdconf; } -void Conf::ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add, std::string &errstr) { +void Conf::ConfigureCallback( + const std::string &string_key, + const v8::Local &cb, + bool add, std::string &errstr) { if (string_key.compare("rebalance_cb") == 0) { + NodeKafka::Callbacks::Rebalance *rebalance = rebalance_cb(); if (add) { - if (this->m_rebalance_cb == NULL) { - this->m_rebalance_cb = new NodeKafka::Callbacks::Rebalance(); + if (rebalance == NULL) { + rebalance = new NodeKafka::Callbacks::Rebalance(); + this->set(string_key, rebalance, errstr); } - this->m_rebalance_cb->dispatcher.AddCallback(cb); - this->set(string_key, this->m_rebalance_cb, errstr); + rebalance->dispatcher.AddCallback(cb); + this->set(string_key, rebalance, errstr); } else { - if (this->m_rebalance_cb != NULL) { - this->m_rebalance_cb->dispatcher.RemoveCallback(cb); + if (rebalance == NULL) { + rebalance->dispatcher.RemoveCallback(cb); + this->set(string_key, rebalance, errstr); } } } else if (string_key.compare("offset_commit_cb") == 0) { + NodeKafka::Callbacks::OffsetCommit *offset_commit = offset_commit_cb(); if (add) { - if (this->m_offset_commit_cb == NULL) { - this->m_offset_commit_cb = new NodeKafka::Callbacks::OffsetCommit(); + if (offset_commit == NULL) { + offset_commit = new NodeKafka::Callbacks::OffsetCommit(); + this->set(string_key, offset_commit, errstr); } - this->m_offset_commit_cb->dispatcher.AddCallback(cb); - this->set(string_key, this->m_offset_commit_cb, errstr); + offset_commit->dispatcher.AddCallback(cb); } else { - if (this->m_offset_commit_cb != NULL) { - this->m_offset_commit_cb->dispatcher.RemoveCallback(cb); + if (offset_commit != NULL) { + offset_commit->dispatcher.RemoveCallback(cb); } } + } else if (string_key.compare("oauthbearer_token_refresh_cb") == 0) { + NodeKafka::Callbacks::OAuthBearerTokenRefresh *oauthbearer_token_refresh = + oauthbearer_token_refresh_cb(); + if (add) { + if (oauthbearer_token_refresh == NULL) { + oauthbearer_token_refresh = + new NodeKafka::Callbacks::OAuthBearerTokenRefresh(); + this->set(string_key, oauthbearer_token_refresh, errstr); + } + oauthbearer_token_refresh->dispatcher.AddCallback(cb); + } else { + if (oauthbearer_token_refresh != NULL) { + oauthbearer_token_refresh->dispatcher.RemoveCallback(cb); + } + } + } else { + errstr = "Invalid callback type"; } } void Conf::listen() { - if (m_rebalance_cb) { - m_rebalance_cb->dispatcher.Activate(); + NodeKafka::Callbacks::Rebalance *rebalance = rebalance_cb(); + if (rebalance) { + rebalance->dispatcher.Activate(); } - if (m_offset_commit_cb) { - m_offset_commit_cb->dispatcher.Activate(); + NodeKafka::Callbacks::OffsetCommit *offset_commit = offset_commit_cb(); + if (offset_commit) { + offset_commit->dispatcher.Activate(); + } + + NodeKafka::Callbacks::OAuthBearerTokenRefresh *oauthbearer_token_refresh = + 
oauthbearer_token_refresh_cb(); + if (oauthbearer_token_refresh) { + oauthbearer_token_refresh->dispatcher.Activate(); } } void Conf::stop() { - if (m_rebalance_cb) { - m_rebalance_cb->dispatcher.Deactivate(); + NodeKafka::Callbacks::Rebalance *rebalance = rebalance_cb(); + if (rebalance) { + rebalance->dispatcher.Deactivate(); } - if (m_offset_commit_cb) { - m_offset_commit_cb->dispatcher.Deactivate(); + NodeKafka::Callbacks::OffsetCommit *offset_commit = offset_commit_cb(); + if (offset_commit) { + offset_commit->dispatcher.Deactivate(); + } + + NodeKafka::Callbacks::OAuthBearerTokenRefresh *oauthbearer_token_refresh = + oauthbearer_token_refresh_cb(); + if (oauthbearer_token_refresh) { + oauthbearer_token_refresh->dispatcher.Deactivate(); } } Conf::~Conf() { - if (m_rebalance_cb) { - delete m_rebalance_cb; + // Delete the rdconf object, since that's what we are internally. + RdKafka::Conf *rdconf = static_cast(this); + delete rdconf; +} + +NodeKafka::Callbacks::Rebalance* Conf::rebalance_cb() const { + RdKafka::RebalanceCb *cb = NULL; + if (this->get(cb) != RdKafka::Conf::CONF_OK) { + return NULL; + } + return static_cast(cb); +} + +NodeKafka::Callbacks::OffsetCommit* Conf::offset_commit_cb() const { + RdKafka::OffsetCommitCb *cb = NULL; + if (this->get(cb) != RdKafka::Conf::CONF_OK) { + return NULL; + } + return static_cast(cb); +} + +NodeKafka::Callbacks::OAuthBearerTokenRefresh * +Conf::oauthbearer_token_refresh_cb() const { + RdKafka::OAuthBearerTokenRefreshCb *cb = NULL; + if (this->get(cb) != RdKafka::Conf::CONF_OK) { + return NULL; + } + return static_cast(cb); +} + +bool Conf::is_sasl_oauthbearer() const { + std::string sasl_mechanism; + if (this->get("sasl.mechanisms", sasl_mechanism) != RdKafka::Conf::CONF_OK) { + return false; } + return sasl_mechanism.compare("OAUTHBEARER") == 0; } } // namespace NodeKafka diff --git a/src/config.h b/src/config.h index 5358ce49..d7a5a786 100644 --- a/src/config.h +++ b/src/config.h @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -16,7 +16,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/callbacks.h" @@ -32,10 +32,28 @@ class Conf : public RdKafka::Conf { void listen(); void stop(); - void ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add, std::string &errstr); - protected: - NodeKafka::Callbacks::Rebalance * m_rebalance_cb = NULL; - NodeKafka::Callbacks::OffsetCommit * m_offset_commit_cb = NULL; + void ConfigureCallback( + const std::string &string_key, + const v8::Local &cb, + bool add, std::string &errstr); + + bool is_sasl_oauthbearer() const; + + private: + NodeKafka::Callbacks::Rebalance *rebalance_cb() const; + NodeKafka::Callbacks::OffsetCommit *offset_commit_cb() const; + NodeKafka::Callbacks::OAuthBearerTokenRefresh *oauthbearer_token_refresh_cb() + const; + + // NOTE: Do NOT add any members to this class. + // Internally, to get an instance of this class, we just cast RdKafka::Conf* + // that we obtain from RdKafka::Conf::create(). However, that's internally an + // instance of a sub-class, ConfImpl. This means that any members here are + // aliased to that with the wrong name (for example, the first member of this + // class, if it's a pointer, will be aliased to consume_cb_ in the ConfImpl, + // and and changing one will change the other!) 
+ // TODO: Just don't inherit from RdKafka::Conf, and instead have a member of + // type RdKafka::Conf*. }; } // namespace NodeKafka diff --git a/src/connection.cc b/src/connection.cc index cd203494..833c34f2 100644 --- a/src/connection.cc +++ b/src/connection.cc @@ -1,16 +1,18 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include "src/connection.h" +#include #include #include -#include "src/connection.h" #include "src/workers.h" using RdKafka::Conf; @@ -67,6 +69,45 @@ Connection::~Connection() { } } +Baton Connection::rdkafkaErrorToBaton(RdKafka::Error* error) { + if (NULL == error) { + return Baton(RdKafka::ERR_NO_ERROR); + } else { + Baton result(error->code(), error->str(), error->is_fatal(), + error->is_retriable(), error->txn_requires_abort()); + delete error; + return result; + } +} + +// If OAUTHBEARER authentication is set up, then push the callbacks onto the +// SASL queue so we don't need to keep polling. This method should be called +// before the client is created. +Baton Connection::setupSaslOAuthBearerConfig() { + if (!m_gconfig->is_sasl_oauthbearer()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + std::string errstr; + if (m_gconfig->enable_sasl_queue(true, errstr) != RdKafka::Conf::CONF_OK) { + return Baton(RdKafka::ERR__STATE, errstr); + } + + return Baton(RdKafka::ERR_NO_ERROR); +} + +// If OAUTHBEARER authentication is set up, then handle the callbacks on +// the background thread. This method should be called after the client is +// created and only if `setupSaslOAuthBearerConfig` is called earlier. 
+Baton Connection::setupSaslOAuthBearerBackgroundQueue() { + if (!m_gconfig->is_sasl_oauthbearer()) { + return Baton(RdKafka::ERR_NO_ERROR); + } + + RdKafka::Error* error = m_client->sasl_background_callbacks_enable(); + return rdkafkaErrorToBaton(error); +} + RdKafka::TopicPartition* Connection::GetPartition(std::string &topic) { return RdKafka::TopicPartition::create(topic, RdKafka::Topic::PARTITION_UA); } @@ -75,11 +116,11 @@ RdKafka::TopicPartition* Connection::GetPartition(std::string &topic, int partit return RdKafka::TopicPartition::create(topic, partition); } -bool Connection::IsConnected() { +bool Connection::IsConnected() const { return !m_is_closing && m_client != NULL; } -bool Connection::IsClosing() { +bool Connection::IsClosing() const { return m_client != NULL && m_is_closing; } @@ -87,6 +128,13 @@ RdKafka::Handle* Connection::GetClient() { return m_client; } +std::string Connection::Name() const { + if (!IsConnected()) { + return std::string(""); + } + return std::string(m_client->name()); +} + Baton Connection::CreateTopic(std::string topic_name) { return CreateTopic(topic_name, NULL); } @@ -210,12 +258,75 @@ Baton Connection::GetMetadata( return Baton(metadata); } else { // metadata is not set here - // @see https://github.com/edenhill/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 + // @see https://github.com/confluentinc/librdkafka/blob/master/src-cpp/rdkafkacpp.h#L860 // NOLINT return Baton(err); } } -void Connection::ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add) { +Baton Connection::SetSaslCredentials( + std::string username, std::string password) { + RdKafka::Error *error; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + // Always send true - we + error = m_client->sasl_set_credentials(username, password); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + return rdkafkaErrorToBaton(error); +} + +Baton Connection::SetOAuthBearerToken( + const std::string& value, int64_t lifetime_ms, + const std::string& principal_name, + const std::list& extensions) { + RdKafka::ErrorCode error_code; + std::string errstr; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token( + value, lifetime_ms, principal_name, extensions, errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + if (error_code != RdKafka::ERR_NO_ERROR) { + return Baton(error_code, errstr); + } + + return Baton(error_code); +} + +Baton Connection::SetOAuthBearerTokenFailure(const std::string& errstr) { + RdKafka::ErrorCode error_code; + + if (IsConnected()) { + scoped_shared_read_lock lock(m_connection_lock); + if (IsConnected()) { + error_code = m_client->oauthbearer_set_token_failure(errstr); + } else { + return Baton(RdKafka::ERR__STATE); + } + } else { + return Baton(RdKafka::ERR__STATE); + } + + return Baton(error_code); +} + +void Connection::ConfigureCallback( + const std::string &string_key, const v8::Local &cb, bool add) { if (string_key.compare("event_cb") == 0) { if (add) { this->m_event_cb.dispatcher.AddCallback(cb); @@ -337,6 +448,39 @@ NAN_METHOD(Connection::NodeQueryWatermarkOffsets) { info.GetReturnValue().Set(Nan::Null()); } +NAN_METHOD(Connection::NodeSetSaslCredentials) { + if (!info[0]->IsString()) { + Nan::ThrowError("1st parameter must be a username string"); + return; + } + + if (!info[1]->IsString()) 
{ + Nan::ThrowError("2nd parameter must be a password string"); + return; + } + + // Get string pointer for the username + Nan::Utf8String usernameUTF8(Nan::To(info[0]).ToLocalChecked()); + // The first parameter is the username + std::string username(*usernameUTF8); + + // Get string pointer for the password + Nan::Utf8String passwordUTF8(Nan::To(info[1]).ToLocalChecked()); + // The first parameter is the password + std::string password(*passwordUTF8); + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = obj->SetSaslCredentials(username, password); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + v8::Local errorObject = b.ToObject(); + return Nan::ThrowError(errorObject); + } + + info.GetReturnValue().Set(Nan::Null()); +} + + // Node methods NAN_METHOD(Connection::NodeConfigureCallbacks) { Nan::HandleScope scope; @@ -351,14 +495,18 @@ NAN_METHOD(Connection::NodeConfigureCallbacks) { Connection* obj = ObjectWrap::Unwrap(info.This()); const bool add = Nan::To(info[0]).ToChecked(); - v8::Local configs_object = info[1]->ToObject(context).ToLocalChecked(); - v8::Local configs_property_names = configs_object->GetOwnPropertyNames(context).ToLocalChecked(); + v8::Local configs_object = + info[1]->ToObject(context).ToLocalChecked(); + v8::Local configs_property_names = + configs_object->GetOwnPropertyNames(context).ToLocalChecked(); for (unsigned int j = 0; j < configs_property_names->Length(); ++j) { std::string configs_string_key; - v8::Local configs_key = Nan::Get(configs_property_names, j).ToLocalChecked(); - v8::Local configs_value = Nan::Get(configs_object, configs_key).ToLocalChecked(); + v8::Local configs_key = + Nan::Get(configs_property_names, j).ToLocalChecked(); + v8::Local configs_value = + Nan::Get(configs_object, configs_key).ToLocalChecked(); int config_type = 0; if (configs_value->IsObject() && configs_key->IsString()) { @@ -377,8 +525,10 @@ NAN_METHOD(Connection::NodeConfigureCallbacks) { continue; } - v8::Local object = configs_value->ToObject(context).ToLocalChecked(); - v8::Local property_names = object->GetOwnPropertyNames(context).ToLocalChecked(); + v8::Local object = + configs_value->ToObject(context).ToLocalChecked(); + v8::Local property_names = + object->GetOwnPropertyNames(context).ToLocalChecked(); for (unsigned int i = 0; i < property_names->Length(); ++i) { std::string errstr; @@ -420,4 +570,83 @@ NAN_METHOD(Connection::NodeConfigureCallbacks) { info.GetReturnValue().Set(Nan::True()); } +NAN_METHOD(Connection::NodeSetOAuthBearerToken) { + if (!info[0]->IsString()) { + Nan::ThrowError("1st parameter must be a token string"); + return; + } + + if (!info[1]->IsNumber()) { + Nan::ThrowError("2nd parameter must be a lifetime_ms number"); + return; + } + + if (!info[2]->IsString()) { + Nan::ThrowError("3rd parameter must be a principal_name string"); + return; + } + + if (!info[3]->IsNullOrUndefined() && !info[3]->IsArray()) { + Nan::ThrowError("4th parameter must be an extensions array or null"); + return; + } + + // Get string pointer for the token + Nan::Utf8String tokenUtf8(Nan::To(info[0]).ToLocalChecked()); + std::string token(*tokenUtf8); + + // Get the lifetime_ms + int64_t lifetime_ms = Nan::To(info[1]).FromJust(); + + // Get string pointer for the principal_name + Nan::Utf8String principal_nameUtf8( + Nan::To(info[2]).ToLocalChecked()); + std::string principal_name(*principal_nameUtf8); + + // Get the extensions (if any) + std::list extensions; + if (!info[3]->IsNullOrUndefined()) { + v8::Local extensionsArray = info[3].As(); + extensions = 
v8ArrayToStringList(extensionsArray); + } + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = + obj->SetOAuthBearerToken(token, lifetime_ms, principal_name, extensions); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + v8::Local errorObject = b.ToObject(); + return Nan::ThrowError(errorObject); + } + + info.GetReturnValue().Set(Nan::Null()); +} + +NAN_METHOD(Connection::NodeSetOAuthBearerTokenFailure) { + if (!info[0]->IsString()) { + Nan::ThrowError("1st parameter must be an error string"); + return; + } + + // Get string pointer for the error string + Nan::Utf8String errstrUtf8(Nan::To(info[0]).ToLocalChecked()); + std::string errstr(*errstrUtf8); + + Connection* obj = ObjectWrap::Unwrap(info.This()); + Baton b = obj->SetOAuthBearerTokenFailure(errstr); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + v8::Local errorObject = b.ToObject(); + return Nan::ThrowError(errorObject); + } + + info.GetReturnValue().Set(Nan::Null()); +} + +NAN_METHOD(Connection::NodeName) { + Connection* obj = ObjectWrap::Unwrap(info.This()); + std::string name = obj->Name(); + info.GetReturnValue().Set(Nan::New(name).ToLocalChecked()); +} + } // namespace NodeKafka diff --git a/src/connection.h b/src/connection.h index 9e335d63..c798814b 100644 --- a/src/connection.h +++ b/src/connection.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -12,10 +13,11 @@ #include #include +#include #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/errors.h" @@ -46,8 +48,8 @@ namespace NodeKafka { class Connection : public Nan::ObjectWrap { public: - bool IsConnected(); - bool IsClosing(); + bool IsConnected() const; + bool IsClosing() const; // Baton Baton CreateTopic(std::string); @@ -55,6 +57,10 @@ class Connection : public Nan::ObjectWrap { Baton GetMetadata(bool, std::string, int); Baton QueryWatermarkOffsets(std::string, int32_t, int64_t*, int64_t*, int); Baton OffsetsForTimes(std::vector &, int); + Baton SetSaslCredentials(std::string, std::string); + Baton SetOAuthBearerToken(const std::string&, int64_t, const std::string&, + const std::list&); + Baton SetOAuthBearerTokenFailure(const std::string&); RdKafka::Handle* GetClient(); @@ -66,7 +72,10 @@ class Connection : public Nan::ObjectWrap { virtual void ActivateDispatchers() = 0; virtual void DeactivateDispatchers() = 0; - virtual void ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add); + virtual void ConfigureCallback( + const std::string &string_key, const v8::Local &cb, bool add); + + std::string Name() const; protected: Connection(Conf*, Conf*); @@ -74,6 +83,10 @@ class Connection : public Nan::ObjectWrap { static Nan::Persistent constructor; static void New(const Nan::FunctionCallbackInfo& info); + static Baton rdkafkaErrorToBaton(RdKafka::Error* error); + + Baton setupSaslOAuthBearerConfig(); + Baton setupSaslOAuthBearerBackgroundQueue(); bool m_has_been_disconnected; bool m_is_closing; @@ -90,6 +103,10 @@ class Connection : public Nan::ObjectWrap { static NAN_METHOD(NodeGetMetadata); static NAN_METHOD(NodeQueryWatermarkOffsets); static NAN_METHOD(NodeOffsetsForTimes); + static NAN_METHOD(NodeSetSaslCredentials); + static 
NAN_METHOD(NodeSetOAuthBearerToken); + static NAN_METHOD(NodeSetOAuthBearerTokenFailure); + static NAN_METHOD(NodeName); }; } // namespace NodeKafka diff --git a/src/errors.cc b/src/errors.cc index 7ad40635..9d1d9675 100644 --- a/src/errors.cc +++ b/src/errors.cc @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -13,8 +14,8 @@ namespace NodeKafka { -v8::Local RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr) { // NOLINT - // +v8::Local RdKafkaError(const RdKafka::ErrorCode &err, + const std::string &errstr) { int code = static_cast(err); v8::Local ret = Nan::New(); @@ -28,11 +29,13 @@ v8::Local RdKafkaError(const RdKafka::ErrorCode &err, std::string er } v8::Local RdKafkaError(const RdKafka::ErrorCode &err) { - return RdKafkaError(err, RdKafka::err2str(err)); + std::string errstr = RdKafka::err2str(err); + return RdKafkaError(err, errstr); } -v8::Local RdKafkaError(const RdKafka::ErrorCode &err, std::string errstr, - bool isFatal, bool isRetriable, bool isTxnRequiresAbort) { +v8::Local RdKafkaError( + const RdKafka::ErrorCode &err, std::string errstr, + bool isFatal, bool isRetriable, bool isTxnRequiresAbort) { v8::Local ret = RdKafkaError(err, errstr); Nan::Set(ret, Nan::New("isFatal").ToLocalChecked(), @@ -68,6 +71,26 @@ Baton::Baton(const RdKafka::ErrorCode &code, std::string errstr, bool isFatal, m_isTxnRequiresAbort = isTxnRequiresAbort; } +/** + * Creates a Baton from an rd_kafka_error_t* and destroys it. + */ +Baton Baton::BatonFromErrorAndDestroy(rd_kafka_error_t *error) { + std::string errstr = rd_kafka_error_string(error); + RdKafka::ErrorCode err = + static_cast(rd_kafka_error_code(error)); + rd_kafka_error_destroy(error); + return Baton(err, errstr); +} + +/** + * Creates a Baton from an RdKafka::Error* and deletes it. + */ +Baton Baton::BatonFromErrorAndDestroy(RdKafka::Error *error) { + std::string errstr = error->str(); + RdKafka::ErrorCode err = error->code(); + delete error; + return Baton(err, errstr); +} v8::Local Baton::ToObject() { if (m_errstr.empty()) { @@ -78,7 +101,7 @@ v8::Local Baton::ToObject() { } v8::Local Baton::ToTxnObject() { - return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort); + return RdKafkaError(m_err, m_errstr, m_isFatal, m_isRetriable, m_isTxnRequiresAbort); // NOLINT } RdKafka::ErrorCode Baton::err() { diff --git a/src/errors.h b/src/errors.h index 799719eb..248d26ad 100644 --- a/src/errors.h +++ b/src/errors.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
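/*
 * Note on the error helpers defined above: Baton::BatonFromErrorAndDestroy()
 * (both the rd_kafka_error_t* and the RdKafka::Error* overloads) and the
 * static Connection::rdkafkaErrorToBaton() exist so callers never have to
 * remember to free the heap-allocated error objects librdkafka returns.
 * A minimal sketch of the intended call pattern, assuming a caller that
 * receives an RdKafka::Error* back from a consumer API (the wrapper name
 * here is illustrative only):
 *
 *   Baton DoIncrementalAssign(RdKafka::KafkaConsumer *consumer,
 *       const std::vector<RdKafka::TopicPartition*> &partitions) {
 *     RdKafka::Error *error = consumer->incremental_assign(partitions);
 *     if (error == NULL) {
 *       return Baton(RdKafka::ERR_NO_ERROR);
 *     }
 *     // Copies the code and message into the Baton, then deletes `error`.
 *     return Baton::BatonFromErrorAndDestroy(error);
 *   }
 *
 * The rd_kafka_error_t* overload does the same for C-API results, calling
 * rd_kafka_error_destroy() after extracting the code and string.
 */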
@@ -14,7 +15,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" @@ -28,6 +29,9 @@ class Baton { explicit Baton(const RdKafka::ErrorCode &, std::string, bool isFatal, bool isRetriable, bool isTxnRequiresAbort); + static Baton BatonFromErrorAndDestroy(rd_kafka_error_t *error); + static Baton BatonFromErrorAndDestroy(RdKafka::Error *error); + template T data() { return static_cast(m_data); } @@ -48,6 +52,8 @@ class Baton { }; v8::Local RdKafkaError(const RdKafka::ErrorCode &); +v8::Local RdKafkaError(const RdKafka::ErrorCode &, + const std::string &); } // namespace NodeKafka diff --git a/src/kafka-consumer.cc b/src/kafka-consumer.cc index eccab3e9..36789a25 100644 --- a/src/kafka-consumer.cc +++ b/src/kafka-consumer.cc @@ -1,12 +1,14 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include #include #include @@ -31,7 +33,8 @@ KafkaConsumer::KafkaConsumer(Conf* gconfig, Conf* tconfig): Connection(gconfig, tconfig) { std::string errstr; - m_gconfig->set("default_topic_conf", m_tconfig, errstr); + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); m_consume_loop = nullptr; } @@ -46,16 +49,30 @@ Baton KafkaConsumer::Connect() { return Baton(RdKafka::ERR_NO_ERROR); } + Baton baton = setupSaslOAuthBearerConfig(); + if (baton.err() != RdKafka::ERR_NO_ERROR) { + return baton; + } + std::string errstr; { scoped_shared_write_lock lock(m_connection_lock); - m_client = RdKafka::KafkaConsumer::create(m_gconfig, errstr); + m_consumer = RdKafka::KafkaConsumer::create(m_gconfig, errstr); + m_client = m_consumer; } if (!m_client || !errstr.empty()) { return Baton(RdKafka::ERR__STATE, errstr); } + /* Set the client name at the first possible opportunity for logging. 
*/ + m_event_cb.dispatcher.SetClientName(m_client->name()); + + baton = setupSaslOAuthBearerBackgroundQueue(); + if (baton.err() != RdKafka::ERR_NO_ERROR) { + return baton; + } + if (m_partitions.size() > 0) { m_client->resume(m_partitions); } @@ -83,12 +100,11 @@ Baton KafkaConsumer::Disconnect() { { scoped_shared_write_lock lock(m_connection_lock); - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - err = consumer->close(); + err = m_consumer->close(); delete m_client; m_client = NULL; + m_consumer = nullptr; } } @@ -161,10 +177,8 @@ Baton KafkaConsumer::Assign(std::vector partitions) { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is disconnected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - RdKafka::ErrorCode errcode = consumer->assign(partitions); + RdKafka::ErrorCode errcode = m_consumer->assign(partitions); if (errcode == RdKafka::ERR_NO_ERROR) { m_partition_cnt = partitions.size(); @@ -184,10 +198,7 @@ Baton KafkaConsumer::Unassign() { return Baton(RdKafka::ERR__STATE); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode errcode = consumer->unassign(); + RdKafka::ErrorCode errcode = m_consumer->unassign(); if (errcode != RdKafka::ERR_NO_ERROR) { return Baton(errcode); @@ -201,15 +212,67 @@ Baton KafkaConsumer::Unassign() { return Baton(RdKafka::ERR_NO_ERROR); } -Baton KafkaConsumer::Commit(std::vector toppars) { +Baton KafkaConsumer::IncrementalAssign( + std::vector partitions) { if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE, "KafkaConsumer is disconnected"); + } + + RdKafka::Error* error = m_consumer->incremental_assign(partitions); + + if (error == NULL) { + m_partition_cnt += partitions.size(); + // We assume here that there are no duplicate assigns and just transfer. + m_partitions.insert(m_partitions.end(), partitions.begin(), partitions.end()); // NOLINT + } else { + // If we're in error, destroy it, otherwise, don't (since we're using them). + RdKafka::TopicPartition::destroy(partitions); + } + + return rdkafkaErrorToBaton(error); +} + +Baton KafkaConsumer::IncrementalUnassign( + std::vector partitions) { + if (!IsClosing() && !IsConnected()) { return Baton(RdKafka::ERR__STATE); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); + RdKafka::Error* error = m_consumer->incremental_unassign(partitions); + + std::vector delete_partitions; + + if (error == NULL) { + // For now, use two for loops. Make more efficient if needed later. + for (unsigned int i = 0; i < partitions.size(); i++) { + for (unsigned int j = 0; j < m_partitions.size(); j++) { + if (partitions[i]->partition() == m_partitions[j]->partition() && + partitions[i]->topic() == m_partitions[j]->topic()) { + delete_partitions.push_back(m_partitions[j]); + m_partitions.erase(m_partitions.begin() + j); + m_partition_cnt--; + break; + } + } + } + } + + // Destroy the old list of partitions since we are no longer using it + RdKafka::TopicPartition::destroy(delete_partitions); + + // Destroy the partition args since those are only used to lookup the + // partitions that needed to be deleted. 
+ RdKafka::TopicPartition::destroy(partitions); + + return rdkafkaErrorToBaton(error); +} + +Baton KafkaConsumer::Commit(std::vector toppars) { + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE); + } - RdKafka::ErrorCode err = consumer->commitAsync(toppars); + RdKafka::ErrorCode err = m_consumer->commitAsync(toppars); return Baton(err); } @@ -219,12 +282,9 @@ Baton KafkaConsumer::Commit(RdKafka::TopicPartition * toppar) { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - // Need to put topic in a vector for it to work std::vector offsets = {toppar}; - RdKafka::ErrorCode err = consumer->commitAsync(offsets); + RdKafka::ErrorCode err = m_consumer->commitAsync(offsets); return Baton(err); } @@ -235,10 +295,7 @@ Baton KafkaConsumer::Commit() { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->commitAsync(); + RdKafka::ErrorCode err = m_consumer->commitAsync(); return Baton(err); } @@ -249,10 +306,7 @@ Baton KafkaConsumer::CommitSync(std::vector toppars) { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->commitSync(toppars); + RdKafka::ErrorCode err = m_consumer->commitSync(toppars); // RdKafka::TopicPartition::destroy(toppars); return Baton(err); @@ -263,12 +317,9 @@ Baton KafkaConsumer::CommitSync(RdKafka::TopicPartition * toppar) { return Baton(RdKafka::ERR__STATE); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - // Need to put topic in a vector for it to work std::vector offsets = {toppar}; - RdKafka::ErrorCode err = consumer->commitSync(offsets); + RdKafka::ErrorCode err = m_consumer->commitSync(offsets); return Baton(err); } @@ -279,10 +330,7 @@ Baton KafkaConsumer::CommitSync() { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->commitSync(); + RdKafka::ErrorCode err = m_consumer->commitSync(); return Baton(err); } @@ -292,10 +340,7 @@ Baton KafkaConsumer::Seek(const RdKafka::TopicPartition &partition, int timeout_ return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->seek(partition, timeout_ms); + RdKafka::ErrorCode err = m_consumer->seek(partition, timeout_ms); return Baton(err); } @@ -306,10 +351,7 @@ Baton KafkaConsumer::Committed(std::vector &toppars, return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->committed(toppars, timeout_ms); + RdKafka::ErrorCode err = m_consumer->committed(toppars, timeout_ms); return Baton(err); } @@ -319,10 +361,7 @@ Baton KafkaConsumer::Position(std::vector &toppars) { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode err = consumer->position(toppars); + RdKafka::ErrorCode err = m_consumer->position(toppars); return Baton(err); } @@ -332,13 +371,10 @@ Baton KafkaConsumer::Subscription() { return Baton(RdKafka::ERR__STATE, "Consumer is not connected"); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - // Needs to be a 
pointer since we're returning it through the baton std::vector * topics = new std::vector; - RdKafka::ErrorCode err = consumer->subscription(*topics); + RdKafka::ErrorCode err = m_consumer->subscription(*topics); if (err == RdKafka::ErrorCode::ERR_NO_ERROR) { // Good to go @@ -350,9 +386,7 @@ Baton KafkaConsumer::Subscription() { Baton KafkaConsumer::Unsubscribe() { if (IsConnected() && IsSubscribed()) { - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - consumer->unsubscribe(); + m_consumer->unsubscribe(); m_is_subscribed = false; } @@ -361,10 +395,7 @@ Baton KafkaConsumer::Unsubscribe() { Baton KafkaConsumer::Pause(std::vector & toppars) { if (IsConnected()) { - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - RdKafka::ErrorCode err = consumer->pause(toppars); - + RdKafka::ErrorCode err = m_consumer->pause(toppars); return Baton(err); } @@ -373,9 +404,7 @@ Baton KafkaConsumer::Pause(std::vector & toppars) { Baton KafkaConsumer::Resume(std::vector & toppars) { if (IsConnected()) { - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - RdKafka::ErrorCode err = consumer->resume(toppars); + RdKafka::ErrorCode err = m_consumer->resume(toppars); return Baton(err); } @@ -383,16 +412,15 @@ Baton KafkaConsumer::Resume(std::vector & toppars) { return Baton(RdKafka::ERR__STATE); } -Baton KafkaConsumer::OffsetsStore(std::vector & toppars) { // NOLINT - if (IsConnected() && IsSubscribed()) { - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - RdKafka::ErrorCode err = consumer->offsets_store(toppars); - - return Baton(err); +Baton KafkaConsumer::OffsetsStore( + std::vector& toppars) { // NOLINT + if (!IsSubscribed()) { /* IsSubscribed also checks IsConnected */ + return Baton(RdKafka::ERR__STATE); } - return Baton(RdKafka::ERR__STATE); + RdKafka::ErrorCode err = m_consumer->offsets_store(toppars); + + return Baton(err); } Baton KafkaConsumer::Subscribe(std::vector topics) { @@ -400,10 +428,7 @@ Baton KafkaConsumer::Subscribe(std::vector topics) { return Baton(RdKafka::ERR__STATE); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::ErrorCode errcode = consumer->subscribe(topics); + RdKafka::ErrorCode errcode = m_consumer->subscribe(topics); if (errcode != RdKafka::ERR_NO_ERROR) { return Baton(errcode); } @@ -419,10 +444,7 @@ Baton KafkaConsumer::Consume(int timeout_ms) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE, "KafkaConsumer is not connected"); } else { - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - - RdKafka::Message * message = consumer->consume(timeout_ms); + RdKafka::Message * message = m_consumer->consume(timeout_ms); RdKafka::ErrorCode response_code = message->err(); // we want to handle these errors at the call site if (response_code != RdKafka::ERR_NO_ERROR && @@ -446,11 +468,8 @@ Baton KafkaConsumer::RefreshAssignments() { return Baton(RdKafka::ERR__STATE); } - RdKafka::KafkaConsumer* consumer = - dynamic_cast(m_client); - std::vector partition_list; - RdKafka::ErrorCode err = consumer->assignment(partition_list); + RdKafka::ErrorCode err = m_consumer->assignment(partition_list); switch (err) { case RdKafka::ERR_NO_ERROR: @@ -469,11 +488,17 @@ Baton KafkaConsumer::RefreshAssignments() { } } -std::string KafkaConsumer::Name() { +Baton KafkaConsumer::AssignmentLost() { + bool lost = m_consumer->assignment_lost(); + return Baton(reinterpret_cast(lost)); +} + +std::string KafkaConsumer::RebalanceProtocol() { if (!IsConnected()) { - return std::string(""); + return std::string("NONE"); } 
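// Note on AssignmentLost() and RebalanceProtocol(): AssignmentLost() has no
// error case to report, so it carries the boolean through the Baton's void*
// payload (a reinterpret_cast on the way in, read back as a bool via
// Baton::data() in NodeAssignmentLost). RebalanceProtocol() returns "NONE"
// when the consumer is not connected, mirroring the value librdkafka reports
// when no rebalance protocol is in effect.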
- return std::string(m_client->name()); + + return m_consumer->rebalance_protocol(); } Nan::Persistent KafkaConsumer::constructor; @@ -503,6 +528,10 @@ void KafkaConsumer::Init(v8::Local exports) { Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT Nan::SetPrototypeMethod(tpl, "offsetsForTimes", NodeOffsetsForTimes); Nan::SetPrototypeMethod(tpl, "getWatermarkOffsets", NodeGetWatermarkOffsets); + Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + NodeSetOAuthBearerTokenFailure); /* * @brief Methods exposed to do with message retrieval @@ -528,11 +557,17 @@ void KafkaConsumer::Init(v8::Local exports) { Nan::SetPrototypeMethod(tpl, "position", NodePosition); Nan::SetPrototypeMethod(tpl, "assign", NodeAssign); Nan::SetPrototypeMethod(tpl, "unassign", NodeUnassign); + Nan::SetPrototypeMethod(tpl, "incrementalAssign", NodeIncrementalAssign); + Nan::SetPrototypeMethod(tpl, "incrementalUnassign", NodeIncrementalUnassign); Nan::SetPrototypeMethod(tpl, "assignments", NodeAssignments); + Nan::SetPrototypeMethod(tpl, "assignmentLost", NodeAssignmentLost); + Nan::SetPrototypeMethod(tpl, "rebalanceProtocol", NodeRebalanceProtocol); Nan::SetPrototypeMethod(tpl, "commit", NodeCommit); Nan::SetPrototypeMethod(tpl, "commitSync", NodeCommitSync); + Nan::SetPrototypeMethod(tpl, "commitCb", NodeCommitCb); Nan::SetPrototypeMethod(tpl, "offsetsStore", NodeOffsetsStore); + Nan::SetPrototypeMethod(tpl, "offsetsStoreSingle", NodeOffsetsStoreSingle); constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) .ToLocalChecked()); @@ -553,10 +588,6 @@ void KafkaConsumer::New(const Nan::FunctionCallbackInfo& info) { return Nan::ThrowError("Global configuration data must be specified"); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Topic configuration must be specified"); - } - std::string errstr; Conf* gconfig = @@ -567,15 +598,19 @@ void KafkaConsumer::New(const Nan::FunctionCallbackInfo& info) { return Nan::ThrowError(errstr.c_str()); } - Conf* tconfig = - Conf::create(RdKafka::Conf::CONF_TOPIC, + // If tconfig isn't set, then just let us pick properties from gconf. + Conf* tconfig = nullptr; + if (info[1]->IsObject()) { + tconfig = Conf::create(RdKafka::Conf::CONF_TOPIC, (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); - if (!tconfig) { - delete gconfig; - return Nan::ThrowError(errstr.c_str()); + if (!tconfig) { + delete gconfig; + return Nan::ThrowError(errstr.c_str()); + } } + // TODO: fix this - this memory is leaked. 
KafkaConsumer* consumer = new KafkaConsumer(gconfig, tconfig); // Wrap it @@ -651,7 +686,7 @@ NAN_METHOD(KafkaConsumer::NodeSubscription) { std::vector * topics = b.data*>(); - info.GetReturnValue().Set(Conversion::Topic::ToV8Array(*topics)); + info.GetReturnValue().Set(Conversion::Util::ToV8Array(*topics)); delete topics; } @@ -701,6 +736,23 @@ NAN_METHOD(KafkaConsumer::NodeAssignments) { Conversion::TopicPartition::ToV8Array(consumer->m_partitions)); } +NAN_METHOD(KafkaConsumer::NodeAssignmentLost) { + Nan::HandleScope scope; + + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + + Baton b = consumer->AssignmentLost(); + + bool lost = b.data(); + info.GetReturnValue().Set(Nan::New(lost)); +} + +NAN_METHOD(KafkaConsumer::NodeRebalanceProtocol) { + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + std::string protocol = consumer->RebalanceProtocol(); + info.GetReturnValue().Set(Nan::New(protocol).ToLocalChecked()); +} + NAN_METHOD(KafkaConsumer::NodeAssign) { Nan::HandleScope scope; @@ -779,6 +831,125 @@ NAN_METHOD(KafkaConsumer::NodeUnassign) { info.GetReturnValue().Set(Nan::True()); } +NAN_METHOD(KafkaConsumer::NodeIncrementalAssign) { + Nan::HandleScope scope; + + if (info.Length() < 1 || !info[0]->IsArray()) { + // Just throw an exception + return Nan::ThrowError("Need to specify an array of partitions"); + } + + v8::Local partitions = info[0].As(); + std::vector topic_partitions; + + for (unsigned int i = 0; i < partitions->Length(); ++i) { + v8::Local partition_obj_value; + if (!( + Nan::Get(partitions, i).ToLocal(&partition_obj_value) && + partition_obj_value->IsObject())) { + Nan::ThrowError("Must pass topic-partition objects"); + } + + v8::Local partition_obj = partition_obj_value.As(); + + // Got the object + int64_t partition = GetParameter(partition_obj, "partition", -1); + std::string topic = GetParameter(partition_obj, "topic", ""); + + if (!topic.empty()) { + RdKafka::TopicPartition* part; + + if (partition < 0) { + part = Connection::GetPartition(topic); + } else { + part = Connection::GetPartition(topic, partition); + } + + // Set the default value to offset invalid. If provided, we will not set + // the offset. + int64_t offset = GetParameter( + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + if (offset != RdKafka::Topic::OFFSET_INVALID) { + part->set_offset(offset); + } + + topic_partitions.push_back(part); + } + } + + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + + // Hand over the partitions to the consumer. 
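// Ownership note: on success IncrementalAssign() keeps these TopicPartition
// objects (they are appended to m_partitions), and on failure it destroys
// them itself, so this method must not free them in either case.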
+ Baton b = consumer->IncrementalAssign(topic_partitions); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + v8::Local errorObject = b.ToObject(); + Nan::ThrowError(errorObject); + } + + info.GetReturnValue().Set(Nan::True()); +} + +NAN_METHOD(KafkaConsumer::NodeIncrementalUnassign) { + Nan::HandleScope scope; + + if (info.Length() < 1 || !info[0]->IsArray()) { + // Just throw an exception + return Nan::ThrowError("Need to specify an array of partitions"); + } + + v8::Local partitions = info[0].As(); + std::vector topic_partitions; + + for (unsigned int i = 0; i < partitions->Length(); ++i) { + v8::Local partition_obj_value; + if (!( + Nan::Get(partitions, i).ToLocal(&partition_obj_value) && + partition_obj_value->IsObject())) { + Nan::ThrowError("Must pass topic-partition objects"); + } + + v8::Local partition_obj = partition_obj_value.As(); + + // Got the object + int64_t partition = GetParameter(partition_obj, "partition", -1); + std::string topic = GetParameter(partition_obj, "topic", ""); + + if (!topic.empty()) { + RdKafka::TopicPartition* part; + + if (partition < 0) { + part = Connection::GetPartition(topic); + } else { + part = Connection::GetPartition(topic, partition); + } + + // Set the default value to offset invalid. If provided, we will not set + // the offset. + int64_t offset = GetParameter( + partition_obj, "offset", RdKafka::Topic::OFFSET_INVALID); + if (offset != RdKafka::Topic::OFFSET_INVALID) { + part->set_offset(offset); + } + + topic_partitions.push_back(part); + } + } + + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + + // Hand over the partitions to the consumer. + Baton b = consumer->IncrementalUnassign(topic_partitions); + + if (b.err() != RdKafka::ERR_NO_ERROR) { + v8::Local errorObject = b.ToObject(); + Nan::ThrowError(errorObject); + } + + info.GetReturnValue().Set(Nan::True()); +} + + NAN_METHOD(KafkaConsumer::NodeUnsubscribe) { Nan::HandleScope scope; @@ -875,6 +1046,45 @@ NAN_METHOD(KafkaConsumer::NodeCommitSync) { info.GetReturnValue().Set(Nan::New(error_code)); } +NAN_METHOD(KafkaConsumer::NodeCommitCb) { + Nan::HandleScope scope; + int error_code; + std::optional> toppars = std::nullopt; + Nan::Callback *callback; + + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + + if (!consumer->IsConnected()) { + Nan::ThrowError("KafkaConsumer is disconnected"); + return; + } + + if (info.Length() != 2) { + Nan::ThrowError("Two arguments are required"); + return; + } + + if (!( + (info[0]->IsArray() || info[0]->IsNull()) && + info[1]->IsFunction())) { + Nan::ThrowError( + "First argument should be an array or null and second one a callback"); + return; + } + + if (info[0]->IsArray()) { + toppars = + Conversion::TopicPartition::FromV8Array(info[0].As()); + } + callback = new Nan::Callback(info[1].As()); + + Nan::AsyncQueueWorker( + new Workers::KafkaConsumerCommitCb(callback, consumer, + toppars)); + + info.GetReturnValue().Set(Nan::Null()); +} + NAN_METHOD(KafkaConsumer::NodeSubscribe) { Nan::HandleScope scope; @@ -886,7 +1096,8 @@ NAN_METHOD(KafkaConsumer::NodeSubscribe) { KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); v8::Local topicsArray = info[0].As(); - std::vector topics = Conversion::Topic::ToStringVector(topicsArray); // NOLINT + std::vector topics = + Conversion::Util::ToStringVector(topicsArray); Baton b = consumer->Subscribe(topics); @@ -971,6 +1182,39 @@ NAN_METHOD(KafkaConsumer::NodeOffsetsStore) { info.GetReturnValue().Set(Nan::New(error_code)); } +NAN_METHOD(KafkaConsumer::NodeOffsetsStoreSingle) { + 
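/*
 * NodeOffsetsStoreSingle stores a single offset without requiring a
 * topic-partition array from the JS side: it expects (topic, partition,
 * offset, leaderEpoch), builds one RdKafka::TopicPartition carrying the
 * leader epoch, hands it to OffsetsStore(), and returns the resulting
 * RdKafka error code as a number.
 */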
Nan::HandleScope scope; + + // If number of parameters is less than 3 (need topic partition, partition, + // offset, and leader epoch), we can't call this. + if (info.Length() < 4) { + return Nan::ThrowError( + "Must provide topic, partition, offset and leaderEpoch"); + } + + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); + + // Get string pointer for the topic name + Nan::Utf8String topicUTF8(Nan::To(info[0]).ToLocalChecked()); + const std::string& topic_name(*topicUTF8); + + int64_t partition = Nan::To(info[1]).FromJust(); + int64_t offset = Nan::To(info[2]).FromJust(); + int64_t leader_epoch = Nan::To(info[3]).FromJust(); + + RdKafka::TopicPartition* toppar = + RdKafka::TopicPartition::create(topic_name, partition, offset); + toppar->set_leader_epoch(leader_epoch); + std::vector toppars = {toppar}; + + Baton b = consumer->OffsetsStore(toppars); + + delete toppar; + + int error_code = static_cast(b.err()); + info.GetReturnValue().Set(Nan::New(error_code)); +} + NAN_METHOD(KafkaConsumer::NodePause) { Nan::HandleScope scope; @@ -1098,7 +1342,8 @@ NAN_METHOD(KafkaConsumer::NodeConsumeLoop) { Nan::Callback *callback = new Nan::Callback(cb); - consumer->m_consume_loop = new Workers::KafkaConsumerConsumeLoop(callback, consumer, timeout_ms, timeout_sleep_delay_ms); + consumer->m_consume_loop = + new Workers::KafkaConsumerConsumeLoop(callback, consumer, timeout_ms, timeout_sleep_delay_ms); // NOLINT info.GetReturnValue().Set(Nan::Null()); } @@ -1122,7 +1367,11 @@ NAN_METHOD(KafkaConsumer::NodeConsume) { } if (info[1]->IsNumber()) { - if (!info[2]->IsFunction()) { + if (!info[2]->IsBoolean()) { + return Nan::ThrowError("Need to specify a boolean"); + } + + if (!info[3]->IsFunction()) { return Nan::ThrowError("Need to specify a callback"); } @@ -1136,12 +1385,23 @@ NAN_METHOD(KafkaConsumer::NodeConsume) { numMessages = numMessagesMaybe.FromJust(); } + v8::Local isTimeoutOnlyForFirstMessageBoolean = info[2].As(); // NOLINT + Nan::Maybe isTimeoutOnlyForFirstMessageMaybe = + Nan::To(isTimeoutOnlyForFirstMessageBoolean); + + bool isTimeoutOnlyForFirstMessage; + if (isTimeoutOnlyForFirstMessageMaybe.IsNothing()) { + return Nan::ThrowError("Parameter must be a boolean"); + } else { + isTimeoutOnlyForFirstMessage = isTimeoutOnlyForFirstMessageMaybe.FromJust(); // NOLINT + } + KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - v8::Local cb = info[2].As(); + v8::Local cb = info[3].As(); Nan::Callback *callback = new Nan::Callback(cb); Nan::AsyncQueueWorker( - new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms)); // NOLINT + new Workers::KafkaConsumerConsumeNum(callback, consumer, numMessages, timeout_ms, isTimeoutOnlyForFirstMessage)); // NOLINT } else { if (!info[1]->IsFunction()) { @@ -1187,7 +1447,8 @@ NAN_METHOD(KafkaConsumer::NodeDisconnect) { Nan::Callback *callback = new Nan::Callback(cb); KafkaConsumer* consumer = ObjectWrap::Unwrap(info.This()); - Workers::KafkaConsumerConsumeLoop* consumeLoop = (Workers::KafkaConsumerConsumeLoop*)consumer->m_consume_loop; + Workers::KafkaConsumerConsumeLoop* consumeLoop = + (Workers::KafkaConsumerConsumeLoop*)consumer->m_consume_loop; if (consumeLoop != nullptr) { // stop the consume loop consumeLoop->Close(); diff --git a/src/kafka-consumer.h b/src/kafka-consumer.h index d5991944..9da6f2cb 100644 --- a/src/kafka-consumer.h +++ b/src/kafka-consumer.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * 
Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -16,7 +17,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/connection.h" @@ -67,6 +68,7 @@ class KafkaConsumer : public Connection { Baton Position(std::vector &); Baton RefreshAssignments(); + Baton AssignmentLost(); bool HasAssignedPartitions(); int AssignedPartitionCount(); @@ -74,9 +76,12 @@ class KafkaConsumer : public Connection { Baton Assign(std::vector); Baton Unassign(); - Baton Seek(const RdKafka::TopicPartition &partition, int timeout_ms); + Baton IncrementalAssign(std::vector); + Baton IncrementalUnassign(std::vector); + + std::string RebalanceProtocol(); - std::string Name(); + Baton Seek(const RdKafka::TopicPartition &partition, int timeout_ms); Baton Subscribe(std::vector); Baton Consume(int timeout_ms); @@ -100,17 +105,27 @@ class KafkaConsumer : public Connection { void* m_consume_loop = nullptr; + /* This is the same client as stored in m_client. + * Prevents a dynamic_cast in every single method. */ + RdKafka::KafkaConsumer *m_consumer = nullptr; + // Node methods static NAN_METHOD(NodeConnect); static NAN_METHOD(NodeSubscribe); static NAN_METHOD(NodeDisconnect); static NAN_METHOD(NodeAssign); static NAN_METHOD(NodeUnassign); + static NAN_METHOD(NodeIncrementalAssign); + static NAN_METHOD(NodeIncrementalUnassign); static NAN_METHOD(NodeAssignments); + static NAN_METHOD(NodeAssignmentLost); + static NAN_METHOD(NodeRebalanceProtocol); static NAN_METHOD(NodeUnsubscribe); static NAN_METHOD(NodeCommit); static NAN_METHOD(NodeCommitSync); + static NAN_METHOD(NodeCommitCb); static NAN_METHOD(NodeOffsetsStore); + static NAN_METHOD(NodeOffsetsStoreSingle); static NAN_METHOD(NodeCommitted); static NAN_METHOD(NodePosition); static NAN_METHOD(NodeSubscription); diff --git a/src/producer.cc b/src/producer.cc index f5a32b1b..aeab634e 100644 --- a/src/producer.cc +++ b/src/producer.cc @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. 
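/*
 * Design note on the consumer header above: KafkaConsumer now caches the
 * concrete handle as RdKafka::KafkaConsumer *m_consumer next to the
 * base-class m_client (both point at the same object created in Connect()),
 * which removes the dynamic_cast<RdKafka::KafkaConsumer*>(m_client) that
 * every consumer method previously performed. Disconnect() clears both
 * pointers together so they can never refer to different objects.
 */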
@@ -32,10 +33,13 @@ namespace NodeKafka { Producer::Producer(Conf* gconfig, Conf* tconfig): Connection(gconfig, tconfig), m_dr_cb(), - m_partitioner_cb() { + m_partitioner_cb(), + m_is_background_polling(false) { std::string errstr; - m_gconfig->set("default_topic_conf", m_tconfig, errstr); + if (m_tconfig) + m_gconfig->set("default_topic_conf", m_tconfig, errstr); + m_gconfig->set("dr_cb", &m_dr_cb, errstr); } @@ -69,6 +73,11 @@ void Producer::Init(v8::Local exports) { Nan::SetPrototypeMethod(tpl, "getMetadata", NodeGetMetadata); Nan::SetPrototypeMethod(tpl, "queryWatermarkOffsets", NodeQueryWatermarkOffsets); // NOLINT Nan::SetPrototypeMethod(tpl, "poll", NodePoll); + Nan::SetPrototypeMethod(tpl, "setPollInBackground", NodeSetPollInBackground); + Nan::SetPrototypeMethod(tpl, "setSaslCredentials", NodeSetSaslCredentials); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerToken", NodeSetOAuthBearerToken); + Nan::SetPrototypeMethod(tpl, "setOAuthBearerTokenFailure", + NodeSetOAuthBearerTokenFailure); /* * @brief Methods exposed to do with message production @@ -87,7 +96,7 @@ void Producer::Init(v8::Local exports) { Nan::SetPrototypeMethod(tpl, "beginTransaction", NodeBeginTransaction); Nan::SetPrototypeMethod(tpl, "commitTransaction", NodeCommitTransaction); Nan::SetPrototypeMethod(tpl, "abortTransaction", NodeAbortTransaction); - Nan::SetPrototypeMethod(tpl, "sendOffsetsToTransaction", NodeSendOffsetsToTransaction); + Nan::SetPrototypeMethod(tpl, "sendOffsetsToTransaction", NodeSendOffsetsToTransaction); // NOLINT // connect. disconnect. resume. pause. get meta data constructor.Reset((tpl->GetFunction(Nan::GetCurrentContext())) @@ -110,10 +119,6 @@ void Producer::New(const Nan::FunctionCallbackInfo& info) { return Nan::ThrowError("Global configuration data must be specified"); } - if (!info[1]->IsObject()) { - return Nan::ThrowError("Topic configuration must be specified"); - } - std::string errstr; Conf* gconfig = @@ -124,14 +129,18 @@ void Producer::New(const Nan::FunctionCallbackInfo& info) { return Nan::ThrowError(errstr.c_str()); } - Conf* tconfig = - Conf::create(RdKafka::Conf::CONF_TOPIC, - (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); + // If tconfig isn't set, then just let us pick properties from gconf. 
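// In other words, the topic-configuration argument becomes optional for the
// Producer (as it does for KafkaConsumer above); when it is omitted,
// topic-level defaults are taken from properties set on the global
// configuration object instead.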
+ Conf* tconfig = nullptr; + if (info[1]->IsObject()) { + tconfig = Conf::create( + RdKafka::Conf::CONF_TOPIC, + (info[1]->ToObject(Nan::GetCurrentContext())).ToLocalChecked(), errstr); - if (!tconfig) { - // No longer need this since we aren't instantiating anything - delete gconfig; - return Nan::ThrowError(errstr.c_str()); + if (!tconfig) { + // No longer need this since we aren't instantiating anything + delete gconfig; + return Nan::ThrowError(errstr.c_str()); + } } Producer* producer = new Producer(gconfig, tconfig); @@ -159,39 +168,42 @@ v8::Local Producer::NewInstance(v8::Local arg) { return scope.Escape(instance); } - -std::string Producer::Name() { - if (!IsConnected()) { - return std::string(""); - } - return std::string(m_client->name()); -} - Baton Producer::Connect() { if (IsConnected()) { return Baton(RdKafka::ERR_NO_ERROR); } std::string errstr; + + Baton baton = setupSaslOAuthBearerConfig(); + if (baton.err() != RdKafka::ERR_NO_ERROR) { + return baton; + } + { scoped_shared_read_lock lock(m_connection_lock); m_client = RdKafka::Producer::create(m_gconfig, errstr); } if (!m_client) { - // @todo implement errstr into this somehow return Baton(RdKafka::ERR__STATE, errstr); } - return Baton(RdKafka::ERR_NO_ERROR); + /* Set the client name at the first possible opportunity for logging. */ + m_event_cb.dispatcher.SetClientName(m_client->name()); + + baton = setupSaslOAuthBearerBackgroundQueue(); + return baton; } void Producer::ActivateDispatchers() { + m_gconfig->listen(); // From global config. m_event_cb.dispatcher.Activate(); // From connection m_dr_cb.dispatcher.Activate(); } void Producer::DeactivateDispatchers() { + m_gconfig->stop(); // From global config. m_event_cb.dispatcher.Deactivate(); // From connection m_dr_cb.dispatcher.Deactivate(); } @@ -324,14 +336,45 @@ Baton Producer::Produce(void* message, size_t size, std::string topic, } void Producer::Poll() { + // We're not allowed to call poll when we have forwarded the main + // queue to the background queue, as that would indirectly poll + // the background queue. However, that's not allowed by librdkafka. 
+ if (m_is_background_polling) { + return; + } m_client->poll(0); } -void Producer::ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add) { +Baton Producer::SetPollInBackground(bool set) { + scoped_shared_read_lock lock(m_connection_lock); + rd_kafka_t* rk = this->m_client->c_ptr(); + if (!IsConnected()) { + return Baton(RdKafka::ERR__STATE, "Producer is disconnected"); + } + + if (set && !m_is_background_polling) { + m_is_background_polling = true; + rd_kafka_queue_t* main_q = rd_kafka_queue_get_main(rk); + rd_kafka_queue_t* background_q = rd_kafka_queue_get_background(rk); + rd_kafka_queue_forward(main_q, background_q); + rd_kafka_queue_destroy(main_q); + rd_kafka_queue_destroy(background_q); + } else if (!set && m_is_background_polling) { + m_is_background_polling = false; + rd_kafka_queue_t* main_q = rd_kafka_queue_get_main(rk); + rd_kafka_queue_forward(main_q, NULL); + rd_kafka_queue_destroy(main_q); + } + + return Baton(RdKafka::ERR_NO_ERROR); +} + +void Producer::ConfigureCallback(const std::string& string_key, + const v8::Local& cb, bool add) { if (string_key.compare("delivery_cb") == 0) { if (add) { bool dr_msg_cb = false; - v8::Local dr_msg_cb_key = Nan::New("dr_msg_cb").ToLocalChecked(); + v8::Local dr_msg_cb_key = Nan::New("dr_msg_cb").ToLocalChecked(); // NOLINT if (Nan::Has(cb, dr_msg_cb_key).FromMaybe(false)) { v8::Local v = Nan::Get(cb, dr_msg_cb_key).ToLocalChecked(); if (v->IsBoolean()) { @@ -350,18 +393,6 @@ void Producer::ConfigureCallback(const std::string &string_key, const v8::Local< } } -Baton rdkafkaErrorToBaton(RdKafka::Error* error) { - if ( NULL == error) { - return Baton(RdKafka::ERR_NO_ERROR); - } - else { - Baton result(error->code(), error->str(), error->is_fatal(), - error->is_retriable(), error->txn_requires_abort()); - delete error; - return result; - } -} - Baton Producer::InitTransactions(int32_t timeout_ms) { if (!IsConnected()) { return Baton(RdKafka::ERR__STATE); @@ -414,10 +445,12 @@ Baton Producer::SendOffsetsToTransaction( return Baton(RdKafka::ERR__STATE); } - RdKafka::ConsumerGroupMetadata* group_metadata = dynamic_cast(consumer->m_client)->groupMetadata(); + RdKafka::ConsumerGroupMetadata* group_metadata = + dynamic_cast(consumer->m_client)->groupMetadata(); // NOLINT RdKafka::Producer* producer = dynamic_cast(m_client); - RdKafka::Error* error = producer->send_offsets_to_transaction(offsets, group_metadata, timeout_ms); + RdKafka::Error* error = + producer->send_offsets_to_transaction(offsets, group_metadata, timeout_ms); delete group_metadata; return rdkafkaErrorToBaton( error); @@ -489,9 +522,11 @@ NAN_METHOD(Producer::NodeProduce) { message_buffer_data = node::Buffer::Data(message_buffer_object); if (message_buffer_data == NULL) { // empty string message buffer should not end up as null message - v8::Local message_buffer_object_emptystring = Nan::NewBuffer(new char[0], 0).ToLocalChecked(); - message_buffer_length = node::Buffer::Length(message_buffer_object_emptystring); - message_buffer_data = node::Buffer::Data(message_buffer_object_emptystring); + v8::Local message_buffer_object_emptystring = + Nan::NewBuffer(new char[0], 0).ToLocalChecked(); + message_buffer_length = + node::Buffer::Length(message_buffer_object_emptystring); + message_buffer_data = node::Buffer::Data(message_buffer_object_emptystring); // NOLINT } } @@ -519,9 +554,10 @@ NAN_METHOD(Producer::NodeProduce) { key_buffer_data = node::Buffer::Data(key_buffer_object); if (key_buffer_data == NULL) { // empty string key buffer should not end up as null 
key - v8::Local key_buffer_object_emptystring = Nan::NewBuffer(new char[0], 0).ToLocalChecked(); - key_buffer_length = node::Buffer::Length(key_buffer_object_emptystring); - key_buffer_data = node::Buffer::Data(key_buffer_object_emptystring); + v8::Local key_buffer_object_emptystring = + Nan::NewBuffer(new char[0], 0).ToLocalChecked(); + key_buffer_length = node::Buffer::Length(key_buffer_object_emptystring); + key_buffer_data = node::Buffer::Data(key_buffer_object_emptystring); } } else { // If it was a string just use the utf8 value. @@ -569,18 +605,37 @@ NAN_METHOD(Producer::NodeProduce) { v8::Local props = header->GetOwnPropertyNames( Nan::GetCurrentContext()).ToLocalChecked(); - Nan::MaybeLocal v8Key = Nan::To( - Nan::Get(props, 0).ToLocalChecked()); - Nan::MaybeLocal v8Value = Nan::To( - Nan::Get(header, v8Key.ToLocalChecked()).ToLocalChecked()); + // TODO: Other properties in the list of properties should not be + // ignored, but they are. This is a bug, need to handle it either in JS + // or here. + Nan::MaybeLocal v8Key = + Nan::To(Nan::Get(props, 0).ToLocalChecked()); + + // The key must be a string. + if (v8Key.IsEmpty()) { + Nan::ThrowError("Header key must be a string"); + } Nan::Utf8String uKey(v8Key.ToLocalChecked()); std::string key(*uKey); - Nan::Utf8String uValue(v8Value.ToLocalChecked()); - std::string value(*uValue); - headers.push_back( - RdKafka::Headers::Header(key, value.c_str(), value.size())); + // Valid types for the header are string or buffer. + // Other types will throw an error. + v8::Local v8Value = + Nan::Get(header, v8Key.ToLocalChecked()).ToLocalChecked(); + + if (node::Buffer::HasInstance(v8Value)) { + const char* value = node::Buffer::Data(v8Value); + const size_t value_len = node::Buffer::Length(v8Value); + headers.push_back(RdKafka::Headers::Header(key, value, value_len)); + } else if (v8Value->IsString()) { + Nan::Utf8String uValue(v8Value); + std::string value(*uValue); + headers.push_back( + RdKafka::Headers::Header(key, value.c_str(), value.size())); + } else { + Nan::ThrowError("Header value must be a string or buffer"); + } } } } @@ -692,6 +747,23 @@ NAN_METHOD(Producer::NodePoll) { } } +NAN_METHOD(Producer::NodeSetPollInBackground) { + Nan::HandleScope scope; + if (info.Length() < 1 || !info[0]->IsBoolean()) { + // Just throw an exception + return Nan::ThrowError( + "Need to specify a boolean for setting or unsetting"); + } + bool set = Nan::To(info[0]).FromJust(); + + Producer* producer = ObjectWrap::Unwrap(info.This()); + Baton b = producer->SetPollInBackground(set); + if (b.err() != RdKafka::ERR_NO_ERROR) { + return Nan::ThrowError(b.errstr().c_str()); + } + info.GetReturnValue().Set(b.ToObject()); +} + Baton Producer::Flush(int timeout_ms) { RdKafka::ErrorCode response_code; if (IsConnected()) { @@ -761,7 +833,8 @@ NAN_METHOD(Producer::NodeInitTransactions) { Nan::Callback *callback = new Nan::Callback(cb); Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerInitTransactions(callback, producer, timeout_ms)); + Nan::AsyncQueueWorker( + new Workers::ProducerInitTransactions(callback, producer, timeout_ms)); info.GetReturnValue().Set(Nan::Null()); } @@ -777,7 +850,7 @@ NAN_METHOD(Producer::NodeBeginTransaction) { Nan::Callback *callback = new Nan::Callback(cb); Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerBeginTransaction(callback, producer)); + Nan::AsyncQueueWorker(new Workers::ProducerBeginTransaction(callback, producer)); // NOLINT 
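// As with initTransactions above, the transactional calls are dispatched
// through Nan::AsyncQueueWorker because the underlying librdkafka operations
// may block (init, commit and abort take a timeout_ms for exactly that
// reason); running them on the libuv worker pool keeps the JS event loop
// responsive while the JS callback receives the eventual result or error.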
info.GetReturnValue().Set(Nan::Null()); } @@ -795,7 +868,8 @@ NAN_METHOD(Producer::NodeCommitTransaction) { Nan::Callback *callback = new Nan::Callback(cb); Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerCommitTransaction(callback, producer, timeout_ms)); + Nan::AsyncQueueWorker( + new Workers::ProducerCommitTransaction(callback, producer, timeout_ms)); info.GetReturnValue().Set(Nan::Null()); } @@ -813,7 +887,8 @@ NAN_METHOD(Producer::NodeAbortTransaction) { Nan::Callback *callback = new Nan::Callback(cb); Producer* producer = ObjectWrap::Unwrap(info.This()); - Nan::AsyncQueueWorker(new Workers::ProducerAbortTransaction(callback, producer, timeout_ms)); + Nan::AsyncQueueWorker( + new Workers::ProducerAbortTransaction(callback, producer, timeout_ms)); info.GetReturnValue().Set(Nan::Null()); } @@ -822,10 +897,12 @@ NAN_METHOD(Producer::NodeSendOffsetsToTransaction) { Nan::HandleScope scope; if (info.Length() < 4) { - return Nan::ThrowError("Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback"); + return Nan::ThrowError( + "Need to specify offsets, consumer, timeout for 'send offsets to transaction', and callback"); // NOLINT } if (!info[0]->IsArray()) { - return Nan::ThrowError("First argument to 'send offsets to transaction' has to be a consumer object"); + return Nan::ThrowError( + "First argument to 'send offsets to transaction' has to be a consumer object"); // NOLINT } if (!info[1]->IsObject()) { Nan::ThrowError("Kafka consumer must be provided"); @@ -851,8 +928,7 @@ NAN_METHOD(Producer::NodeSendOffsetsToTransaction) { producer, toppars, consumer, - timeout_ms - )); + timeout_ms)); info.GetReturnValue().Set(Nan::Null()); } diff --git a/src/producer.h b/src/producer.h index 4f0ca2c3..8df138e8 100644 --- a/src/producer.h +++ b/src/producer.h @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -14,8 +14,9 @@ #include #include #include +#include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/common.h" #include "src/connection.h" @@ -54,6 +55,7 @@ class Producer : public Connection { Baton Connect(); void Disconnect(); void Poll(); + Baton SetPollInBackground(bool); #if RD_KAFKA_VERSION > 0x00090200 Baton Flush(int timeout_ms); #endif @@ -75,12 +77,11 @@ class Producer : public Connection { int64_t timestamp, void* opaque, RdKafka::Headers* headers); - std::string Name(); - void ActivateDispatchers(); void DeactivateDispatchers(); - void ConfigureCallback(const std::string &string_key, const v8::Local &cb, bool add) override; + void ConfigureCallback(const std::string& string_key, + const v8::Local& cb, bool add) override; Baton InitTransactions(int32_t timeout_ms); Baton BeginTransaction(); @@ -89,8 +90,7 @@ class Producer : public Connection { Baton SendOffsetsToTransaction( std::vector &offsets, NodeKafka::KafkaConsumer* consumer, - int timeout_ms - ); + int timeout_ms); protected: static Nan::Persistent constructor; @@ -105,6 +105,7 @@ class Producer : public Connection { static NAN_METHOD(NodeConnect); static NAN_METHOD(NodeDisconnect); static NAN_METHOD(NodePoll); + static NAN_METHOD(NodeSetPollInBackground); #if RD_KAFKA_VERSION > 0x00090200 static NAN_METHOD(NodeFlush); #endif @@ -116,6 +117,7 @@ class Producer : public Connection { Callbacks::Delivery m_dr_cb; Callbacks::Partitioner m_partitioner_cb; + 
bool m_is_background_polling; }; } // namespace NodeKafka diff --git a/src/topic.cc b/src/topic.cc index 1f0de565..78653c41 100644 --- a/src/topic.cc +++ b/src/topic.cc @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/src/topic.h b/src/topic.h index 1404d532..d487d089 100644 --- a/src/topic.h +++ b/src/topic.h @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -13,7 +13,7 @@ #include #include -#include "rdkafkacpp.h" +#include "rdkafkacpp.h" // NOLINT #include "src/config.h" diff --git a/src/workers.cc b/src/workers.cc index 749732d0..571cc1e7 100644 --- a/src/workers.cc +++ b/src/workers.cc @@ -1,17 +1,17 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2024 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. */ +#include "src/workers.h" #include #include -#include "src/workers.h" - #ifndef _WIN32 #include #else @@ -199,6 +199,11 @@ ProducerConnect::ProducerConnect(Nan::Callback *callback, Producer* producer): ProducerConnect::~ProducerConnect() {} void ProducerConnect::Execute() { + // Activate the dispatchers before the connection, as some callbacks may run + // on the background thread. + // We will deactivate them if the connection fails. + producer->ActivateDispatchers(); + Baton b = producer->Connect(); if (b.err() != RdKafka::ERR_NO_ERROR) { @@ -217,15 +222,14 @@ void ProducerConnect::HandleOKCallback() { v8::Local argv[argc] = { Nan::Null(), obj}; - // Activate the dispatchers - producer->ActivateDispatchers(); - callback->Call(argc, argv); } void ProducerConnect::HandleErrorCallback() { Nan::HandleScope scope; + producer->DeactivateDispatchers(); + const unsigned int argc = 1; v8::Local argv[argc] = { GetErrorObject() }; @@ -356,9 +360,9 @@ void ProducerInitTransactions::HandleErrorCallback() { * @sa NodeKafka::Producer::BeginTransaction */ -ProducerBeginTransaction::ProducerBeginTransaction(Nan::Callback *callback, Producer* producer): - ErrorAwareWorker(callback), - producer(producer) {} +ProducerBeginTransaction::ProducerBeginTransaction(Nan::Callback* callback, + Producer* producer) + : ErrorAwareWorker(callback), producer(producer) {} ProducerBeginTransaction::~ProducerBeginTransaction() {} @@ -508,11 +512,8 @@ ProducerSendOffsetsToTransaction::ProducerSendOffsetsToTransaction( ProducerSendOffsetsToTransaction::~ProducerSendOffsetsToTransaction() {} void ProducerSendOffsetsToTransaction::Execute() { - Baton b = producer->SendOffsetsToTransaction( - m_topic_partitions, - consumer, - m_timeout_ms - ); + Baton b = producer->SendOffsetsToTransaction(m_topic_partitions, consumer, + m_timeout_ms); if (b.err() != RdKafka::ERR_NO_ERROR) { SetErrorBaton(b); @@ -557,6 +558,11 @@ KafkaConsumerConnect::KafkaConsumerConnect(Nan::Callback *callback, KafkaConsumerConnect::~KafkaConsumerConnect() {} void KafkaConsumerConnect::Execute() { + // Activate the dispatchers before the connection, as some callbacks may run + // on the background thread. + // We will deactivate them if the connection fails. 
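// One concrete example: when Connect() enables SASL/OAUTHBEARER background
// callbacks via setupSaslOAuthBearerBackgroundQueue(), token-refresh and
// event callbacks can be delivered from librdkafka's background thread, so
// the dispatchers need to be live before Connect() is called.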
+ consumer->ActivateDispatchers(); + Baton b = consumer->Connect(); // consumer->Wait(); @@ -576,7 +582,6 @@ void KafkaConsumerConnect::HandleOKCallback() { Nan::New(consumer->Name()).ToLocalChecked()); v8::Local argv[argc] = { Nan::Null(), obj }; - consumer->ActivateDispatchers(); callback->Call(argc, argv); } @@ -584,6 +589,8 @@ void KafkaConsumerConnect::HandleOKCallback() { void KafkaConsumerConnect::HandleErrorCallback() { Nan::HandleScope scope; + consumer->DeactivateDispatchers(); + const unsigned int argc = 1; v8::Local argv[argc] = { Nan::Error(ErrorMessage()) }; @@ -648,9 +655,9 @@ void KafkaConsumerDisconnect::HandleErrorCallback() { * consumer is flagged as disconnected or as unsubscribed. * * @todo thread-safe isConnected checking - * @note Chances are, when the connection is broken with the way librdkafka works, - * we are shutting down. But we want it to shut down properly so we probably - * need the consumer to have a thread lock that can be used when + * @note Chances are, when the connection is broken with the way librdkafka + * works, we are shutting down. But we want it to shut down properly so we + * probably need the consumer to have a thread lock that can be used when * we are dealing with manipulating the `client` * * @sa RdKafka::KafkaConsumer::Consume @@ -666,7 +673,8 @@ KafkaConsumerConsumeLoop::KafkaConsumerConsumeLoop(Nan::Callback *callback, m_looping(true), m_timeout_ms(timeout_ms), m_timeout_sleep_delay_ms(timeout_sleep_delay_ms) { - uv_thread_create(&thread_event_loop, KafkaConsumerConsumeLoop::ConsumeLoop, (void*)this); + uv_thread_create(&thread_event_loop, KafkaConsumerConsumeLoop::ConsumeLoop, + reinterpret_cast(this)); } KafkaConsumerConsumeLoop::~KafkaConsumerConsumeLoop() {} @@ -680,8 +688,9 @@ void KafkaConsumerConsumeLoop::Execute(const ExecutionMessageBus& bus) { // ConsumeLoop is used instead } -void KafkaConsumerConsumeLoop::ConsumeLoop(void *arg) { - KafkaConsumerConsumeLoop* consumerLoop = (KafkaConsumerConsumeLoop*)arg; +void KafkaConsumerConsumeLoop::ConsumeLoop(void* arg) { + KafkaConsumerConsumeLoop* consumerLoop = + reinterpret_cast(arg); ExecutionMessageBus bus(consumerLoop); KafkaConsumer* consumer = consumerLoop->consumer; @@ -719,7 +728,8 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void *arg) { consumerLoop->m_looping = false; break; } - } else if (ec == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART || ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { + } else if (ec == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART || + ec == RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED) { bus.SendWarning(ec); } else { // Unknown error. 
We need to break out of this @@ -729,7 +739,8 @@ void KafkaConsumerConsumeLoop::ConsumeLoop(void *arg) { } } -void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, RdKafka::ErrorCode ec) { +void KafkaConsumerConsumeLoop::HandleMessageCallback(RdKafka::Message* msg, + RdKafka::ErrorCode ec) { Nan::HandleScope scope; const unsigned int argc = 4; @@ -797,11 +808,13 @@ void KafkaConsumerConsumeLoop::HandleErrorCallback() { KafkaConsumerConsumeNum::KafkaConsumerConsumeNum(Nan::Callback *callback, KafkaConsumer* consumer, const uint32_t & num_messages, - const int & timeout_ms) : + const int & timeout_ms, + bool timeout_only_for_first_message) : ErrorAwareWorker(callback), m_consumer(consumer), m_num_messages(num_messages), - m_timeout_ms(timeout_ms) {} + m_timeout_ms(timeout_ms), + m_timeout_only_for_first_message(timeout_only_for_first_message) {} KafkaConsumerConsumeNum::~KafkaConsumerConsumeNum() {} @@ -825,8 +838,9 @@ void KafkaConsumerConsumeNum::Execute() { timeout_ms = 1; } - // We will only go into this code path when `enable.partition.eof` is set to true - // In this case, consumer is also interested in EOF messages, so we return an EOF message + // We will only go into this code path when `enable.partition.eof` + // is set to true. In this case, consumer is also interested in EOF + // messages, so we return an EOF message m_messages.push_back(message); eof_event_count += 1; break; @@ -838,6 +852,15 @@ void KafkaConsumerConsumeNum::Execute() { break; case RdKafka::ERR_NO_ERROR: m_messages.push_back(b.data()); + + // This allows getting ready messages, while not waiting for new ones. + // This is useful when we want to get the as many messages as possible + // within the timeout but not wait if we already have one or more + // messages. + if (m_timeout_only_for_first_message) { + timeout_ms = 1; + } + break; default: // Set the error for any other errors and break @@ -876,7 +899,8 @@ void KafkaConsumerConsumeNum::HandleOKCallback() { switch (message->err()) { case RdKafka::ERR_NO_ERROR: ++returnArrayIndex; - Nan::Set(returnArray, returnArrayIndex, Conversion::Message::ToV8Object(message)); + Nan::Set(returnArray, returnArrayIndex, + Conversion::Message::ToV8Object(message)); break; case RdKafka::ERR__PARTITION_EOF: ++eofEventsArrayIndex; @@ -891,10 +915,12 @@ void KafkaConsumerConsumeNum::HandleOKCallback() { Nan::Set(eofEvent, Nan::New("partition").ToLocalChecked(), Nan::New(message->partition())); - // also store index at which position in the message array this event was emitted - // this way, we can later emit it at the right point in time - Nan::Set(eofEvent, Nan::New("messageIndex").ToLocalChecked(), - Nan::New(returnArrayIndex)); + // also store index at which position in the message array this event + // was emitted this way, we can later emit it at the right point in + // time + Nan::Set(eofEvent, + Nan::New("messageIndex").ToLocalChecked(), + Nan::New(returnArrayIndex)); Nan::Set(eofEventsArray, eofEventsArrayIndex, eofEvent); } @@ -1036,6 +1062,58 @@ void KafkaConsumerCommitted::HandleErrorCallback() { callback->Call(argc, argv); } +/** + * @brief KafkaConsumer commit offsets with a callback function. + * + * The first callback argument is the commit error, or null on success. 
+ * + * @see RdKafka::KafkaConsumer::commitSync + */ +KafkaConsumerCommitCb::KafkaConsumerCommitCb(Nan::Callback *callback, + KafkaConsumer* consumer, + std::optional> & t) : + ErrorAwareWorker(callback), + m_consumer(consumer), + m_topic_partitions(t) {} + +KafkaConsumerCommitCb::~KafkaConsumerCommitCb() { + // Delete the underlying topic partitions as they are ephemeral or cloned + if (m_topic_partitions.has_value()) + RdKafka::TopicPartition::destroy(m_topic_partitions.value()); +} + +void KafkaConsumerCommitCb::Execute() { + Baton b = Baton(NULL); + if (m_topic_partitions.has_value()) { + b = m_consumer->Commit(m_topic_partitions.value()); + } else { + b = m_consumer->Commit(); + } + if (b.err() != RdKafka::ERR_NO_ERROR) { + SetErrorBaton(b); + } +} + +void KafkaConsumerCommitCb::HandleOKCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + v8::Local argv[argc]; + + argv[0] = Nan::Null(); + + callback->Call(argc, argv); +} + +void KafkaConsumerCommitCb::HandleErrorCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + v8::Local argv[argc] = { GetErrorObject() }; + + callback->Call(argc, argv); +} + /** * @brief KafkaConsumer seek * @@ -1240,5 +1318,171 @@ void AdminClientCreatePartitions::HandleErrorCallback() { callback->Call(argc, argv); } +/** + * @brief List consumer groups in an asynchronous worker. + * + * This callback will list consumer groups. + * + */ +AdminClientListGroups::AdminClientListGroups( + Nan::Callback* callback, AdminClient* client, bool is_match_states_set, + std::vector& match_states, + const int& timeout_ms) + : ErrorAwareWorker(callback), + m_client(client), + m_is_match_states_set(is_match_states_set), + m_match_states(match_states), + m_timeout_ms(timeout_ms) {} + +AdminClientListGroups::~AdminClientListGroups() { + if (this->m_event_response) { + rd_kafka_event_destroy(this->m_event_response); + } +} + +void AdminClientListGroups::Execute() { + Baton b = m_client->ListGroups(m_is_match_states_set, m_match_states, + m_timeout_ms, &m_event_response); + if (b.err() != RdKafka::ERR_NO_ERROR) { + SetErrorBaton(b); + } +} + +void AdminClientListGroups::HandleOKCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 2; + v8::Local argv[argc]; + + argv[0] = Nan::Null(); + + const rd_kafka_ListConsumerGroups_result_t* result = + rd_kafka_event_ListConsumerGroups_result(m_event_response); + + argv[1] = Conversion::Admin::FromListConsumerGroupsResult(result); + + callback->Call(argc, argv); +} + +void AdminClientListGroups::HandleErrorCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + v8::Local argv[argc] = {GetErrorObject()}; + + callback->Call(argc, argv); +} + +/** + * @brief Describe consumer groups in an asynchronous worker. + * + * This callback will describe consumer groups. 
+ * + */ +AdminClientDescribeGroups::AdminClientDescribeGroups( + Nan::Callback* callback, NodeKafka::AdminClient* client, + std::vector& groups, bool include_authorized_operations, + const int& timeout_ms) + : ErrorAwareWorker(callback), + m_client(client), + m_groups(groups), + m_include_authorized_operations(include_authorized_operations), + m_timeout_ms(timeout_ms) {} + +AdminClientDescribeGroups::~AdminClientDescribeGroups() { + if (this->m_event_response) { + rd_kafka_event_destroy(this->m_event_response); + } +} + +void AdminClientDescribeGroups::Execute() { + Baton b = m_client->DescribeGroups(m_groups, m_include_authorized_operations, + m_timeout_ms, &m_event_response); + if (b.err() != RdKafka::ERR_NO_ERROR) { + SetErrorBaton(b); + } +} + +void AdminClientDescribeGroups::HandleOKCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 2; + v8::Local argv[argc]; + + argv[0] = Nan::Null(); + argv[1] = Conversion::Admin::FromDescribeConsumerGroupsResult( + rd_kafka_event_DescribeConsumerGroups_result(m_event_response)); + + callback->Call(argc, argv); +} + +void AdminClientDescribeGroups::HandleErrorCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + v8::Local argv[argc] = {GetErrorObject()}; + + callback->Call(argc, argv); +} + +/** + * @brief Delete consumer groups in an asynchronous worker. + * + * This callback will delete consumer groups. + * + */ +AdminClientDeleteGroups::AdminClientDeleteGroups( + Nan::Callback* callback, NodeKafka::AdminClient* client, + rd_kafka_DeleteGroup_t **group_list, + size_t group_cnt, + const int& timeout_ms) + : ErrorAwareWorker(callback), + m_client(client), + m_group_list(group_list), + m_group_cnt(group_cnt), + m_timeout_ms(timeout_ms) {} + +AdminClientDeleteGroups::~AdminClientDeleteGroups() { + if (m_group_list) { + rd_kafka_DeleteGroup_destroy_array(m_group_list, m_group_cnt); + free(m_group_list); + } + + if (this->m_event_response) { + rd_kafka_event_destroy(this->m_event_response); + } +} + +void AdminClientDeleteGroups::Execute() { + Baton b = m_client->DeleteGroups(m_group_list, m_group_cnt, m_timeout_ms, + &m_event_response); + if (b.err() != RdKafka::ERR_NO_ERROR) { + SetErrorBaton(b); + } +} + +void AdminClientDeleteGroups::HandleOKCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 2; + v8::Local argv[argc]; + + argv[0] = Nan::Null(); + argv[1] = Conversion::Admin::FromDeleteGroupsResult( + rd_kafka_event_DeleteGroups_result(m_event_response)); + + callback->Call(argc, argv); +} + +void AdminClientDeleteGroups::HandleErrorCallback() { + Nan::HandleScope scope; + + const unsigned int argc = 1; + v8::Local argv[argc] = {GetErrorObject()}; + + callback->Call(argc, argv); +} + + } // namespace Workers } // namespace NodeKafka diff --git a/src/workers.h b/src/workers.h index b290d253..e103163f 100644 --- a/src/workers.h +++ b/src/workers.h @@ -1,7 +1,8 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment + * (c) 2023 Confluent, Inc. * * This software may be modified and distributed under the terms * of the MIT license. See the LICENSE.txt file for details. @@ -13,6 +14,7 @@ #include #include #include +#include #include #include "src/common.h" @@ -36,8 +38,13 @@ class ErrorAwareWorker : public Nan::AsyncWorker { void HandleErrorCallback() { Nan::HandleScope scope; + // Construct error and add code to it. 
+ v8::Local error = Nan::Error(ErrorMessage()); + Nan::Set(error.As(), Nan::New("code").ToLocalChecked(), + Nan::New(GetErrorCode())); + const unsigned int argc = 1; - v8::Local argv[argc] = { Nan::Error(ErrorMessage()) }; + v8::Local argv[argc] = { error }; callback->Call(argc, argv); } @@ -417,6 +424,21 @@ class KafkaConsumerCommitted : public ErrorAwareWorker { const int m_timeout_ms; }; +class KafkaConsumerCommitCb : public ErrorAwareWorker { + public: + KafkaConsumerCommitCb(Nan::Callback*, + NodeKafka::KafkaConsumer*, + std::optional> &); + ~KafkaConsumerCommitCb(); + + void Execute(); + void HandleOKCallback(); + void HandleErrorCallback(); + private: + NodeKafka::KafkaConsumer * m_consumer; + std::optional> m_topic_partitions; +}; + class KafkaConsumerSeek : public ErrorAwareWorker { public: KafkaConsumerSeek(Nan::Callback*, NodeKafka::KafkaConsumer*, @@ -435,7 +457,7 @@ class KafkaConsumerSeek : public ErrorAwareWorker { class KafkaConsumerConsumeNum : public ErrorAwareWorker { public: KafkaConsumerConsumeNum(Nan::Callback*, NodeKafka::KafkaConsumer*, - const uint32_t &, const int &); + const uint32_t &, const int &, bool); ~KafkaConsumerConsumeNum(); void Execute(); @@ -445,6 +467,7 @@ class KafkaConsumerConsumeNum : public ErrorAwareWorker { NodeKafka::KafkaConsumer * m_consumer; const uint32_t m_num_messages; const int m_timeout_ms; + const bool m_timeout_only_for_first_message; std::vector m_messages; }; @@ -502,6 +525,70 @@ class AdminClientCreatePartitions : public ErrorAwareWorker { const int m_timeout_ms; }; +/** + * @brief List consumer groups on a remote broker cluster. + */ +class AdminClientListGroups : public ErrorAwareWorker { + public: + AdminClientListGroups(Nan::Callback *, NodeKafka::AdminClient *, bool, + std::vector &, + const int &); + ~AdminClientListGroups(); + + void Execute(); + void HandleOKCallback(); + void HandleErrorCallback(); + + private: + NodeKafka::AdminClient *m_client; + const bool m_is_match_states_set; + std::vector m_match_states; + const int m_timeout_ms; + rd_kafka_event_t *m_event_response; +}; + +/** + * @brief Describe consumer groups on a remote broker cluster. + */ +class AdminClientDescribeGroups : public ErrorAwareWorker { + public: + AdminClientDescribeGroups(Nan::Callback *, NodeKafka::AdminClient *, + std::vector &, bool, const int &); + ~AdminClientDescribeGroups(); + + void Execute(); + void HandleOKCallback(); + void HandleErrorCallback(); + + private: + NodeKafka::AdminClient *m_client; + std::vector m_groups; + const bool m_include_authorized_operations; + const int m_timeout_ms; + rd_kafka_event_t *m_event_response; +}; + +/** + * @brief Delete consumer groups on a remote broker cluster. 
+ */ +class AdminClientDeleteGroups : public ErrorAwareWorker { + public: + AdminClientDeleteGroups(Nan::Callback *, NodeKafka::AdminClient *, + rd_kafka_DeleteGroup_t **, size_t, const int &); + ~AdminClientDeleteGroups(); + + void Execute(); + void HandleOKCallback(); + void HandleErrorCallback(); + + private: + NodeKafka::AdminClient *m_client; + rd_kafka_DeleteGroup_t **m_group_list; + size_t m_group_cnt; + const int m_timeout_ms; + rd_kafka_event_t *m_event_response; +}; + } // namespace Workers } // namespace NodeKafka diff --git a/test/binding.spec.js b/test/binding.spec.js index 466f3fe6..cde357f9 100644 --- a/test/binding.spec.js +++ b/test/binding.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -7,7 +7,7 @@ * of the MIT license. See the LICENSE.txt file for details. */ -var addon = require('bindings')('confluent-kafka-js'); +var addon = require('bindings')('confluent-kafka-javascript'); var t = require('assert'); var consumerConfig = { diff --git a/test/consumer.spec.js b/test/consumer.spec.js index 45b1b17e..1f1d61a7 100644 --- a/test/consumer.spec.js +++ b/test/consumer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * @@ -7,7 +7,7 @@ * of the MIT license. See the LICENSE.txt file for details. */ -var addon = require('bindings')('confluent-kafka-js'); +var addon = require('bindings')('confluent-kafka-javascript'); var t = require('assert'); var client; diff --git a/test/error.spec.js b/test/error.spec.js index 1e54ca4f..894a17f0 100644 --- a/test/error.spec.js +++ b/test/error.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/index.spec.js b/test/index.spec.js index 1e54ca4f..894a17f0 100644 --- a/test/index.spec.js +++ b/test/index.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/kafka-consumer-stream.spec.js b/test/kafka-consumer-stream.spec.js index b12d95c8..198dfb82 100644 --- a/test/kafka-consumer-stream.spec.js +++ b/test/kafka-consumer-stream.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/kafka-consumer.spec.js b/test/kafka-consumer.spec.js index 9d05e01c..a6afe64c 100644 --- a/test/kafka-consumer.spec.js +++ b/test/kafka-consumer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/mock.js b/test/mock.js index 8e72c822..a9f50748 100644 --- a/test/mock.js +++ b/test/mock.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment 
* diff --git a/test/producer-stream.spec.js b/test/producer-stream.spec.js index 4bb3bfa7..bb70de96 100644 --- a/test/producer-stream.spec.js +++ b/test/producer-stream.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/producer.spec.js b/test/producer.spec.js index fc24ea4b..03a8f4be 100644 --- a/test/producer.spec.js +++ b/test/producer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/producer/high-level-producer.spec.js b/test/producer/high-level-producer.spec.js index 41f665b8..4f8577a3 100644 --- a/test/producer/high-level-producer.spec.js +++ b/test/producer/high-level-producer.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/promisified/admin/delete_groups.spec.js b/test/promisified/admin/delete_groups.spec.js new file mode 100644 index 00000000..07fddf9b --- /dev/null +++ b/test/promisified/admin/delete_groups.spec.js @@ -0,0 +1,103 @@ +jest.setTimeout(30000); + +const { + createConsumer, + secureRandom, + createTopic, + waitFor, + createAdmin, +} = require('../testhelpers'); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Admin > deleteGroups', () => { + let topicName, groupId, consumer, admin; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + consumer = createConsumer({ + groupId, + fromBeginning: true, + }); + + await createTopic({ topic: topicName, partitions: 2 }); + + admin = createAdmin({}); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + admin && (await admin.disconnect()); + }); + + it('should timeout', async () => { + await admin.connect(); + + await expect(admin.deleteGroups(['invalid-group'], { timeout: 0 })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__TIMED_OUT + ); + }); + + it('should delete empty consumer groups', async () => { + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ eachMessage: async () => {} }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + await admin.connect(); + let listGroupsResult = await admin.listGroups(); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups.map(group => group.groupId)).toContain(groupId); + + // Delete when the group is not empty - it should fail. + let deleteResult = await admin.deleteGroups([groupId]); + expect(deleteResult).toEqual([ + expect.objectContaining({ + groupId, + error: expect.objectContaining({ + code: ErrorCodes.ERR_NON_EMPTY_GROUP, + }), + errorCode: ErrorCodes.ERR_NON_EMPTY_GROUP, + }), + ]); + + // Disconnect the consumer to make the group EMPTY. + await consumer.disconnect(); + consumer = null; + + listGroupsResult = await admin.listGroups(); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups.map(group => group.groupId)).toContain(groupId); + + // Delete the empty consumer group. 
+ deleteResult = await admin.deleteGroups([groupId]); + expect(deleteResult).toEqual([ + expect.objectContaining({ + groupId, + errorCode: ErrorCodes.ERR_NO_ERROR, + }), + ]); + + // Cross-verify the deletion. + listGroupsResult = await admin.listGroups(); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups.map(group => group.groupId)).not.toContain(groupId); + + // Deleting the group again should fail. + deleteResult = await admin.deleteGroups([groupId]); + expect(deleteResult).toEqual([ + expect.objectContaining({ + groupId, + error: expect.objectContaining({ + code: ErrorCodes.ERR_GROUP_ID_NOT_FOUND, + }), + errorCode: ErrorCodes.ERR_GROUP_ID_NOT_FOUND, + }), + ]); + + }); +}); + diff --git a/test/promisified/admin/describe_groups.spec.js b/test/promisified/admin/describe_groups.spec.js new file mode 100644 index 00000000..0981d12d --- /dev/null +++ b/test/promisified/admin/describe_groups.spec.js @@ -0,0 +1,136 @@ +jest.setTimeout(30000); + +const { + createConsumer, + secureRandom, + createTopic, + waitFor, + createAdmin, + sleep, +} = require('../testhelpers'); +const { ConsumerGroupStates, ErrorCodes, AclOperationTypes } = require('../../../lib').KafkaJS; + +describe('Admin > describeGroups', () => { + let topicName, groupId, consumer, admin; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + consumer = createConsumer({ + groupId, + fromBeginning: true, + clientId: 'test-client-id', + }, { + 'group.instance.id': 'test-instance-id', + 'session.timeout.ms': 10000, + 'partition.assignment.strategy': 'roundrobin', + }); + + await createTopic({ topic: topicName, partitions: 2 }); + + admin = createAdmin({}); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + admin && (await admin.disconnect()); + }); + + it('should timeout', async () => { + await admin.connect(); + + await expect(admin.describeGroups(['not-a-real-group'], { timeout: 0 })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__TIMED_OUT + ); + }); + + it('should not accept empty or null groups array', async () => { + await admin.connect(); + + await expect(admin.describeGroups([])).rejects.toHaveProperty( + 'message', + 'Must provide at least one group name' + ); + + await expect(admin.describeGroups(null)).rejects.toHaveProperty( + 'message', + 'Must provide group name array' + ); + }); + + it('should describe consumer groups', async () => { + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + await admin.connect(); + let describeGroupsResult = await admin.describeGroups( + [groupId], { includeAuthorizedOperations: true }); + expect(describeGroupsResult.groups.length).toEqual(1); + expect(describeGroupsResult.groups[0]).toEqual( + expect.objectContaining({ + groupId, + protocol: 'roundrobin', + partitionAssignor: 'roundrobin', + isSimpleConsumerGroup: false, + protocolType: 'consumer', + state: ConsumerGroupStates.STABLE, + coordinator: expect.objectContaining({ + id: expect.any(Number), + host: expect.any(String), + port: expect.any(Number), + }), + authorizedOperations: expect.arrayContaining([AclOperationTypes.READ, AclOperationTypes.DESCRIBE]), + members: expect.arrayContaining([ + expect.objectContaining({ + clientHost: expect.any(String), + clientId: 'test-client-id', + memberId: expect.any(String), + memberAssignment: null, 
+ memberMetadata: null, + groupInstanceId: 'test-instance-id', + assignment: { + topicPartitions:[ + expect.objectContaining({ topic: topicName, partition: 0 }), + expect.objectContaining({ topic: topicName, partition: 1 }), + ], + } + }), + ]), + }) + ); + + // Disconnect the consumer to make the group EMPTY. + await consumer.disconnect(); + consumer = null; + + // Wait so that session.timeout.ms expires and the group becomes EMPTY. + await sleep(12000); + + // Don't include authorized operations this time. + describeGroupsResult = await admin.describeGroups([groupId]); + expect(describeGroupsResult.groups.length).toEqual(1); + expect(describeGroupsResult.groups[0]).toEqual( + expect.objectContaining({ + groupId, + protocol: '', + partitionAssignor: '', + isSimpleConsumerGroup: false, + protocolType: 'consumer', + state: ConsumerGroupStates.EMPTY, + coordinator: expect.objectContaining({ + id: expect.any(Number), + host: expect.any(String), + port: expect.any(Number), + }), + members: [], + }) + ); + expect(describeGroupsResult.groups[0].authorizedOperations).toBeUndefined(); + }); +}); + diff --git a/test/promisified/admin/list_groups.spec.js b/test/promisified/admin/list_groups.spec.js new file mode 100644 index 00000000..11e7a271 --- /dev/null +++ b/test/promisified/admin/list_groups.spec.js @@ -0,0 +1,93 @@ +jest.setTimeout(30000); + +const { + createConsumer, + secureRandom, + createTopic, + waitFor, + createAdmin, +} = require('../testhelpers'); +const { ConsumerGroupStates, ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Admin > listGroups', () => { + let topicName, groupId, consumer, admin; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + consumer = createConsumer({ + groupId, + fromBeginning: true, + }); + + await createTopic({ topic: topicName, partitions: 2 }); + + admin = createAdmin({}); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + admin && (await admin.disconnect()); + }); + + it('should timeout', async () => { + await admin.connect(); + + await expect(admin.listGroups({ timeout: 0 })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__TIMED_OUT + ); + }); + + it('should list consumer groups', async () => { + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ eachMessage: async () => {} }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + await admin.connect(); + let listGroupsResult = await admin.listGroups({ + matchConsumerGroupStates: undefined, + }); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + groupId, + isSimpleConsumerGroup: false, + protocolType: 'consumer', + state: ConsumerGroupStates.STABLE, + }), + ]) + ); + + // Disconnect the consumer to make the group EMPTY. + await consumer.disconnect(); + consumer = null; + + listGroupsResult = await admin.listGroups(); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + groupId, + isSimpleConsumerGroup: false, + protocolType: 'consumer', + state: ConsumerGroupStates.EMPTY, + }), + ]) + ); + + // Consumer group should not show up if filtering by non-empty groups + // using state matching. 
+ listGroupsResult = await admin.listGroups({ + matchConsumerGroupStates: [ ConsumerGroupStates.STABLE, + ConsumerGroupStates.PREPARING_REBALANCE, + ConsumerGroupStates.COMPLETING_REBALANCE, ] }); + expect(listGroupsResult.errors).toEqual([]); + expect(listGroupsResult.groups.map(group => group.groupId)).not.toContain(groupId); + }); +}); + diff --git a/test/promisified/admin/list_topics.spec.js b/test/promisified/admin/list_topics.spec.js new file mode 100644 index 00000000..77a3447b --- /dev/null +++ b/test/promisified/admin/list_topics.spec.js @@ -0,0 +1,43 @@ +jest.setTimeout(30000); + +const { + secureRandom, + createTopic, + createAdmin, +} = require('../testhelpers'); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Admin > listTopics', () => { + let topicNames, admin; + + beforeEach(async () => { + topicNames = [`test-topic-${secureRandom()}`, `test-topic-${secureRandom()}`]; + + await createTopic({ topic: topicNames[0], }); + await createTopic({ topic: topicNames[1] }); + + admin = createAdmin({}); + }); + + afterEach(async () => { + admin && (await admin.disconnect()); + }); + + it('should timeout', async () => { + await admin.connect(); + + await expect(admin.listTopics({ timeout: 1 })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__TIMED_OUT + ); + }); + + it('should list consumer topics', async () => { + await admin.connect(); + const listTopicsResult = await admin.listTopics(); + expect(listTopicsResult).toEqual( + expect.arrayContaining(topicNames) + ); + }); +}); + diff --git a/test/promisified/consumer/commit.spec.js b/test/promisified/consumer/commit.spec.js new file mode 100644 index 00000000..bebece53 --- /dev/null +++ b/test/promisified/consumer/commit.spec.js @@ -0,0 +1,227 @@ +jest.setTimeout(30000); + +const { + secureRandom, + createTopic, + waitFor, + createProducer, + createConsumer, + sleep, +} = require('../testhelpers'); + +describe('Consumer commit', () => { + let topicName, groupId, producer, consumer; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + await createTopic({ topic: topicName, partitions: 3 }); + + producer = createProducer({}); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: false, + autoCommitInterval: 500, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + }); + + it('should commit offsets', async () => { + /* Evenly distribute 30 messages across 3 partitions */ + let i = 0; + const messages = Array(3 * 10) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: (i++) % 3 }; + }); + + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await producer.flush(); + + let msgCount = 0; + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + msgCount++; + const offset = (Number(message.offset) + 1).toString(); + await expect(() => consumer.commitOffsets([{ topic, partition, offset }])).not.toThrow(); + } + }); + await waitFor(() => msgCount >= 30, () => null, { delay: 100 }); + expect(msgCount).toEqual(30); + + await consumer.disconnect(); + + /* Send 30 more messages */ + await producer.send({ topic: topicName, messages }); + await producer.flush(); + + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + 
fromBeginning: true, + }); + + msgCount = 0; + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ + eachMessage: async () => { + msgCount++; + } + }); + /* Only the extra 30 messages should come to us */ + await waitFor(() => msgCount >= 30, () => null, { delay: 100 }); + await sleep(1000); + expect(msgCount).toEqual(30); + }); + + it('should commit offsets with metadata', async () => { + /* Evenly distribute 30 messages across 3 partitions */ + let i = 0; + const messages = Array(3 * 10) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: (i++) % 3 }; + }); + + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await producer.flush(); + + let msgCount = 0; + const metadata = 'unicode-metadata-😊'; + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + msgCount++; + const offset = (Number(message.offset) + 1).toString(); + const leaderEpoch = message.leaderEpoch; + await expect(() => consumer.commitOffsets([{ topic, partition, offset, metadata, leaderEpoch }])).not.toThrow(); + } + }); + await waitFor(() => msgCount >= 30, () => null, { delay: 100 }); + expect(msgCount).toEqual(30); + + let committed = await consumer.committed(null, 5000); + expect(committed).toEqual([ + { topic: topicName, partition: 0, offset: '10', metadata, leaderEpoch: expect.any(Number) }, + { topic: topicName, partition: 1, offset: '10', metadata, leaderEpoch: expect.any(Number) }, + { topic: topicName, partition: 2, offset: '10', metadata, leaderEpoch: expect.any(Number) } + ]); + + await consumer.disconnect(); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + }); + + msgCount = 0; + await consumer.connect(); + await consumer.subscribe({ topic: 'not-a-real-topic-name' }); + + /* At this point, we're not actually assigned anything, but we should be able to fetch + * the stored offsets and metadata anyway since we're of the same consumer group. 
*/ + committed = await consumer.committed([ + { topic: topicName, partition: 0 }, + { topic: topicName, partition: 1 }, + { topic: topicName, partition: 2 }, + ]); + expect(committed).toEqual([ + { topic: topicName, partition: 0, offset: '10', metadata,leaderEpoch: expect.any(Number) }, + { topic: topicName, partition: 1, offset: '10', metadata,leaderEpoch: expect.any(Number) }, + { topic: topicName, partition: 2, offset: '10', metadata,leaderEpoch: expect.any(Number) } + ]); + }); + + it.each([[true], [false]])('should commit only resolved offsets while using eachBatch - isAutocommit: %s', async (isAutoCommit) => { + /* Evenly distribute 3*30 messages across 3 partitions */ + const numMsgs = 30; + let i = 0; + const messages = Array(3 * numMsgs) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: (i++) % 3 }; + }); + + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await producer.flush(); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: isAutoCommit, + autoCommitInterval: 500, + }); + + let msgCount = 0; + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + await consumer.run({ + eachBatchAutoResolve: false, + eachBatch: async ({ batch, resolveOffset, commitOffsetsIfNecessary }) => { + for (const message of batch.messages) { + msgCount++; + if ((+message.offset) < numMsgs/2) { + resolveOffset(message.offset); + } + } + if (!isAutoCommit) + await commitOffsetsIfNecessary(); + } + }); + await waitFor(() => msgCount >= (3 * numMsgs), () => null, { delay: 100 }); + + /* Disconnect should commit any uncommitted offsets */ + await consumer.disconnect(); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + }); + + await consumer.connect(); + const toppars = Array(3).fill().map((_, i) => ({ topic: topicName, partition: i })); + const committed = await consumer.committed(toppars); + const halfOffset = Math.floor(numMsgs/2).toString(); + expect(committed).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + offset: halfOffset, + }), + expect.objectContaining({ + topic: topicName, + partition: 1, + offset: halfOffset, + }), + expect.objectContaining({ + topic: topicName, + partition: 2, + offset: halfOffset, + }) + ]) + ); + }); +}); diff --git a/test/promisified/consumer/consumeMessages.spec.js b/test/promisified/consumer/consumeMessages.spec.js new file mode 100644 index 00000000..b4556999 --- /dev/null +++ b/test/promisified/consumer/consumeMessages.spec.js @@ -0,0 +1,744 @@ +jest.setTimeout(30000); + +const { CompressionTypes, ErrorCodes } = require('../../../lib').KafkaJS; +const { + secureRandom, + createTopic, + waitFor, + createProducer, + createConsumer, + waitForMessages, + sleep, +} = require('../testhelpers'); +const { Buffer } = require('buffer'); + +/* All variations of partitionsConsumedConcurrently */ +const cases = Array(3).fill().map((_, i) => [(i % 3) + 1]); + +describe.each(cases)('Consumer - partitionsConsumedConcurrently = %s -', (partitionsConsumedConcurrently) => { + let topicName, groupId, producer, consumer; + const partitions = 3; + + beforeEach(async () => { + console.log("Starting:", expect.getState().currentTestName); + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + await createTopic({ topic: topicName, partitions }); + producer = createProducer({}); + + consumer = 
createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: true, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + console.log("Ending:", expect.getState().currentTestName); + }); + + it('consume messages', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => messagesConsumed.push(event) + }); + + const messages = Array(10) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + + expect(messagesConsumed[0]).toEqual( + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ + key: Buffer.from(messages[0].key), + value: Buffer.from(messages[0].value), + offset: '0', + }), + }) + ); + + expect(messagesConsumed[messagesConsumed.length - 1]).toEqual( + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ + key: Buffer.from(messages[messages.length - 1].key), + value: Buffer.from(messages[messages.length - 1].value), + offset: '' + (messagesConsumed.length - 1), + }), + }) + ); + + // check if all offsets are present + expect(messagesConsumed.map(m => m.message.offset)).toEqual(messages.map((_, i) => `${i}`)); + }); + + it('consume messages with headers', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => messagesConsumed.push(event) + }); + + const messages = [{ + value: `value-${secureRandom}`, + headers: { + 'header-1': 'value-1', + 'header-2': 'value-2', + 'header-3': ['value-3-1', 'value-3-2', Buffer.from([1, 0, 1, 0, 1])], + 'header-4': Buffer.from([1, 0, 1, 0, 1]), + }, + partition: 0, + }]; + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + + expect(messagesConsumed[0]).toEqual( + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ + value: Buffer.from(messages[0].value), + offset: '0', + headers: { + // Headers are always returned as Buffers from the broker. + 'header-1': Buffer.from('value-1'), + 'header-2': Buffer.from('value-2'), + 'header-3': [Buffer.from('value-3-1'), Buffer.from('value-3-2'), Buffer.from([1, 0, 1, 0, 1])], + 'header-4': Buffer.from([1, 0, 1, 0, 1]), + } + }), + }) + ); + }); + + it.each([[true], [false]])('consumes messages using eachBatch - isAutoResolve: %s', async (isAutoResolve) => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: isAutoResolve, + eachBatch: async event => { + // Match the message format to be checked easily later. + event.batch.messages = event.batch.messages.map(msg => ({ + message: msg, + topic: event.batch.topic, + partition: event.batch.partition, + })); + messagesConsumed.push(...event.batch.messages); + + // If we're not auto-resolving, we need to resolve the offsets manually. 
+ if (!isAutoResolve) + event.resolveOffset(event.batch.messages[event.batch.messages.length - 1].message.offset); + } + }); + + const messages = Array(100 * partitions) + .fill() + .map((_, i) => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: i % partitions }; + }); + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + + for (let p = 0; p < partitions; p++) { + const specificPartitionMessages = messagesConsumed.filter(m => m.partition === p); + const specificExpectedMessages = messages.filter(m => m.partition === p); + expect(specificPartitionMessages[0]).toEqual( + expect.objectContaining({ + topic: topicName, + partition: p, + message: expect.objectContaining({ + key: Buffer.from(specificExpectedMessages[0].key), + value: Buffer.from(specificExpectedMessages[0].value), + offset: String(0), + }), + }) + ); + + expect(specificPartitionMessages[specificPartitionMessages.length - 1]).toEqual( + expect.objectContaining({ + topic: topicName, + partition: p, + message: expect.objectContaining({ + key: Buffer.from(specificExpectedMessages[specificExpectedMessages.length - 1].key), + value: Buffer.from(specificExpectedMessages[specificExpectedMessages.length - 1].value), + offset: String(specificExpectedMessages.length - 1), + }), + }) + ); + + // check if all offsets are present + expect(specificPartitionMessages.map(m => m.message.offset)).toEqual(specificExpectedMessages.map((_, i) => `${i}`)); + } + + }); + + it('partially resolving offsets in eachBatch leads to reconsumption', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + const skippedResolutionForPartition = Array(partitions).fill(false); + const messagesPerPartition = 100; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: false, + eachBatch: async (event) => { + const partition = event.batch.partition; + let maxOffset = -1; + for (const message of event.batch.messages) { + const offset = +message.offset; + maxOffset = offset; + messagesConsumed.push(message); + /* If we get a message greater than the halfway point, don't resolve it the first time around + * Only resolve it when we see it the second time. */ + if (offset < Math.floor(messagesPerPartition/2) || skippedResolutionForPartition[partition]) { + event.resolveOffset(offset); + } + } + /* If we've completed the first half of messages, then we are now allowed to resolve + * the second half. */ + if (maxOffset >= Math.floor(messagesPerPartition/2)) + skippedResolutionForPartition[partition] = true; + } + }); + + const messages = Array(messagesPerPartition * partitions) + .fill() + .map((_, i) => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: i % partitions }; + }); + + await producer.send({ topic: topicName, messages }); + + /* It's not possible to actually know the exact number of messages without knowing the + * cache growth characteristics, which may change in the future. So just check if there + * is at least 1 message more than we sent. 
*/ + await waitForMessages(messagesConsumed, { number: messages.length + 1 }); + expect(messagesConsumed.length).toBeGreaterThan(messages.length); + }); + + it('is able to reconsume messages after not resolving it', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + let messageSeen = false; + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: false, + eachBatch: async event => { + expect(event.batch.messages.length).toEqual(1); + expect(event.batch.messages[0].offset).toEqual('0'); + expect(event.batch.topic).toEqual(topicName); + expect(event.batch.partition).toEqual(0); + + if (!messageSeen) { + messageSeen = true; + return; + } + messagesConsumed.push(...event.batch.messages); + + // Since we're not auto-resolving, we need to resolve the offsets manually. + event.resolveOffset(event.batch.messages[event.batch.messages.length - 1].offset); + } + }); + + const messages = Array(1) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + await waitFor(() => consumer.assignment().length > 0, () => { }, 100); + await waitForMessages(messagesConsumed, { number: messages.length }); + }); + + it.each([[true], [false]])('is able to reconsume messages when an error is thrown - isAutoResolve: %s', async (isAutoResolve) => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + let messageSeen = false; + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: isAutoResolve, + eachBatch: async event => { + expect(event.batch.messages.length).toEqual(1); + expect(event.batch.messages[0].offset).toEqual('0'); + expect(event.batch.topic).toEqual(topicName); + expect(event.batch.partition).toEqual(0); + + if (!messageSeen) { + messageSeen = true; + throw new Error('a new error.'); + } + messagesConsumed.push(...event.batch.messages); + } + }); + + const messages = Array(1) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + }); + + it.each([[true], [false]])('does not reconsume resolved messages even on error - isAutoResolve: %s', async (isAutoResolve) => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: isAutoResolve, + eachBatch: async event => { + messagesConsumed.push(...event.batch.messages); + // Resolve offsets irrespective of the value of eachBatchAutoResolve. 
+ event.resolveOffset(event.batch.messages[event.batch.messages.length - 1].offset); + throw new Error('a new error.'); + } + }); + + const messages = Array(2) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + + expect(messagesConsumed[0].key.toString()).toBe(messages[0].key); + expect(messagesConsumed[1].key.toString()).toBe(messages[1].key); + }); + + it('consumes messages concurrently where partitionsConsumedConcurrently - partitions = diffConcurrencyPartitions', async () => { + const partitions = 3; + /* We want partitionsConsumedConcurrently to be 2, 3, and 4 rather than 1, 2, and 3 that is tested by the test. */ + const partitionsConsumedConcurrentlyDiff = partitionsConsumedConcurrently + 1; + topicName = `test-topic-${secureRandom()}`; + await createTopic({ + topic: topicName, + partitions: partitions, + }); + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + let inProgress = 0; + let inProgressMaxValue = 0; + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently: partitionsConsumedConcurrentlyDiff, + eachMessage: async event => { + inProgress++; + await sleep(1); + messagesConsumed.push(event); + inProgressMaxValue = Math.max(inProgress, inProgressMaxValue); + inProgress--; + }, + }); + + await waitFor(() => consumer.assignment().length > 0, () => { }, 100); + + const messages = Array(1024*9) + .fill() + .map((_, i) => { + const value = secureRandom(512); + return { key: `key-${value}`, value: `value-${value}`, partition: i % partitions }; + }); + + await producer.send({ topic: topicName, messages }); + await waitForMessages(messagesConsumed, { number: messages.length }); + expect(inProgressMaxValue).toBe(Math.min(partitionsConsumedConcurrentlyDiff, partitions)); + }); + + it('consume GZIP messages', async () => { + /* Discard and recreate producer with the compression set */ + producer = createProducer({ + compression: CompressionTypes.GZIP, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}`, partition: 0 }; + const key2 = secureRandom(); + const message2 = { key: `key-${key2}`, value: `value-${key2}`, partition: 0 }; + + await producer.send({ + topic: topicName, + messages: [message1, message2], + }); + + await expect(waitForMessages(messagesConsumed, { number: 2 })).resolves.toEqual([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ + key: Buffer.from(message1.key), + value: Buffer.from(message1.value), + offset: '0', + }), + }), + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ + key: Buffer.from(message2.key), + value: Buffer.from(message2.value), + offset: '1', + }), + }), + ]); + }); + + it('stops consuming messages when running = false', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + let calls = 0; + + consumer.run({ + eachMessage: async () => { + calls++; + await sleep(100); + }, + }); + + const key1 = secureRandom(); + const message1 = 
{ key: `key-${key1}`, value: `value-${key1}` }; + const key2 = secureRandom(); + const message2 = { key: `key-${key2}`, value: `value-${key2}` }; + + await producer.send({ topic: topicName, messages: [message1, message2] }); + await waitFor(() => calls > 0, () => { }, 10); + await consumer.disconnect(); // don't give the consumer the chance to consume the 2nd message + + expect(calls).toEqual(1); + }); + + it('does not disconnect in the middle of message processing', async () => { + await producer.connect(); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + let calls = 0; + let failedSeek = false; + consumer.run({ + eachMessage: async ({ message }) => { + /* Take a long time to process the message. */ + await sleep(7000); + try { + consumer.seek({ topic: topicName, partition: 0, offset: message.offset }); + } catch { + failedSeek = true; + } + calls++; + } + }); + + await producer.send({ + topic: topicName, + messages: [{ key: '1', value: '1' }], + }); + + /* Waiting for assignment and then a bit more means that the first eachMessage starts running. */ + await waitFor(() => consumer.assignment().length > 0, () => { }, { delay: 50 }); + await sleep(200); + await consumer.disconnect(); + + /* Even without explicitly waiting for it, a pending call to eachMessage must complete before disconnect does. */ + expect(calls).toEqual(1); + expect(failedSeek).toEqual(false); + + await producer.disconnect(); + }); + + it('max.poll.interval.ms should not be exceeded when per-message processing time < max.poll.interval.ms', async () => { + let rebalanceCount = 0; + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + rebalanceTimeout: 7000, /* also changes max.poll.interval.ms */ + sessionTimeout: 6000, /* minimum default value, must be less than + * rebalanceTimeout */ + autoCommitInterval: 1000, + }, { + rebalance_cb: () => { + rebalanceCount++; + }, + }); + + await producer.connect(); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + consumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => { + messagesConsumed.push(event); + await sleep(7500); /* 7.5s 'processing' + * after each message cache is cleared + * and max poll interval isn't reached */ + } + }); + + const messages = Array(5) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + + await waitForMessages(messagesConsumed, { number: 5, delay: 100 }); + expect(rebalanceCount).toEqual(1); /* Just the assign and nothing else at this point. 
*/ + }, 60000); + + it('max.poll.interval.ms should not be exceeded when batch processing time < max.poll.interval.ms', async () => { + if (partitionsConsumedConcurrently !== 1) { + return; + } + let assigns = 0; + let revokes = 0; + let lost = 0; + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + rebalanceTimeout: 7000, /* also changes max.poll.interval.ms */ + sessionTimeout: 6000, /* minimum default value, must be less than + * rebalanceTimeout */ + autoCommitInterval: 1000, + }, { + rebalance_cb: async (err, assignment, { assignmentLost }) => { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + expect(assignment.length).toBe(3); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + if (assignmentLost()) + lost++; + expect(assignment.length).toBe(3); + } + } + }); + + await producer.connect(); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + let errors = false; + let receivedMessages = 0; + const batchLengths = [1, 1, 2, + /* cache reset */ + 1, 1]; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: true, + eachBatch: async (event) => { + receivedMessages++; + + try { + expect(event.batch.messages.length) + .toEqual(batchLengths[receivedMessages - 1]); + + if (receivedMessages === 3) { + expect(event.isStale()).toEqual(false); + await sleep(7500); + /* 7.5s 'processing' + * doesn't exceed max poll interval. + * Cache reset is transparent */ + expect(event.isStale()).toEqual(false); + } + } catch (e) { + console.error(e); + errors = true; + } + messagesConsumed.push(...event.batch.messages); + } + }); + + const messages = Array(6) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + + await waitForMessages(messagesConsumed, { number: 6, delay: 100 }); + expect(messagesConsumed.length).toEqual(6); + + /* Triggers revocation */ + await consumer.disconnect(); + + /* First assignment */ + expect(assigns).toEqual(1); + /* Revocation on disconnect */ + expect(revokes).toEqual(1); + expect(lost).toEqual(0); + expect(errors).toEqual(false); + }, 60000); + + it('max.poll.interval.ms should be exceeded when batch processing time > max.poll.interval.ms', async () => { + if (partitionsConsumedConcurrently !== 1) { + return; + } + let assigns = 0; + let revokes = 0; + let lost = 0; + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + sessionTimeout: 6000, /* minimum default value, must be less than + * rebalanceTimeout */ + autoCommitInterval: 1000, + }, { + /* Testing direct librdkafka configuration here */ + 'max.poll.interval.ms': 7000, + rebalance_cb: async (err, assignment, { assignmentLost }) => { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + expect(assignment.length).toBe(3); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + if (assignmentLost()) + lost++; + expect(assignment.length).toBe(3); + } + } + }); + + await producer.connect(); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + let errors = false; + let receivedMessages = 0; + const batchLengths = [/* first we reach batches of 32 message and fetches of 64 + * max poll interval exceeded happens on second + * 32 messages batch of the 64 msg fetch. 
*/ + 1, 1, 2, 2, 4, 4, 8, 8, 16, 16, 32, 32, 32, 32, + /* max poll interval exceeded, 32 reprocessed + + * 1 new message. */ + 1, 1, 2, 2, 4, 4, 8, 8, 3]; + consumer.run({ + partitionsConsumedConcurrently, + eachBatchAutoResolve: true, + eachBatch: async (event) => { + receivedMessages++; + + try { + expect(event.batch.messages.length) + .toEqual(batchLengths[receivedMessages - 1]); + + if (receivedMessages === 13) { + expect(event.isStale()).toEqual(false); + await sleep(6000); + /* 6s 'processing' + * cache clearance starts at 7000 */ + expect(event.isStale()).toEqual(false); + } + if ( receivedMessages === 14) { + expect(event.isStale()).toEqual(false); + await sleep(10000); + /* 10s 'processing' + * 16s in total exceeds max poll interval. + * in this last batch after clearance. + * Batch is marked stale + * and partitions are lost */ + expect(event.isStale()).toEqual(true); + } + } catch (e) { + console.error(e); + errors = true; + } + messagesConsumed.push(...event.batch.messages); + } + }); + + const totalMessages = 191; /* without reprocessed messages */ + const messages = Array(totalMessages) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: 0 }; + }); + + await producer.send({ topic: topicName, messages }); + /* 32 message are re-consumed after not being resolved + * because of the stale batch */ + await waitForMessages(messagesConsumed, { number: totalMessages + 32, delay: 100 }); + expect(messagesConsumed.length).toEqual(totalMessages + 32); + + /* Triggers revocation */ + await consumer.disconnect(); + + /* First assignment + assignment after partitions lost */ + expect(assigns).toEqual(2); + /* Partitions lost + revocation on disconnect */ + expect(revokes).toEqual(2); + /* Only one of the revocations has the lost flag */ + expect(lost).toEqual(1); + expect(errors).toEqual(false); + }, 60000); +}); diff --git a/test/promisified/consumer/consumerCacheTests.spec.js b/test/promisified/consumer/consumerCacheTests.spec.js new file mode 100644 index 00000000..aabcd90c --- /dev/null +++ b/test/promisified/consumer/consumerCacheTests.spec.js @@ -0,0 +1,321 @@ +jest.setTimeout(30000); + +const { + secureRandom, + createTopic, + waitFor, + createProducer, + createConsumer, + waitForMessages, + sleep, +} = require('../testhelpers'); + +/* All required combinations of [autoCommit, partitionsConsumedConcurrently] */ +const cases = [ + [true, 1], + [true, 3], + [false, 1], + [false, 3], +]; + +describe.each(cases)('Consumer message cache - isAutoCommit = %s - partitionsConsumedConcurrently = %s -', (isAutoCommit, partitionsConsumedConcurrently) => { + let topicName, groupId, producer, consumer; + + beforeEach(async () => { + console.log("Starting:", expect.getState().currentTestName); + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + await createTopic({ topic: topicName, partitions: 3 }); + + producer = createProducer({}); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: isAutoCommit, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + console.log("Ending:", expect.getState().currentTestName); + }); + + it('is cleared on pause', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const msgs = 1024; + const messagesConsumed = []; + consumer.run({ + partitionsConsumedConcurrently, + 
eachMessage: async event => { + messagesConsumed.push(event); + if (event.partition === 0 && (+event.message.offset) === (msgs - 1)) { + consumer.pause([{ topic: topicName, partitions: [0] }]); + } + } + }); + + /* Evenly distribute msgs*9 messages across 3 partitions */ + let i = 0; + const messages = Array(msgs * 9) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: ((i++) % 3) }; + }); + + await producer.send({ topic: topicName, messages }); + + // Wait for the messages. + // We consume msgs*1 messages from partition 0, and msgs*3 from partition 1 and 2. + await waitForMessages(messagesConsumed, { number: msgs * 7 }); + + // We should not consume even one more message than that. + await sleep(1000); + expect(messagesConsumed.length).toEqual(msgs * 7); + + // check if all offsets are present + // partition 0 + expect(messagesConsumed.filter(m => m.partition === 0).map(m => m.message.offset)).toEqual(Array(msgs).fill().map((_, i) => `${i}`)); + // partition 1 + expect(messagesConsumed.filter(m => m.partition === 1).map(m => m.message.offset)).toEqual(Array(msgs * 3).fill().map((_, i) => `${i}`)); + // partition 2 + expect(messagesConsumed.filter(m => m.partition === 2).map(m => m.message.offset)).toEqual(Array(msgs * 3).fill().map((_, i) => `${i}`)); + }); + + it('is cleared on seek', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + let hasBeenSeeked = false; + consumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => { + messagesConsumed.push(event); + if (event.partition === 0 && (+event.message.offset) === 1023 && !hasBeenSeeked) { + consumer.seek({ topic: topicName, partition: 0, offset: 0 }); + hasBeenSeeked = true; + } + } + }); + + /* Evenly distribute 1024*9 messages across 3 partitions */ + let i = 0; + const messages = Array(1024 * 9) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: ((i++) % 3) }; + }); + + await producer.send({ topic: topicName, messages }); + + // Wait for the messages. + // We consume 1024*4 messages from partition 0, and 1024*3 from partition 1 and 2. + await waitForMessages(messagesConsumed, { number: 1024 * 10 }); + + // We should not consume even one more message than that. + await sleep(1000); + expect(messagesConsumed.length).toEqual(1024 * 10); + + // check if all offsets are present + // partition 0 + expect(messagesConsumed.filter(m => m.partition === 0).map(m => m.message.offset)) + .toEqual(Array(1024 * 4).fill().map((_, i) => i < 1024 ? `${i}` : `${i - 1024}`)); + // partition 1 + expect(messagesConsumed.filter(m => m.partition === 1).map(m => m.message.offset)).toEqual(Array(1024 * 3).fill().map((_, i) => `${i}`)); + // partition 2 + expect(messagesConsumed.filter(m => m.partition === 2).map(m => m.message.offset)).toEqual(Array(1024 * 3).fill().map((_, i) => `${i}`)); + }); + + it('is cleared before rebalance', async () => { + /* If another test times out, jest chooses to run this test in parallel with + * the other test. I think this causes an issue with shared groupIds. So to ensure + * the consumers are created with the same groupId, we create them here. + * TODO: verify correctness of theory. It's conjecture... which solves flakiness. 
*/ + let groupId = `consumer-group-id-${secureRandom()}`; + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: isAutoCommit, + }); + + const consumer2 = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: isAutoCommit, + clientId: "consumer2", + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + const messagesConsumedConsumer1 = []; + const messagesConsumedConsumer2 = []; + let consumer2ConsumeRunning = false; + + consumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => { + messagesConsumed.push(event); + messagesConsumedConsumer1.push(event); + if (!isAutoCommit) + await consumer.commitOffsets([ + { topic: event.topic, partition: event.partition, offset: Number(event.message.offset) + 1 }, + ]); + + /* Until the second consumer joins, consume messages slowly so as to not consume them all + * before the rebalance triggers. */ + if (messagesConsumed.length > 1024 && !consumer2ConsumeRunning) { + await sleep(10); + } + } + }); + + /* Evenly distribute 1024*9 messages across 3 partitions */ + let i = 0; + const multiplier = 9; + const messages = Array(1024 * multiplier) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: (i++) % 3 }; + }); + + await producer.send({ topic: topicName, messages }); + + // Wait for the messages - some of them, before starting the + // second consumer. + await waitForMessages(messagesConsumed, { number: 1024 }); + + await consumer2.connect(); + await consumer2.subscribe({ topic: topicName }); + consumer2.run({ + eachMessage: async event => { + messagesConsumed.push(event); + messagesConsumedConsumer2.push(event); + } + }); + + await waitFor(() => consumer2.assignment().length > 0, () => null); + consumer2ConsumeRunning = true; + + /* Now that both consumers have joined, wait for all msgs to be consumed */ + await waitForMessages(messagesConsumed, { number: 1024 * multiplier }); + + /* No extra messages should be consumed. */ + await sleep(1000); + expect(messagesConsumed.length).toEqual(1024 * multiplier); + + /* Check if all messages were consumed. */ + expect(messagesConsumed.map(event => (+event.message.offset)).sort((a, b) => a - b)) + .toEqual(Array(1024 * multiplier).fill().map((_, i) => Math.floor(i / 3))); + + /* Consumer2 should have consumed at least one message. */ + expect(messagesConsumedConsumer2.length).toBeGreaterThan(0); + + await consumer2.disconnect(); + }, 60000); + + it('does not hold up polling for non-message events', async () => { + /* Even if the cache is full of messages, we should still be polling for + * non-message events like rebalances, etc. Internally, this is to make sure that + * we call poll() at least once within max.poll.interval.ms even if the cache is + * still full. This depends on us expiring the cache on time. 
*/ + const impatientConsumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + rebalanceTimeout: 10000, /* also changes max.poll.interval.ms */ + sessionTimeout: 10000, + autoCommitInterval: 1000, + clientId: "impatientConsumer", + autoCommit: isAutoCommit, + }); + + await producer.connect(); + await impatientConsumer.connect(); + await impatientConsumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + let impatientConsumerMessages = []; + let consumer1Messages = []; + let consumer1TryingToJoin = false; + + impatientConsumer.run({ + partitionsConsumedConcurrently, + eachMessage: async event => { + messagesConsumed.push(event); + impatientConsumerMessages.push(event); + if (!isAutoCommit) + await impatientConsumer.commitOffsets([ + { topic: event.topic, partition: event.partition, offset: Number(event.message.offset) + 1 }, + ]); + + /* When the second consumer is joining, deliberately slow down message consumption. + * This is so the cache remains full. + * We should still have a rebalance very soon, since we will expire the cache and + * trigger a rebalance before max.poll.interval.ms. + */ + if (consumer1TryingToJoin) { + await sleep(1000); + } + } + }); + + /* Distribute 1024*10 messages across 3 partitions */ + let i = 0; + const messages = Array(1024 * 10) + .fill() + .map(() => { + const value = secureRandom(); + return { value: `value-${value}`, partition: (i++) % 3 }; + }); + + await producer.send({ topic: topicName, messages }); + + /* Wait for the messages - some of them, before starting the + * second consumer. + * FIXME: This can get a bit flaky depending on the system, as sometimes + * the impatientConsumer consumes all the messages before consumer1TryingToJoin + * can be set to true */ + await waitForMessages(messagesConsumed, { number: 1024, delay: 100 }); + consumer1TryingToJoin = true; + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ + eachMessage: async event => { + messagesConsumed.push(event); + consumer1Messages.push(event); + } + }); + await waitFor(() => consumer.assignment().length > 0, () => null); + consumer1TryingToJoin = false; + + /* Now that both consumers have joined, wait for all msgs to be consumed */ + await waitForMessages(messagesConsumed, { number: 1024 * 10 }); + + // No extra messages should be consumed. + await sleep(1000); + expect(messagesConsumed.length).toEqual(1024 * 10); + + /* Each consumer should have consumed at least one message. 
*/ + expect(consumer1Messages.length).toBeGreaterThan(0); + expect(impatientConsumerMessages.length).toBeGreaterThan(0); + + await impatientConsumer.disconnect(); + }, 60000); +}); diff --git a/test/promisified/consumer/consumerTransactions.spec.js b/test/promisified/consumer/consumerTransactions.spec.js new file mode 100644 index 00000000..980a7514 --- /dev/null +++ b/test/promisified/consumer/consumerTransactions.spec.js @@ -0,0 +1,418 @@ +jest.setTimeout(10000); + +const { ErrorCodes } = require('../../../lib').KafkaJS; +const { + secureRandom, + createTopic, + createProducer, + createConsumer, + waitForMessages, + generateMessages, +} = require('../testhelpers'); + +describe('Consumer transactions', () => { + let topicName, groupId, producer, consumer; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + await createTopic({ topic: topicName }); + producer = createProducer({ + idempotent: true, + maxInFlightRequests: 1, + }); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: true, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + }); + + it('accepts messages from an idempotent producer', async () => { + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + const idempotentMessages = generateMessages({ prefix: 'idempotent', partition: 0 }); + + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + await producer.sendBatch({ + topicMessages: [{ topic: topicName, messages: idempotentMessages }], + }); + + const number = idempotentMessages.length; + await waitForMessages(messagesConsumed, { + number, + }); + + expect(messagesConsumed).toHaveLength(idempotentMessages.length); + expect(messagesConsumed[0].message.value.toString()).toMatch(/value-idempotent-0/); + expect(messagesConsumed[99].message.value.toString()).toMatch(/value-idempotent-99/); + }); + + it('accepts messages from committed transactions', async () => { + producer = createProducer({ + transactionalId: `transactional-id-${secureRandom()}`, + maxInFlightRequests: 1, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + const messages1 = generateMessages({ prefix: 'txn1', partition: 0 }); + const messages2 = generateMessages({ prefix: 'txn2', partition: 0 }); + const nontransactionalMessages1 = generateMessages({ prefix: 'nontransactional1', number: 1, partition: 0 }); + + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + // We cannot send non-transaction messages. 
+ await expect(producer.sendBatch({ + topicMessages: [{ topic: topicName, messages: nontransactionalMessages1 }], + })).rejects.toHaveProperty('code', ErrorCodes.ERR__STATE); + + // We can run a transaction + const txn1 = await producer.transaction(); + await txn1.sendBatch({ + topicMessages: [{ topic: topicName, messages: messages1 }], + }); + await txn1.commit(); + + // We can immediately run another transaction + const txn2 = await producer.transaction(); + await txn2.sendBatch({ + topicMessages: [{ topic: topicName, messages: messages2 }], + }); + await txn2.commit(); + + const numMessages = + messages1.length + messages2.length; + + await waitForMessages(messagesConsumed, { + number: numMessages, + }); + + expect(messagesConsumed[0].message.value.toString()).toMatch(/value-txn1-0/); + expect(messagesConsumed[numMessages - 1].message.value.toString()).toMatch(/value-txn2-99/); + }); + + it('does not receive aborted messages', async () => { + producer = createProducer({ + transactionalId: `transactional-id-${secureRandom()}`, + maxInFlightRequests: 1, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + const abortedMessages1 = generateMessages({ prefix: 'aborted-txn-1', partition: 0 }); + const abortedMessages2 = generateMessages({ prefix: 'aborted-txn-2', partition: 0 }); + const committedMessages = generateMessages({ prefix: 'committed-txn', number: 10, partition: 0 }); + + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + const abortedTxn1 = await producer.transaction(); + await abortedTxn1.sendBatch({ + topicMessages: [{ topic: topicName, messages: abortedMessages1 }], + }); + await abortedTxn1.abort(); + + const abortedTxn2 = await producer.transaction(); + await abortedTxn2.sendBatch({ + topicMessages: [{ topic: topicName, messages: abortedMessages2 }], + }); + await abortedTxn2.abort(); + + const committedTxn = await producer.transaction(); + await committedTxn.sendBatch({ + topicMessages: [{ topic: topicName, messages: committedMessages }], + }); + await committedTxn.commit(); + + const number = committedMessages.length; + await waitForMessages(messagesConsumed, { + number, + }); + + expect(messagesConsumed).toHaveLength(number); + expect(messagesConsumed[0].message.value.toString()).toMatch(/value-committed-txn-0/); + expect(messagesConsumed[number - 1].message.value.toString()).toMatch(/value-committed-txn-9/); + }); + + it( + 'receives aborted messages for an isolation level of READ_UNCOMMITTED', + async () => { + producer = createProducer({ + transactionalId: `transactional-id-${secureRandom()}`, + maxInFlightRequests: 1, + }); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + readUncommitted: true, + fromBeginning: true, + autoCommit: true, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + + const abortedMessages = generateMessages({ prefix: 'aborted-txn1', partition: 0 }); + + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + const abortedTxn1 = await producer.transaction(); + await abortedTxn1.sendBatch({ + topicMessages: [{ topic: topicName, messages: abortedMessages }], + }); + await abortedTxn1.abort(); + + const number = abortedMessages.length; + await waitForMessages(messagesConsumed, { + number, + }); + + expect(messagesConsumed).toHaveLength(abortedMessages.length); + 
expect(messagesConsumed[0].message.value.toString()).toMatch(/value-aborted-txn1-0/); + expect(messagesConsumed[messagesConsumed.length - 1].message.value.toString()).toMatch( + /value-aborted-txn1-99/ + ); + } + ); + + it( + 'respects offsets sent by a committed transaction ("consume-transform-produce" flow)', + async () => { + // Seed the topic with some messages. We don't need a tx producer for this. + await producer.connect(); + const partition = 0; + const messages = generateMessages().map(message => ({ + ...message, + partition, + })); + + await producer.send({ + topic: topicName, + messages, + }); + + await producer.disconnect(); + + producer = createProducer({ + transactionalId: `transactional-id-${secureRandom()}`, + maxInFlightRequests: 1, + }); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: false, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + // 1. Run consumer with "autoCommit=false" + + let messagesConsumed = []; + // This stores the latest offsets consumed for each partition, when we received the ith message. + let uncommittedOffsetsPerMessage = []; + let latestOffsetsPerPartition = {}; + + const eachMessage = async ({ partition, message }) => { + messagesConsumed.push(message); + /* The message.offset indicates current offset, so we need to add 1 to it, since committed offset denotes + * the next offset to consume. */ + latestOffsetsPerPartition[partition] = Number(message.offset) + 1; + uncommittedOffsetsPerMessage.push(Object.assign({}, latestOffsetsPerPartition)); + }; + + consumer.run({ + eachMessage, + }); + + // 2. Consume pre-produced messages. + + const number = messages.length; + await waitForMessages(messagesConsumed, { + number, + }); + + expect(messagesConsumed[0].value.toString()).toMatch(/value-0/); + expect(messagesConsumed[99].value.toString()).toMatch(/value-99/); + expect(uncommittedOffsetsPerMessage).toHaveLength(messagesConsumed.length); + + // 3. Send offsets in a transaction and commit + const txnToCommit = await producer.transaction(); + let offsetsToCommit = uncommittedOffsetsPerMessage[97]; + let topicPartitionOffsets = { topic: topicName, partitions: [] }; + for (const partition in offsetsToCommit) { + topicPartitionOffsets.partitions.push({ partition, offset: offsetsToCommit[partition] }); + } + + await txnToCommit.sendOffsets({ + consumer, + topics: [topicPartitionOffsets], + }); + await txnToCommit.commit(); + + // Restart consumer - we cannot stop it, so we recreate it. + await consumer.disconnect(); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: false, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + messagesConsumed = []; + uncommittedOffsetsPerMessage = []; + + consumer.run({ eachMessage }); + + // Assert we only consume the messages that were after the sent offset + await waitForMessages(messagesConsumed, { + number: 2, + }); + + expect(messagesConsumed).toHaveLength(2); + expect(messagesConsumed[0].value.toString()).toMatch(/value-98/); + expect(messagesConsumed[1].value.toString()).toMatch(/value-99/); + } + ); + + it( + 'does not respect offsets sent by an aborted transaction ("consume-transform-produce" flow)', + async () => { + // Seed the topic with some messages. We don't need a tx producer for this. 
+ await producer.connect(); + + const partition = 0; + const messages = generateMessages().map(message => ({ + ...message, + partition, + })); + + await producer.send({ + topic: topicName, + messages, + }); + + await producer.disconnect(); + + producer = createProducer({ + transactionalId: `transactional-id-${secureRandom()}`, + maxInFlightRequests: 1, + }); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: false, + }); + + await consumer.connect(); + await producer.connect(); + await consumer.subscribe({ topic: topicName }); + + // 1. Run consumer with "autoCommit=false" + + let messagesConsumed = []; + // This stores the latest offsets consumed for each partition, when we received the ith message. + let uncommittedOffsetsPerMessage = []; + let latestOffsetsPerPartition = {}; + + const eachMessage = async ({ partition, message }) => { + messagesConsumed.push(message); + /* The message.offset indicates current offset, so we need to add 1 to it, since committed offset denotes + * the next offset to consume. */ + latestOffsetsPerPartition[partition] = Number(message.offset) + 1; + uncommittedOffsetsPerMessage.push(Object.assign({}, latestOffsetsPerPartition)); + }; + + consumer.run({ + eachMessage, + }); + + // 2. Consume produced messages. + await waitForMessages(messagesConsumed, { number: messages.length }); + + expect(messagesConsumed[0].value.toString()).toMatch(/value-0/); + expect(messagesConsumed[99].value.toString()).toMatch(/value-99/); + expect(uncommittedOffsetsPerMessage).toHaveLength(messagesConsumed.length); + + // 3. Send offsets in a transaction and abort it + const txnToAbort = await producer.transaction(); + let offsetsToCommit = uncommittedOffsetsPerMessage[97]; + let topicPartitionOffsets = { topic: topicName, partitions: [] }; + for (const partition in offsetsToCommit) { + topicPartitionOffsets.partitions.push({ partition, offset: offsetsToCommit[partition] }); + } + + await txnToAbort.sendOffsets({ + consumer, + topics: [topicPartitionOffsets], + }); + await txnToAbort.abort(); + + /* Restart consumer - we cannot stop it, so we recreate it.
*/ + messagesConsumed = []; + uncommittedOffsetsPerMessage = []; + + await consumer.disconnect(); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + fromBeginning: true, + autoCommit: false, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + consumer.run({ + eachMessage, + }); + + await waitForMessages(messagesConsumed, { number: 1 }); + expect(messagesConsumed[0].value.toString()).toMatch(/value-0/); + await waitForMessages(messagesConsumed, { number: messages.length }); + expect(messagesConsumed[messagesConsumed.length - 1].value.toString()).toMatch(/value-99/); + } + ); +}); \ No newline at end of file diff --git a/test/promisified/consumer/groupInstanceId.spec.js b/test/promisified/consumer/groupInstanceId.spec.js new file mode 100644 index 00000000..bf814000 --- /dev/null +++ b/test/promisified/consumer/groupInstanceId.spec.js @@ -0,0 +1,137 @@ +jest.setTimeout(30000); + +const { waitFor, + secureRandom, + createTopic, + createConsumer, + sleep, } = require("../testhelpers"); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Consumer with static membership', () => { + let consumer; + let groupId, topicName; + + let consumerConfig; + + beforeEach(async () => { + topicName = `test-topic1-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + consumerConfig = { + groupId, + }; + consumer = null; + await createTopic({ topic: topicName, partitions: 2 }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + }); + + it('does not rebalance after disconnect', async () => { + let assigns = 0; + let revokes = 0; + const rebalanceCallback = function (err) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + } else { + // It's either assign or revoke and nothing else. + jest.fail('Unexpected error code'); + } + }; + + // Create and start two consumers. + consumer = createConsumer(consumerConfig, { + 'group.instance.id': 'instance-1', + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + const consumer2 = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + 'group.instance.id': 'instance-2', + }); + await consumer2.connect(); + await consumer2.subscribe({ topic: topicName }); + consumer2.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer2.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + + // Disconnect one consumer and reconnect it. It should not cause a rebalance in the other. 
+ await consumer.disconnect(); + + consumer = createConsumer(consumerConfig, { + 'group.instance.id': 'instance-1', + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + expect(revokes).toBe(0); + + await consumer.disconnect(); + await consumer2.disconnect(); + consumer = null; + }); + + it('does rebalance after session timeout', async () => { + let assigns = 0; + let revokes = 0; + const rebalanceCallback = function (err) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + } else { + // It's either assign or revoke and nothing else. + jest.fail('Unexpected error code'); + } + }; + + // Create and start two consumers. + consumer = createConsumer(consumerConfig, { + 'group.instance.id': 'instance-1', + 'session.timeout.ms': '10000', + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + const consumer2 = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + 'group.instance.id': 'instance-2', + 'session.timeout.ms': '10000', + }); + await consumer2.connect(); + await consumer2.subscribe({ topic: topicName }); + consumer2.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer2.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + + // Disconnect one consumer without reconnecting it. It should cause a rebalance in the other consumer after the session timeout. + await consumer.disconnect(); + consumer = null; + + await sleep(8000); + + // Session timeout (10s) hasn't kicked in yet - we have slept for just 8s.
+ expect(consumer2.assignment().length).toBe(1); + + await waitFor(() => consumer2.assignment().length === 2, () => null, 1000); + expect(consumer2.assignment().length).toBe(2); + expect(assigns).toBe(2); + expect(revokes).toBe(1); + + await consumer2.disconnect(); + }); +}); \ No newline at end of file diff --git a/test/promisified/consumer/incrementalRebalance.spec.js b/test/promisified/consumer/incrementalRebalance.spec.js new file mode 100644 index 00000000..61647ad4 --- /dev/null +++ b/test/promisified/consumer/incrementalRebalance.spec.js @@ -0,0 +1,174 @@ +jest.setTimeout(30000); + +const { waitFor, + secureRandom, + createTopic, + createConsumer, } = require("../testhelpers"); +const { PartitionAssigners, ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Consumer > incremental rebalance', () => { + let consumer; + let groupId, topicName; + + const consumerConfig = { + groupId, + partitionAssigners: [PartitionAssigners.cooperativeSticky], + }; + + beforeEach(async () => { + topicName = `test-topic1-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + consumer = null; + await createTopic({ topic: topicName, partitions: 2 }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + }); + + it('returns protocol name', async () => { + consumer = createConsumer(consumerConfig); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + expect(consumer.rebalanceProtocol()).toEqual('COOPERATIVE'); + }); + + it('calls rebalance callback', async () => { + let assigns = 0; + let revokes = 0; + const rebalanceCallback = function (err, assignment) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + expect(assignment.length).toBe(2); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + expect(assignment.length).toBe(2); + } else { + // It's either assign or revoke and nothing else. + jest.fail('Unexpected error code'); + } + }; + + + consumer = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + expect(consumer.assignment().length).toBe(2); + + await consumer.disconnect(); + consumer = null; + expect(revokes).toBe(1); + expect(assigns).toBe(1); + }); + + it('allows changing the assignment', async () => { + let assigns = 0; + const rebalanceCallback = function (err, assignment) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + expect(assignment.length).toBe(2); + assignment = [assignment[0]]; + return assignment; + } else { + // It's either assign or revoke and nothing else. 
+ expect(err.code).toBe(ErrorCodes.ERR__REVOKE_PARTITIONS); + } + }; + + + consumer = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + expect(consumer.assignment().length).toBe(1); + }); + + it('is actually incremental', async () => { + let expectedAssignmentCount = 0; + const rebalanceCallback = (err, assignment) => { + /* Empty assignments are ignored, they're a rebalance for the synchronization barrier. */ + if (assignment.length === 0) + return; + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + expect(assignment.length).toBe(expectedAssignmentCount); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + expect(assignment.length).toBe(expectedAssignmentCount); + } else { + // It's either assign or revoke and nothing else. + jest.fail('Unexpected error code'); + } + }; + + /* First consumer joins and gets all partitions. */ + expectedAssignmentCount = 2; + consumer = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(consumer.assignment().length).toBe(2); + + /* Second consumer joins and gets one partition. */ + expectedAssignmentCount = 1; + const consumer2 = createConsumer(consumerConfig, { + 'rebalance_cb': rebalanceCallback, + }); + + await consumer2.connect(); + await consumer2.subscribe({ topic: topicName }); + consumer2.run({ eachMessage: async () => { } }); + await waitFor(() => consumer2.assignment().length > 0, () => null, 1000); + expect(consumer.assignment().length).toBe(1); + expect(consumer2.assignment().length).toBe(1); + + await consumer2.disconnect(); + }); + + it('works with promisified handler', async () => { + let assigns = 0; + let revokes = 0; + + consumer = createConsumer(consumerConfig, { + rebalance_cb: async (err, assignment) => { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + assigns++; + expect(assignment.length).toBe(2); + } else if (err.code === ErrorCodes.ERR__REVOKE_PARTITIONS) { + revokes++; + expect(assignment.length).toBe(2); + } + }, + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(assigns).toBe(1); + expect(consumer.assignment().length).toBe(2); + + await consumer.disconnect(); + consumer = null; + expect(revokes).toBe(1); + expect(assigns).toBe(1); + }); +}); \ No newline at end of file diff --git a/test/promisified/consumer/pause.spec.js b/test/promisified/consumer/pause.spec.js new file mode 100644 index 00000000..d5bbfe6c --- /dev/null +++ b/test/promisified/consumer/pause.spec.js @@ -0,0 +1,686 @@ +jest.setTimeout(30000); + +const { + secureRandom, + createTopic, + waitForMessages, + waitFor, + waitForConsumerToJoinGroup, + createConsumer, + createProducer, +} = require('../testhelpers'); + +describe('Consumer', () => { + let consumer; + let groupId, producer, topics; + + beforeEach(async () => { + console.log("Starting:", expect.getState().currentTestName); + topics = [`test-topic1-${secureRandom()}`, `test-topic2-${secureRandom()}`]; + groupId = 
`consumer-group-id-${secureRandom()}`; + + for (const topic of topics) { + await createTopic({ topic, partitions: 2 }); + } + + producer = createProducer({ + }); + + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 1, + maxBytesPerPartition: 180, + fromBeginning: true, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + console.log("Ending:", expect.getState().currentTestName); + }); + + describe('when pausing', () => { + it('throws an error if the topic is invalid', async () => { + await consumer.connect(); + expect(() => consumer.pause([{ topic: null, partitions: [0] }])).toThrow('Topic must be a string'); + }); + + it('throws an error if Consumer#connect has not been called', () => { + expect(() => consumer.pause([{ topic: 'foo', partitions: [0] }])).toThrow('Pause can only be called while connected'); + }); + + it('pauses the appropriate topic/partition when pausing via the eachMessage callback', async () => { + await consumer.connect(); + await producer.connect(); + + /* Send the same 4 messages to each topic, to partitions 0, 0, 1, 0 of that topic, in order. */ + const messages = [0, 0, 1, 0].map(partition => { + const key = secureRandom(); + return { key: `key-${key}`, value: `value-${key}`, partition }; + }); + + /* Send the first 2 messages to each topic. */ + for (const topic of topics) { + await producer.send({ topic, messages: messages.slice(0, 2) }); + await consumer.subscribe({ topic }); + } + + let shouldPause = true; + let pauseMessageRecvd = false; + const messagesConsumed = []; + const resumeCallbacks = []; + consumer.run({ + eachMessage: async event => { + const { topic, message, pause } = event; + + const whichTopic = topics.indexOf(topic); + const whichMessage = messages.findIndex(m => String(m.key) === String(message.key)); + + /* In case we're at the 2nd message (idx = 1) for the first topic, pause the partition. + * It should be the 0th partition which gets paused. */ + if (shouldPause && whichTopic === 0 && whichMessage === 1) { + resumeCallbacks.push(pause()); + pauseMessageRecvd = true; + /* We throw an error to indicate to the runner that this message should be + * considered 'unprocessed'. */ + throw new Error('bailing out'); + } + messagesConsumed.push({ + topic: whichTopic, + message: whichMessage, + }); + }, + }); + + await waitForMessages(messagesConsumed, { number: 3 }); + /* Librdkafka provides no guarantee about message ordering beyond per-partition. + * Encountering 3 messages is no guarantee that we actually managed to pause.
*/ + await waitFor(() => pauseMessageRecvd, () => { }, { delay: 100 }); + const [pausedTopic] = topics; + expect(consumer.paused()).toEqual([{ topic: pausedTopic, partitions: [0] }]); + + for (const topic of topics) { + await producer.send({ topic, messages: messages.slice(2) }); + } + await waitForMessages(messagesConsumed, { number: 6, delay: 10 }); + + expect(messagesConsumed).toHaveLength(6); + expect(messagesConsumed).toContainEqual({ topic: 0, message: 0 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 0, message: 2 }); // partition 1 + + expect(messagesConsumed).toContainEqual({ topic: 1, message: 0 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 1 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 2 }); // partition 1 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 3 }); // partition 0 + + shouldPause = false; + resumeCallbacks.forEach(resume => resume()); + + await waitForMessages(messagesConsumed, { number: 8 }); + + // these messages have to wait until the consumer has resumed + expect(messagesConsumed).toHaveLength(8); + expect(messagesConsumed).toContainEqual({ topic: 0, message: 1 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 0, message: 3 }); // partition 0 + }, 10000); + + it('avoids calling eachMessage again for paused topics/partitions when paused via consumer.pause', async () => { + await consumer.connect(); + await producer.connect(); + const messages = [0, 0, 1, 0].map(partition => { + const key = secureRandom(); + return { key: `key-${key}`, value: `value-${key}`, partition }; + }); + + for (const topic of topics) { + await producer.send({ topic, messages: messages.slice(0, 2) }); + } + await consumer.subscribe({ topics, replace: true }); + + let shouldPause = true; + const messagesConsumed = []; + consumer.run({ + eachMessage: async event => { + const { topic, message, partition } = event; + + const whichTopic = topics.indexOf(topic); + const whichMessage = messages.findIndex(m => String(m.key) === String(message.key)); + + messagesConsumed.push({ + topic: whichTopic, + message: whichMessage, + }); + + // here, we pause after the first message (0) on the first topic (0) + if (shouldPause && whichTopic === 0 && whichMessage === 0) { + consumer.pause([{ topic, partitions: [partition] }]); + // we don't throw an exception here to ensure the loop calling us breaks on its own and doesn't call us again + } + }, + }); + + await waitForMessages(messagesConsumed, { number: 3 }); + const [pausedTopic] = topics; + expect(consumer.paused()).toEqual([{ topic: pausedTopic, partitions: [0] }]); + + for (const topic of topics) { + await producer.send({ topic, messages: messages.slice(2) }); + } + await waitForMessages(messagesConsumed, { number: 6, delay: 10 }); + + expect(messagesConsumed).toHaveLength(6); + expect(messagesConsumed).toContainEqual({ topic: 0, message: 0 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 0, message: 2 }); // partition 1 + + expect(messagesConsumed).toContainEqual({ topic: 1, message: 0 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 1 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 2 }); // partition 1 + expect(messagesConsumed).toContainEqual({ topic: 1, message: 3 }); // partition 0 + + shouldPause = false; + consumer.resume(consumer.paused()); + + await waitForMessages(messagesConsumed, { number: 8 }); + + // these messages have to wait 
until the consumer has resumed + expect(messagesConsumed).toHaveLength(8); + expect(messagesConsumed).toContainEqual({ topic: 0, message: 1 }); // partition 0 + expect(messagesConsumed).toContainEqual({ topic: 0, message: 3 }); // partition 0 + }, 15000); + + it('pauses when pausing via the eachBatch callback', async () => { + await consumer.connect(); + await producer.connect(); + const originalMessages = [0, 0, 0, 1].map(partition => { + const key = secureRandom(); + return { key: `key-${key}`, value: `value-${key}`, partition }; + }); + + for (const topic of topics) { + await producer.send({ topic, messages: originalMessages }); + await consumer.subscribe({ topic }); + } + + let shouldPause = true; + const messagesConsumed = []; + const resumeCallbacks = []; + consumer.run({ + eachBatch: async event => { + const { + batch: { topic, messages }, + pause, + resolveOffset, + commitOffsetsIfNecessary, + } = event; + messages.every(message => { + const whichTopic = topics.indexOf(topic); + const whichMessage = originalMessages.findIndex( + m => String(m.key) === String(message.key) + ); + + if (shouldPause && whichTopic === 0 && whichMessage === 1) { + resumeCallbacks.push(pause()); + return false; + } else if (shouldPause && whichTopic === 1 && whichMessage === 3) { + resumeCallbacks.push(pause()); + return false; + } + messagesConsumed.push({ + topic: whichTopic, + message: whichMessage, + }); + resolveOffset(message.offset); + return true; + }); + await commitOffsetsIfNecessary(); + }, + eachBatchAutoResolve: false, + }); + await waitForConsumerToJoinGroup(consumer); + await waitForMessages(messagesConsumed, { number: 5 }); + expect(messagesConsumed.length).toEqual(5); + await waitFor(() => resumeCallbacks.length >= 2, () => null, { delay: 100 }); + expect(consumer.paused()).toContainEqual({ topic: topics[0], partitions: [0] }); + expect(consumer.paused()).toContainEqual({ topic: topics[1], partitions: [1] }); + shouldPause = false; + resumeCallbacks.forEach(resume => resume()); + await waitForMessages(messagesConsumed, { number: 8 }); + expect(consumer.paused()).toEqual([]); + expect(messagesConsumed).toContainEqual({ topic: 0, message: 1 }); + expect(messagesConsumed).toContainEqual({ topic: 1, message: 3 }); + }, 10000); + + it('does not fetch messages for the paused topic', async () => { + await consumer.connect(); + await producer.connect(); + + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}`, partition: 0 }; + const key2 = secureRandom(); + const message2 = { key: `key-${key2}`, value: `value-${key2}`, partition: 1 }; + + for (const topic of topics) { + await producer.send({ topic, messages: [message1] }); + } + await consumer.subscribe({ topics }); + + const messagesConsumed = []; + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + await waitForMessages(messagesConsumed, { number: 2 }); + + expect(consumer.paused()).toEqual([]); + const [pausedTopic, activeTopic] = topics; + consumer.pause([{ topic: pausedTopic }]); + + for (const topic of topics) { + await producer.send({ topic, messages: [message2] }); + } + + const consumedMessages = await waitForMessages(messagesConsumed, { number: 3 }); + + expect(consumedMessages.filter(({ topic }) => topic === pausedTopic)).toEqual([ + expect.objectContaining({ + topic: pausedTopic, + partition: expect.any(Number), + message: expect.objectContaining({ offset: '0' }), + }), + ]); + + const byPartition = (a, b) => a.partition - b.partition; + expect( + 
consumedMessages.filter(({ topic }) => topic === activeTopic).sort(byPartition) + ).toEqual([ + expect.objectContaining({ + topic: activeTopic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: activeTopic, + partition: 1, + message: expect.objectContaining({ offset: '0' }), + }), + ]); + + expect(consumer.paused()).toEqual([ + { + topic: pausedTopic, + partitions: [0, 1], + }, + ]); + }, 10000); + + it('does not fetch messages for the paused partitions', async () => { + await consumer.connect(); + await producer.connect(); + + const [topic] = topics; + const partitions = [0, 1]; + + const messages = Array(1) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}` }; + }); + const forPartition = partition => message => ({ ...message, partition }); + + for (const partition of partitions) { + await producer.send({ topic, messages: messages.map(forPartition(partition)) }); + } + await consumer.subscribe({ topic }); + + const messagesConsumed = []; + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + + await waitForMessages(messagesConsumed, { number: messages.length * partitions.length }); + + expect(consumer.paused()).toEqual([]); + const [pausedPartition, activePartition] = partitions; + consumer.pause([{ topic, partitions: [pausedPartition] }]); + + for (const partition of partitions) { + await producer.send({ topic, messages: messages.map(forPartition(partition)) }); + } + + const consumedMessages = await waitForMessages(messagesConsumed, { + number: messages.length * 3, + }); + + expect(consumedMessages.filter(({ partition }) => partition === pausedPartition)).toEqual( + messages.map((message, i) => + expect.objectContaining({ + topic, + partition: pausedPartition, + message: expect.objectContaining({ offset: `${i}` }), + }) + ) + ); + + expect(consumedMessages.filter(({ partition }) => partition !== pausedPartition)).toEqual( + messages.concat(messages).map((message, i) => + expect.objectContaining({ + topic, + partition: activePartition, + message: expect.objectContaining({ offset: `${i}` }), + }) + ) + ); + + expect(consumer.paused()).toEqual([ + { + topic, + partitions: [pausedPartition], + }, + ]); + }, 10000); + }); + + describe('when pausing and breaking the consumption', () => { + it('does not process messages when consumption from topic is paused', async () => { + const [topic] = topics; + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}`, partition: 0 }; + const messagesConsumed = []; + let shouldThrow = true; + + await consumer.connect(); + await producer.connect(); + + await producer.send({ topic, messages: [message1] }); + await consumer.subscribe({ topic }); + + consumer.run({ + eachMessage: async event => { + messagesConsumed.push(event); + if (shouldThrow) { + consumer.pause([{ topic }]); + throw new Error('Should fail'); + } + }, + }); + + const consumedMessagesTillError = [ + ...(await waitForMessages(messagesConsumed, { delay: 100 })), + ]; + + shouldThrow = false; + consumer.resume([{ topic }]); + + const consumedMessages = await waitForMessages(messagesConsumed, { number: 2 }); + + expect(consumedMessagesTillError).toHaveLength(1); + expect(consumedMessagesTillError).toEqual([ + expect.objectContaining({ + topic, + partition: expect.any(Number), + message: expect.objectContaining({ offset: '0' }), + }), + ]); + expect(consumedMessages).toHaveLength(2); + 
expect(consumedMessages).toEqual([ + expect.objectContaining({ + topic, + partition: expect.any(Number), + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic, + partition: expect.any(Number), + message: expect.objectContaining({ offset: '0' }), + }), + ]); + }, 10000); + + it('does not process messages when consumption from topic-partition is paused', async () => { + const [topic] = topics; + const pausedPartition = 0; + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}`, partition: 0 }; + const key2 = secureRandom(); + const message2 = { key: `key-${key2}`, value: `value-${key2}`, partition: 1 }; + const messagesConsumed = []; + let shouldThrow = true; + + await consumer.connect(); + await producer.connect(); + + await producer.send({ topic, messages: [message1, message2] }); + await consumer.subscribe({ topic }); + + consumer.run({ + eachMessage: async event => { + messagesConsumed.push(event); + if (shouldThrow && event.partition === pausedPartition) { + consumer.pause([{ topic, partitions: [pausedPartition] }]); + throw new Error('Should fail'); + } + }, + }); + + const consumedMessagesTillError = [ + ...(await waitForMessages(messagesConsumed, { number: 2 })), + ]; + + shouldThrow = false; + consumer.resume([{ topic, partitions: [pausedPartition] }]); + + const consumedMessages = await waitForMessages(messagesConsumed, { number: 3 }); + + expect(consumedMessagesTillError).toHaveLength(2); + expect(consumedMessagesTillError).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic, + partition: 1, + message: expect.objectContaining({ offset: '0' }), + }), + ]) + ); + expect(consumedMessages).toHaveLength(3); + expect(consumedMessages).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic, + partition: 1, + message: expect.objectContaining({ offset: '0' }), + }), + ]) + ); + }, 10000); + }); + + describe('when all topics are paused', () => { + it('does not fetch messages', async () => { + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 100, + maxBytesPerPartition: 180, + }); + + await producer.connect(); + await consumer.connect(); + + const [topic1, topic2] = topics; + await consumer.subscribe({ topics: [topic1, topic2] }); + + const eachMessage = jest.fn(); + consumer.run({ eachMessage }); + + await waitFor(() => consumer.assignment().length > 0, () => { }, { delay: 10 }); + consumer.pause([{ topic: topic1 }, { topic: topic2 }]); + + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}`, partition: 0 }; + + await producer.send({ topic: topic1, messages: [message1] }); + await producer.send({ topic: topic2, messages: [message1] }); + + expect(eachMessage).not.toHaveBeenCalled(); + }); + }); + + describe('when resuming', () => { + it('throws an error if the topic is invalid', async () => { + await consumer.connect(); + expect(() => consumer.pause([{ topic: null, partitions: [0] }])).toThrow('Topic must be a string'); + }); + + it('throws an error if Consumer#connect has not been called', () => { + expect(() => consumer.resume([{ topic: 'foo', partitions: [0] }])).toThrow( + 'Resume can only be 
called while connected' + ); + }); + + it('resumes fetching from the specified topic', async () => { + await consumer.connect(); + await producer.connect(); + + const key = secureRandom(); + const message = { key: `key-${key}`, value: `value-${key}`, partition: 0 }; + + await consumer.subscribe({ topics }); + + const messagesConsumed = []; + consumer.run({ + eachMessage: async event => { + return messagesConsumed.push(event); + } + }); + await waitFor(() => consumer.assignment().length > 0, () => { }, { delay: 10 }); + const [pausedTopic, activeTopic] = topics; + consumer.pause([{ topic: pausedTopic }]); + + for (const topic of topics) { + await producer.send({ topic, messages: [message] }); + } + + await waitForMessages(messagesConsumed, { number: 1 }); + + consumer.resume([{ topic: pausedTopic }]); + + await expect(waitForMessages(messagesConsumed, { number: 2 })).resolves.toEqual([ + expect.objectContaining({ + topic: activeTopic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: pausedTopic, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + ]); + + expect(consumer.paused()).toEqual([]); + }); + + it('resumes fetching from earlier paused partitions', async () => { + await consumer.connect(); + await producer.connect(); + + const [topic] = topics; + const partitions = [0, 1]; + + const messages = Array(1) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}` }; + }); + const forPartition = partition => message => ({ ...message, partition }); + + for (const partition of partitions) { + await producer.send({ topic, messages: messages.map(forPartition(partition)) }); + } + await consumer.subscribe({ topic }); + + const messagesConsumed = []; + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + + await waitForMessages(messagesConsumed, { number: messages.length * partitions.length }); + + const [pausedPartition, activePartition] = partitions; + consumer.pause([{ topic, partitions: [pausedPartition] }]); + + for (const partition of partitions) { + await producer.send({ topic, messages: messages.map(forPartition(partition)) }); + } + + await waitForMessages(messagesConsumed, { + number: messages.length * 3, + }); + + consumer.resume([{ topic, partitions: [pausedPartition] }]); + + const consumedMessages = await waitForMessages(messagesConsumed, { + number: messages.length * 4, + }); + + expect(consumedMessages.filter(({ partition }) => partition === pausedPartition)).toEqual( + messages.concat(messages).map((message, i) => + expect.objectContaining({ + topic, + partition: pausedPartition, + message: expect.objectContaining({ offset: `${i}` }), + }) + ) + ); + + expect(consumedMessages.filter(({ partition }) => partition !== pausedPartition)).toEqual( + messages.concat(messages).map((message, i) => + expect.objectContaining({ + topic, + partition: activePartition, + message: expect.objectContaining({ offset: `${i}` }), + }) + ) + ); + + expect(consumer.paused()).toEqual([]); + }, 10000); + + it('resumes via the function returned by pause', async () => { + await consumer.connect(); + consumer.subscribe({ topic: topics[0] }); + consumer.run({ + eachMessage: async () => {} + }); + + await waitFor(() => consumer.assignment().length > 0, () => { }, { delay: 10 }); + + const tp0 = { topic: topics[0], partitions: [0] }; + const tp1 = { topic: topics[0], partitions: [1] }; + + const resumeTopic0Partition0 = consumer.pause([ tp0 
]); + const resumeTopic0Partition1 = consumer.pause([ tp1 ]); + + let paused = consumer.paused(); + expect(paused).toEqual([{ topic: topics[0], partitions: [0, 1] }]); + + resumeTopic0Partition0(); + paused = consumer.paused(); + expect(paused).toEqual([{ topic: topics[0], partitions: [1] }]); + + resumeTopic0Partition1(); + paused = consumer.paused(); + expect(paused).toEqual([]); + }); + }); +}); diff --git a/test/promisified/consumer/rebalanceCallback.spec.js b/test/promisified/consumer/rebalanceCallback.spec.js new file mode 100644 index 00000000..9269e19b --- /dev/null +++ b/test/promisified/consumer/rebalanceCallback.spec.js @@ -0,0 +1,143 @@ +jest.setTimeout(30000); + +const { waitFor, + secureRandom, + createTopic, + createConsumer, + createProducer, + sleep, } = require("../testhelpers"); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Consumer', () => { + let consumer; + let groupId, topicName; + let consumerConfig; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + consumerConfig = { + groupId, + }; + consumer = null; + await createTopic({ topic: topicName, partitions: 3 }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + }); + + it('calls rebalance callback', async () => { + let calls = 0; + consumer = createConsumer(consumerConfig, { + rebalance_cb: function () { + calls++; + } + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(calls).toBe(1); /* assign */ + await consumer.disconnect(); + expect(calls).toBe(2); /* assign + unassign */ + consumer = null; + }); + + it('allows modifying the assignment via returns', async () => { + consumer = createConsumer(consumerConfig, { + rebalance_cb: function (err, assignment) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + expect(assignment.length).toBe(3); + return assignment.filter(a => a.partition !== 0); + } + } + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(consumer.assignment().length).toBe(2); + expect(consumer.assignment()).toEqual( + expect.arrayContaining([ + { topic: topicName, partition: 1 }, + { topic: topicName, partition: 2 }])); + }); + + it('allows modifying the assigment via assignment functions', async () => { + let calls = 0; + consumer = createConsumer(consumerConfig, { + rebalance_cb: function (err, assignment, assignmentFns) { + calls++; + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + expect(assignment.length).toBe(3); + assignmentFns.assign(assignment.filter(a => a.partition !== 0)); + } else { + assignmentFns.unassign(assignment); + } + } + }); + + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async () => { } }); + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + expect(consumer.assignment().length).toBe(2); + expect(consumer.assignment()).toEqual( + expect.arrayContaining([ + { topic: topicName, partition: 1 }, + { topic: topicName, partition: 2 }])); + await consumer.disconnect(); + expect(calls).toBe(2); + consumer = null; + }); + + it('pauses correctly from the rebalance callback after assign', async () => { + consumer = 
createConsumer(consumerConfig, { + rebalance_cb: function (err, assignment, assignmentFns) { + if (err.code === ErrorCodes.ERR__ASSIGN_PARTITIONS) { + expect(assignment.length).toBe(3); + + /* Assign first so we can pause. */ + assignmentFns.assign(assignment); + + /* Convert the assignment into format suitable for pause argument. */ + const pausablePartitions = [{ topic: topicName, partitions: [0, 1, 2] }]; + consumer.pause(pausablePartitions); + } else { + assignmentFns.unassign(assignment); + } + } + }); + + let messagesConsumed = []; + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + consumer.run({ eachMessage: async (e) => { messagesConsumed.push(e); } }); + await waitFor(() => consumer.assignment().length > 0, () => null, 1000); + + const producer = createProducer({}); + await producer.connect(); + const key1 = secureRandom(); + for (const partition of [0, 1, 2]) { + const message = { key: `key-${key1}`, value: `value-${key1}`, partition }; + await producer.send({ + topic: topicName, + messages: [message], + }); + } + await producer.disconnect(); + + expect(consumer.paused()).toEqual([{ topic: topicName, partitions: [0, 1, 2] }]); + + /* Give it some extra time just in case - should be enough to get the messages if a partition isn't paused. */ + await sleep(1000); + expect(messagesConsumed.length).toBe(0); + + consumer.resume([ { topic: topicName } ]); + await waitFor(() => messagesConsumed.length === 3, () => null, 1000); + expect(messagesConsumed.length).toBe(3); + }); +}); \ No newline at end of file diff --git a/test/promisified/consumer/seek.spec.js b/test/promisified/consumer/seek.spec.js new file mode 100644 index 00000000..17e1c749 --- /dev/null +++ b/test/promisified/consumer/seek.spec.js @@ -0,0 +1,532 @@ +jest.setTimeout(30000); + +const { + createConsumer, + createProducer, + secureRandom, + createTopic, + waitForMessages, + waitFor, + sleep, +} = require('../testhelpers'); + +describe('Consumer seek >', () => { + let topicName, groupId, producer, consumer; + + beforeEach(async () => { + console.log("Starting:", expect.getState().currentTestName); + topicName = `test-topic-${secureRandom()}`; + groupId = `consumer-group-id-${secureRandom()}`; + + producer = createProducer({}); + + consumer = createConsumer({ + groupId, + fromBeginning: true, + }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + console.log("Ending:", expect.getState().currentTestName); + }); + + describe('when seek offset', () => { + describe('with one partition', () => { + beforeEach(async () => { + await createTopic({ topic: topicName, partitions: 1 }); + }); + + it('throws an error if the topic is invalid', async () => { + await consumer.connect(); + expect(() => consumer.seek({ topic: null })).toThrow('must be a string'); + }); + + it('throws an error if the partition is not a number', async () => { + await consumer.connect(); + expect(() => consumer.seek({ topic: topicName, partition: 'ABC' })).toThrow('Offset must be'); + }); + + it('throws an error if the offset is not a number', async () => { + await consumer.connect(); + expect(() => consumer.seek({ topic: topicName, partition: 0, offset: 'ABC' })).toThrow('Offset must be'); + }); + + it('throws an error if the offset is negative and not a special offset', async () => { + await consumer.connect(); + expect(() => consumer.seek({ topic: topicName, partition: 0, offset: '-32' })).toThrow('Offset must be'); + }); + + it('recovers from offset out 
of range', async () => { + await consumer.connect(); + await producer.connect(); + + const key1 = secureRandom(); + const message1 = { key: `key-${key1}`, value: `value-${key1}` }; + + await producer.send({ topic: topicName, messages: [message1] }); + await consumer.subscribe({ topic: topicName, }); + + const messagesConsumed = []; + consumer.seek({ topic: topicName, partition: 0, offset: 100 }); + consumer.run({ + eachMessage: async event => { + messagesConsumed.push(event); + } + }); + + await expect(waitForMessages(messagesConsumed, { number: 1 })).resolves.toEqual([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + ]); + }); + + + describe('when "enable.auto.commit" is false', () => { + beforeEach(() => { + consumer = createConsumer({ + groupId, + fromBeginning: true, + autoCommit: false, + }); + }); + + it('should not commit the offset', async () => { + await producer.connect(); + await consumer.connect(); + + await producer.send({ + topic: topicName, + messages: [1, 2, 3].map(n => ({ key: `key-${n}`, value: `value-${n}`, partition: 0 })), + }); + await consumer.subscribe({ topic: topicName }); + + let messagesConsumed = []; + consumer.seek({ topic: topicName, partition: 0, offset: 2 }); + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + await expect(waitForMessages(messagesConsumed, { number: 1 })).resolves.toEqual([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '2' }), + }), + ]); + + /* We disconnect this consumer, and create another one of the same consumer group. + * This new consumer should start from 0, despite the fact that we've sought to 2 */ + await consumer.disconnect(); + + consumer = createConsumer({ + groupId, + fromBeginning: true, + autoCommit: false, + }); + await consumer.connect(); + await consumer.subscribe({ topic: topicName }); + + messagesConsumed = []; + consumer.run({ + eachMessage: async event => messagesConsumed.push(event), + }); + + await expect(waitForMessages(messagesConsumed, { number: 3 })).resolves.toEqual([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '1' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '2' }), + }), + ]); + }); + }); + }); + + describe('with two partitions', () => { + beforeEach(async () => { + await createTopic({ topic: topicName, partitions: 2 }); + }); + + it('updates the partition offset to the given offset', async () => { + await consumer.connect(); + await producer.connect(); + + const value1 = secureRandom(); + const message1 = { key: `key-1`, value: `value-${value1}`, partition: 1, }; + const value2 = secureRandom(); + const message2 = { key: `key-1`, value: `value-${value2}`, partition: 1, }; + const value3 = secureRandom(); + const message3 = { key: `key-1`, value: `value-${value3}`, partition: 1, }; + const value4 = secureRandom(); + const message4 = { key: `key-0`, value: `value-${value4}`, partition: 0, }; + + await producer.send({ + topic: topicName, + messages: [message1, message2, message3, message4], + }); + + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.seek({ topic: topicName, partition: 1, offset: 1 }); + consumer.run({ + eachMessage: async 
event => { + messagesConsumed.push(event); + } + }); + + let check = await waitForMessages(messagesConsumed, { number: 3 }); + + expect(check).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 1, + message: expect.objectContaining({ offset: '1' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 1, + message: expect.objectContaining({ offset: '2' }), + }), + ]) + ); + + expect(check).toEqual( + expect.not.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 1, + message: expect.objectContaining({ offset: '0' }), + }), + ]) + ); + + }); + + it('works for both partitions', async () => { + await consumer.connect(); + await producer.connect(); + + const value1 = secureRandom(); + const message1 = { key: `key-1`, value: `value-${value1}`, partition: 1, }; + const value2 = secureRandom(); + const message2 = { key: `key-1`, value: `value-${value2}`, partition: 1, }; + const value3 = secureRandom(); + const message3 = { key: `key-0`, value: `value-${value3}`, partition: 0 }; + const value4 = secureRandom(); + const message4 = { key: `key-0`, value: `value-${value4}`, partition: 0, }; + const value5 = secureRandom(); + const message5 = { key: `key-0`, value: `value-${value5}`, partition: 0 }; + + await producer.send({ + topic: topicName, + messages: [message1, message2, message3, message4, message5], + }); + await consumer.subscribe({ topic: topicName }); + + const messagesConsumed = []; + consumer.seek({ topic: topicName, partition: 0, offset: 2 }); + consumer.seek({ topic: topicName, partition: 1, offset: 1 }); + consumer.run({ + eachMessage: async event => { + messagesConsumed.push(event); + } + }); + + let check = await waitForMessages(messagesConsumed, { number: 2 }); + + expect(check).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '2' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 1, + message: expect.objectContaining({ offset: '1' }), + }), + ]) + ); + + expect(check).toEqual( + expect.not.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '1' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 1, + message: expect.objectContaining({ offset: '0' }), + }), + ]) + ); + + }); + + it('uses the last seek for a given topic/partition', async () => { + await consumer.connect(); + await producer.connect(); + + const value1 = secureRandom(); + const message1 = { key: `key-0`, value: `value-${value1}`, partition: 0 }; + const value2 = secureRandom(); + const message2 = { key: `key-0`, value: `value-${value2}`, partition: 0 }; + const value3 = secureRandom(); + const message3 = { key: `key-0`, value: `value-${value3}`, partition: 0 }; + + await producer.send({ topic: topicName, messages: [message1, message2, message3] }); + await consumer.subscribe({ topic: topicName, }); + + const messagesConsumed = []; + consumer.seek({ topic: topicName, partition: 0, offset: 0 }); + consumer.seek({ topic: topicName, partition: 0, offset: 1 }); + consumer.seek({ topic: topicName, partition: 0, offset: 2 }); + consumer.run({ + eachMessage: async event => { + 
messagesConsumed.push(event); + } + }); + + let check = await waitForMessages(messagesConsumed, { number: 1 }); + + expect(check).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '2' }), + }), + ]) + ); + + expect(check).toEqual( + expect.not.arrayContaining([ + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '0' }), + }), + expect.objectContaining({ + topic: topicName, + partition: 0, + message: expect.objectContaining({ offset: '1' }), + }), + ]) + ); + }); + }); + }); + + describe('batch staleness >', () => { + it('stops consuming messages after staleness', async () => { + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 500, + fromBeginning: true, + }); + + const messages = Array(10) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await consumer.connect(); + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await consumer.subscribe({ topic: topicName }); + + const offsetsConsumed = []; + + consumer.run({ + eachMessage: async ({ message }) => { + offsetsConsumed.push(message.offset); + + if (offsetsConsumed.length === 1) { + consumer.seek({ topic: topicName, partition: 0, offset: message.offset }); + } + }, + }); + + await waitFor(() => offsetsConsumed.length >= 2, () => { }, { delay: 50 }); + + expect(offsetsConsumed[0]).toEqual(offsetsConsumed[1]); + }); + + it('resolves a batch as stale when seek was called while processing it', async () => { + consumer = createConsumer({ + groupId, + // make sure we fetch a batch of messages + minBytes: 1024, + maxWaitTimeInMs: 500, + fromBeginning: true, + autoCommit: true, + }); + + const messages = Array(10) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}` }; + }); + + await consumer.connect(); + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await consumer.subscribe({ topic: topicName }); + + const offsetsConsumed = []; + + consumer.run({ + eachBatch: async ({ batch, isStale, resolveOffset }) => { + for (const message of batch.messages) { + if (isStale()) break; + + offsetsConsumed.push(message.offset); + + if (offsetsConsumed.length === 1) { + consumer.seek({ topic: topicName, partition: 0, offset: +message.offset }); + } + + resolveOffset(message.offset); + } + }, + }); + + await waitFor(() => offsetsConsumed.length >= 2, () => null, { delay: 50 }); + + expect(offsetsConsumed[0]).toEqual(offsetsConsumed[1]); + }); + + it('resolves a batch as stale when seek is called from outside eachBatch', async () => { + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 500, + fromBeginning: true, + autoCommit: true, + }); + + const messages = Array(10) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}`, partition: 0 }; + }); + + await consumer.connect(); + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await consumer.subscribe({ topic: topicName }); + + const offsetsConsumed = []; + + consumer.run({ + eachBatch: async ({ batch, isStale, resolveOffset }) => { + for (const message of batch.messages) { + if (isStale()) break; + + offsetsConsumed.push(message.offset); + + /* Slow things down so we can call seek predictably. 
*/ + await sleep(1000); + + resolveOffset(message.offset); + } + }, + }); + + await waitFor(() => offsetsConsumed.length === 1, () => null, { delay: 50 }); + consumer.seek({ topic: topicName, partition: 0, offset: offsetsConsumed[0] }); + + await waitFor(() => offsetsConsumed.length >= 2, () => null, { delay: 50 }); + + expect(offsetsConsumed[0]).toEqual(offsetsConsumed[1]); + }); + + it('resolves a batch as stale when pause was called while processing it', async () => { + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 500, + fromBeginning: true, + autoCommit: true, + }); + + const numMessages = 100; + const messages = Array(numMessages) + .fill() + .map(() => { + const value = secureRandom(); + return { key: `key-${value}`, value: `value-${value}` }; + }); + + await consumer.connect(); + await producer.connect(); + await producer.send({ topic: topicName, messages }); + await consumer.subscribe({ topic: topicName }); + + const offsetsConsumed = []; + + let resume; + consumer.run({ + eachBatchAutoResolve: true, + eachBatch: async ({ batch, isStale, resolveOffset, pause }) => { + for (const message of batch.messages) { + if (isStale()) break; + + offsetsConsumed.push(message.offset); + + if (offsetsConsumed.length === Math.floor(numMessages/2)) { + resume = pause(); + } + + resolveOffset(message.offset); + } + }, + }); + + /* Despite eachBatchAutoResolve being true, it shouldn't resolve offsets on its own. + * However, manual resolution of offsets should still count. */ + await waitFor(() => offsetsConsumed.length >= numMessages/2, () => null, { delay: 50 }); + + resume(); + + /* Since we've properly resolved all offsets before pause, including the offset that we paused at, + * there is no repeat. */ + await waitFor(() => offsetsConsumed.length >= numMessages, () => null, { delay: 50 }); + expect(offsetsConsumed.length).toBe(numMessages); + + expect(+offsetsConsumed[Math.floor(numMessages/2)]).toEqual(+offsetsConsumed[Math.floor(numMessages/2) + 1] - 1); + }); + }); +}); diff --git a/test/promisified/consumer/subscribe.spec.js b/test/promisified/consumer/subscribe.spec.js new file mode 100644 index 00000000..dc131c61 --- /dev/null +++ b/test/promisified/consumer/subscribe.spec.js @@ -0,0 +1,181 @@ +jest.setTimeout(30000); + +const { ErrorCodes } = require('../../../lib').KafkaJS; +const { secureRandom, + createTopic, + waitFor, + waitForMessages, + waitForConsumerToJoinGroup, + createProducer, + createConsumer, + sleep } = require('../testhelpers'); + +describe('Consumer', () => { + let groupId, consumer, producer; + + beforeEach(async () => { + groupId = `consumer-group-id-${secureRandom()}`; + consumer = createConsumer({ + groupId, + maxWaitTimeInMs: 1, + maxBytesPerPartition: 180, + fromBeginning: true + }); + + producer = createProducer({}); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + }); + + describe('when subscribing to multiple topics', () => { + it('throws an error if one of the topics is invalid', async () => { + await consumer.connect(); + await expect(consumer.subscribe({ topics: [1] })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__INVALID_ARG, + ); + }); + + it('subscribes by topic name as a string or regex', async () => { + const testScope = secureRandom(); + const regexMatchingTopic = `pattern-${testScope}-regex-${secureRandom()}`; + const topics = [`topic-${secureRandom()}`, `topic-${secureRandom()}`, regexMatchingTopic]; + + await Promise.all(topics.map(topic => createTopic({ 
topic }))); + /* It takes some time for the topics to be propagated in the metadata. We could check + * by listing topics in a loop, but this serves as well to get rid of flakiness. */ + await sleep(1000); + + const messagesConsumed = []; + await consumer.connect(); + await consumer.subscribe({ + topics: [topics[0], topics[1], new RegExp(`^pattern-${testScope}-regex-.*`)], + }); + + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + await waitFor(() => consumer.assignment().length > 0, () => null); + + await producer.connect(); + await producer.sendBatch({ + topicMessages: [ + { topic: topics[0], messages: [{ key: 'drink', value: 'drink' }] }, + { topic: topics[1], messages: [{ key: 'your', value: 'your' }] }, + { topic: topics[2], messages: [{ key: 'tea', value: 'tea' }] }, + ], + }); + + await waitForMessages(messagesConsumed, { number: 3 }); + expect(messagesConsumed.map(m => m.message.value.toString())).toEqual( + expect.arrayContaining(['drink', 'your', 'tea']) + ); + }); + }); + + describe('Deprecated "topic" interface', () => { + describe('when subscribing', () => { + it('throws an error if the topic is invalid', async () => { + await consumer.connect(); + await expect(consumer.subscribe({ topic: null })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__INVALID_ARG + ); + }); + + it('throws an error if the topic is not a String or RegExp', async () => { + await consumer.connect(); + await expect(consumer.subscribe({ topic: 1 })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__INVALID_ARG + ); + }); + + describe('with a string', () => { + it('subscribes to the topic', async () => { + const topic = `topic-${secureRandom()}`; + + await createTopic({ topic }); + + const messagesConsumed = []; + await consumer.connect(); + await consumer.subscribe({ topic }); + + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + await waitForConsumerToJoinGroup(consumer); + + await producer.connect(); + await producer.sendBatch({ + topicMessages: [{ topic, messages: [{ key: 'key-a', value: 'value-a' }] }], + }); + + await waitForMessages(messagesConsumed, { number: 1 }); + expect(messagesConsumed.map(m => m.message.value.toString()).sort()).toEqual(['value-a']); + }); + }); + + describe('with regex', () => { + it('subscribes to all matching topics', async () => { + const testScope = secureRandom(); + const topicUS = `pattern-${testScope}-us-${secureRandom()}`; + const topicSE = `pattern-${testScope}-se-${secureRandom()}`; + const topicUK = `pattern-${testScope}-uk-${secureRandom()}`; + const topicBR = `pattern-${testScope}-br-${secureRandom()}`; + + await Promise.all( + [topicUS, topicSE, topicUK, topicBR].map(topic => createTopic({ topic })) + ); + + /* It takes some time for the topics to be propagated in the metadata. We could check + * by listing topics in a loop, but this serves as well to get rid of flakiness. 
*/ + await sleep(1000); + + const messagesConsumed = []; + await consumer.connect(); + await consumer.subscribe({ + topic: new RegExp(`^pattern-${testScope}-(se|br)-.*`), + }); + + consumer.run({ eachMessage: async event => messagesConsumed.push(event) }); + await waitFor(() => consumer.assignment().length > 0, () => null, 100); + + await producer.connect(); + await producer.sendBatch({ + topicMessages: [ + { topic: topicUS, messages: [{ key: `key-us`, value: `value-us` }] }, + { topic: topicUK, messages: [{ key: `key-uk`, value: `value-uk` }] }, + { topic: topicSE, messages: [{ key: `key-se`, value: `value-se` }] }, + { topic: topicBR, messages: [{ key: `key-br`, value: `value-br` }] }, + ], + }); + + await waitForMessages(messagesConsumed, { number: 2 }); + expect(messagesConsumed.map(m => m.message.value.toString()).sort()).toEqual([ + 'value-br', + 'value-se', + ]); + }); + }); + }); + }); + + describe('throws if subscribing with a RegExp incorrectly', () => { + it('when RegExp contains a flag', async () => { + await consumer.connect(); + await expect(consumer.subscribe({ topics: [new RegExp('^test', 'g')] })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__INVALID_ARG + ); + }); + + it('when RegExp does not start with a ^', async () => { + await consumer.connect(); + await expect(consumer.subscribe({ topics: [new RegExp('test')] })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR__INVALID_ARG + ); + }); + }); +}); + diff --git a/test/promisified/oauthbearer_cb.spec.js b/test/promisified/oauthbearer_cb.spec.js new file mode 100644 index 00000000..d736646c --- /dev/null +++ b/test/promisified/oauthbearer_cb.spec.js @@ -0,0 +1,80 @@ +// minimum 30s are needed for the connect timeouts of consumer/producer +jest.setTimeout(35000); + +const { + createProducer, + sleep, + createConsumer, + createAdmin, +} = require('./testhelpers'); + +describe('Client > oauthbearer callback', () => { + let oauthbearer_cb_called = 0; + const oauthbearer_config = 'key=value'; + const providerCb = async (config) => { + expect(config).toEqual(oauthbearer_config); + oauthbearer_cb_called++; + throw new Error('oauthbearer_cb error'); + }; + + beforeEach(async () => { + oauthbearer_cb_called = 0; + }); + + it('works for producer', + async () => { + const client = createProducer({ + sasl: { + mechanism: 'OAUTHBEARER', + oauthBearerProvider: providerCb, + } + }, { + 'sasl.oauthbearer.config': oauthbearer_config, + }); + + await expect(client.connect()).rejects.toThrow('oauthbearer_cb error'); + expect(oauthbearer_cb_called).toEqual(1); + await client.disconnect(); + } + ); + + it('works for consumer', + async () => { + const client = createConsumer({ + groupId: 'gid', + sasl: { + mechanism: 'OAUTHBEARER', + oauthBearerProvider: providerCb, + } + }, { + 'sasl.oauthbearer.config': oauthbearer_config, + }); + + await expect(client.connect()).rejects.toThrow('oauthbearer_cb error'); + expect(oauthbearer_cb_called).toEqual(1); + await client.disconnect(); + } + ); + + it('works for admin', + async () => { + const client = createAdmin({ + sasl: { + mechanism: 'OAUTHBEARER', + oauthBearerProvider: providerCb, + } + }, { + 'sasl.oauthbearer.config': oauthbearer_config, + }); + + // Unlike others, there is no actual connection establishment + // within the admin client, so we can't test for the error here. 
+ await expect(client.connect()).resolves.toBeUndefined(); + + await sleep(2000); // Wait for the callback to be called + expect(oauthbearer_cb_called).toEqual(1); + await client.disconnect(); + } + ); + +}); diff --git a/test/promisified/producer/concurrentSend.spec.js b/test/promisified/producer/concurrentSend.spec.js new file mode 100644 index 00000000..e793e0f0 --- /dev/null +++ b/test/promisified/producer/concurrentSend.spec.js @@ -0,0 +1,45 @@ +jest.setTimeout(10000); + +const { + secureRandom, + createProducer, + createTopic, +} = require('../testhelpers'); + +describe('Producer', () => { + let producer, topicName, message; + const partitions = 3; + + beforeEach(async () => { + producer = createProducer({ + }, { + 'linger.ms': 0, + }); + + topicName = `test-topic-${secureRandom()}`; + + await createTopic({ topic: topicName, partitions: 3 }); + }); + + afterEach(async () => { + producer && (await producer.disconnect()); + }); + + + it('can send messages concurrently', + async () => { + await producer.connect(); + const sender = async (p) => { + message = { partition: p, value: `value-${secureRandom()}` }; + const report = await producer.send({ topic: topicName, messages: [message] }); + return report; + }; + const reports = await Promise.all(Array(partitions).fill().map((_, i) => sender(i))); + expect(reports.length).toBe(partitions); + for (let i = 0; i < partitions; i++) { + expect(reports[i].length).toBe(1); + expect(reports[i][0].partition).toBe(i); + } + } + ); +}); diff --git a/test/promisified/producer/concurrentTransaction.spec.js b/test/promisified/producer/concurrentTransaction.spec.js new file mode 100644 index 00000000..3cfbc6a4 --- /dev/null +++ b/test/promisified/producer/concurrentTransaction.spec.js @@ -0,0 +1,50 @@ +const { + secureRandom, + createProducer, + createTopic, +} = require('../testhelpers'); + +describe('Producer > Transactional producer', () => { + let producer1, producer2, topicName, transactionalId, message; + + const newProducer = () => + createProducer({ + idempotent: true, + transactionalId, + transactionTimeout: 1000, + }); + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + transactionalId = `transactional-id-${secureRandom()}`; + message = { key: `key-${secureRandom()}`, value: `value-${secureRandom()}` }; + + await createTopic({ topic: topicName }); + }); + + afterEach(async () => { + producer1 && (await producer1.disconnect()); + producer2 && (await producer2.disconnect()); + }); + + describe('when there is an ongoing transaction on connect', () => { + it('retries initProducerId to cancel the ongoing transaction', + async () => { + // Producer 1 will create a transaction and "crash", it will never commit or abort the connection + producer1 = newProducer(); + await producer1.connect(); + const transaction1 = await producer1.transaction(); + expect(transaction1.isActive()).toBe(true); + await transaction1.send({ topic: topicName, messages: [message] }); + + // Producer 2 starts with the same transactional id to cause the concurrent transactions error + producer2 = newProducer(); + await producer2.connect(); + let transaction2; + await expect(producer2.transaction().then(t => (transaction2 = t))).resolves.toBeTruthy(); + await transaction2.send({ topic: topicName, messages: [message] }); + await transaction2.commit(); + } + ); + }); +}); diff --git a/test/promisified/producer/eos.spec.js b/test/promisified/producer/eos.spec.js new file mode 100644 index 00000000..9f8f21d3 --- /dev/null +++ 
b/test/promisified/producer/eos.spec.js @@ -0,0 +1,156 @@ +jest.setTimeout(30000); + +const { + secureRandom, + createConsumer, + createProducer, + createTopic, + waitForMessages, +} = require('../testhelpers'); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Producer > Transactional producer', () => { + let producer, basicProducer, topicName, topicName2, transactionalId, message, consumer, groupId; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + topicName2 = `test-topic2-${secureRandom()}`; + transactionalId = `transactional-id-${secureRandom()}`; + message = { key: `key-${secureRandom()}`, value: `value-${secureRandom()}` }; + groupId = `group-id-${secureRandom()}`; + + producer = createProducer({ + idempotent: true, + transactionalId, + transactionTimeout: 1000, + }); + + basicProducer = createProducer({}); + + consumer = createConsumer({ groupId, autoCommit: false, fromBeginning: true }); + + await createTopic({ topic: topicName, partitions: 1 }); + await createTopic({ topic: topicName2 }); + }); + + afterEach(async () => { + consumer && (await consumer.disconnect()); + producer && (await producer.disconnect()); + basicProducer && (await basicProducer.disconnect()); + }); + + it('fails when using consumer group id while sending offsets from transactional producer', async () => { + await producer.connect(); + await basicProducer.connect(); + await consumer.connect(); + + await basicProducer.send({ topic: topicName, messages: [message] }); + + await consumer.subscribe({ topic: topicName }); + + let messagesConsumed = []; + await consumer.run({ + eachMessage: async ({ message }) => { + const transaction = await producer.transaction(); + await transaction.send({ topic: topicName, messages: [message] }); + + await expect( + transaction.sendOffsets({ consumerGroupId: groupId })).rejects.toHaveProperty('code', ErrorCodes.ERR__INVALID_ARG); + await expect( + transaction.sendOffsets({ consumerGroupId: groupId, consumer })).rejects.toHaveProperty('code', ErrorCodes.ERR__INVALID_ARG); + + await transaction.abort(); + messagesConsumed.push(message); + } + }); + + await waitForMessages(messagesConsumed, { number: 1 }); + expect(messagesConsumed.length).toBe(1); + }); + + it('sends offsets when transaction is committed', async () => { + await producer.connect(); + await basicProducer.connect(); + await consumer.connect(); + + await basicProducer.send({ topic: topicName, messages: [message] }); + + await consumer.subscribe({ topic: topicName }); + + let messagesConsumed = []; + await consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + const transaction = await producer.transaction(); + await transaction.send({ topic: topicName2, messages: [message] }); + + await transaction.sendOffsets({ consumer, topics: [ + { + topic, + partitions: [ + { partition, offset: Number(message.offset) + 1 }, + ], + } + ], }); + + await transaction.commit(); + messagesConsumed.push(message); + } + }); + + await waitForMessages(messagesConsumed, { number: 1 }); + expect(messagesConsumed.length).toBe(1); + const committed = await consumer.committed(); + expect(committed).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + offset: '1', + partition: 0, + }), + ]) + ); + }); + + it('sends no offsets when transaction is aborted', async () => { + await producer.connect(); + await basicProducer.connect(); + await consumer.connect(); + + await basicProducer.send({ topic: topicName, messages: [message] }); + + await 
consumer.subscribe({ topic: topicName }); + + let messagesConsumed = []; + await consumer.run({ + eachMessage: async ({ topic, partition, message }) => { + const transaction = await producer.transaction(); + await transaction.send({ topic: topicName2, messages: [message] }); + + await transaction.sendOffsets({ consumer, topics: [ + { + topic, + partitions: [ + { partition, offset: Number(message.offset) + 1 }, + ], + } + ], }); + + await transaction.abort(); + messagesConsumed.push(message); + } + }); + + await waitForMessages(messagesConsumed, { number: 1 }); + expect(messagesConsumed.length).toBe(1); + const committed = await consumer.committed(); + expect(committed).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + topic: topicName, + offset: null, + partition: 0, + }), + ]) + ); + }); +}); diff --git a/test/promisified/producer/flush.spec.js b/test/promisified/producer/flush.spec.js new file mode 100644 index 00000000..c4f7daf9 --- /dev/null +++ b/test/promisified/producer/flush.spec.js @@ -0,0 +1,88 @@ +jest.setTimeout(10000); + +const { + secureRandom, + createProducer, + createTopic, +} = require('../testhelpers'); +const { Kafka } = require('../../../lib').KafkaJS; +const process = require('process'); + +describe('Producer > Flush', () => { + let producer, topicName, message; + + beforeEach(async () => { + producer = createProducer({ + }, { + 'linger.ms': 5000, /* large linger ms to test flush */ + 'queue.buffering.max.kbytes': 2147483647, /* effectively unbounded */ + }); + + topicName = `test-topic-${secureRandom()}`; + message = { key: `key-${secureRandom()}`, value: `value-${secureRandom()}` }; + + await createTopic({ topic: topicName }); + }); + + afterEach(async () => { + producer && (await producer.disconnect()); + }); + + + it('does not wait for linger.ms', + async () => { + await producer.connect(); + let messageSent = false; + const startTime = process.hrtime(); + let diffTime; + + producer.send({ topic: topicName, messages: [message] }).then(() => { + messageSent = true; + diffTime = process.hrtime(startTime); + }); + + await producer.flush({ timeout: 5000 }); + expect(messageSent).toBe(true); + + const diffTimeSeconds = diffTime[0] + diffTime[1] / 1e9; + expect(diffTimeSeconds).toBeLessThan(5); + } + ); + + it('does not matter when awaiting sends', + async () => { + await producer.connect(); + let messageSent = false; + const startTime = process.hrtime(); + let diffTime; + + await producer.send({ topic: topicName, messages: [message] }).then(() => { + messageSent = true; + diffTime = process.hrtime(startTime); + }); + + await producer.flush({ timeout: 1000 }); + expect(messageSent).toBe(true); + + const diffTimeSeconds = diffTime[0] + diffTime[1] / 1e9; + expect(diffTimeSeconds).toBeGreaterThan(5); + } + ); + + it('times out if messages are pending', + async () => { + await producer.connect(); + let messageSent = false; + + /* Larger number of messages */ + producer.send({ topic: topicName, messages: Array(100).fill(message) }).then(() => { + messageSent = true; + }); + + /* Small timeout */ + await expect(producer.flush({ timeout: 1 })).rejects.toThrow(Kafka.KafkaJSTimeout); + expect(messageSent).toBe(false); + } + ); + +}); diff --git a/test/promisified/producer/idempotentProduceMessage.spec.js b/test/promisified/producer/idempotentProduceMessage.spec.js new file mode 100644 index 00000000..d882f977 --- /dev/null +++ b/test/promisified/producer/idempotentProduceMessage.spec.js @@ -0,0 +1,129 @@ +jest.setTimeout(10000); + +const { + secureRandom, + 
createTopic, + waitForMessages, + createProducer, + createConsumer, +} = require('../testhelpers'); +const { KafkaJSError } = require('../../../lib').KafkaJS; + +describe('Producer > Idempotent producer', () => { + let producer, consumer, topicName, cluster, messages; + + beforeAll(async () => { + messages = Array(4) + .fill() + .map((_, i) => { + const value = secureRandom(); + return { key: `key-${value}`, value: `${i}` }; + }); + }); + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + producer = createProducer({ + idempotent: true, + }); + consumer = createConsumer({ + groupId: `consumer-group-id-${secureRandom()}`, + maxWaitTimeInMs: 0, + fromBeginning: true, + }); + await createTopic({ topic: topicName, partitions: 1 }); + await Promise.all([producer.connect(), consumer.connect()]); + await consumer.subscribe({ topic: topicName }); + }); + + afterEach( + async () => + await Promise.all([ + producer && (await producer.disconnect()), + consumer && (await consumer.disconnect()), + ]) + ); + + it('sequential produce() calls > all messages are written to the partition once, in order', async () => { + const messagesConsumed = []; + + for (const m of messages) { + await producer.send({ topic: topicName, messages: [m] }); + } + + await consumer.run({ eachMessage: async message => messagesConsumed.push(message) }); + await waitForMessages(messagesConsumed, { number: messages.length }); + + messagesConsumed.forEach(({ message: { value } }, i) => + expect(value.toString()).toEqual(`${i}`) + ); + }); + + /* Skip as we don't have the mock broker available */ + it.skip('sequential produce() calls > where produce() throws a retriable error, all messages are written to the partition once, in order', async () => { + for (const nodeId of [0, 1, 2]) { + const broker = await cluster.findBroker({ nodeId }); + + const brokerProduce = jest.spyOn(broker, 'produce'); + brokerProduce.mockImplementationOnce(() => { + throw new KafkaJSError('retriable error'); + }); + } + + const messagesConsumed = []; + + for (const m of messages) { + await producer.send({ acks: -1, topic: topicName, messages: [m] }); + } + + await consumer.run({ eachMessage: async message => messagesConsumed.push(message) }); + + await waitForMessages(messagesConsumed, { number: messages.length }); + + messagesConsumed.forEach(({ message: { value } }, i) => + expect(value.toString()).toEqual(`${i}`) + ); + }); + + /* Skip as we don't have the mock broker available */ + it.skip('sequential produce() calls > where produce() throws a retriable error after the message is written to the log, all messages are written to the partition once, in order', async () => { + for (const nodeId of [0, 1, 2]) { + const broker = await cluster.findBroker({ nodeId }); + const originalCall = broker.produce.bind(broker); + const brokerProduce = jest.spyOn(broker, 'produce'); + brokerProduce.mockImplementationOnce(); + brokerProduce.mockImplementationOnce(); + brokerProduce.mockImplementationOnce(async (...args) => { + await originalCall(...args); + throw new KafkaJSError('retriable error'); + }); + } + + const messagesConsumed = []; + + for (const m of messages) { + await producer.send({ acks: -1, topic: topicName, messages: [m] }); + } + + await consumer.run({ eachMessage: async message => messagesConsumed.push(message) }); + + await waitForMessages(messagesConsumed, { number: messages.length }); + + messagesConsumed.forEach(({ message: { value } }, i) => + expect(value.toString()).toEqual(`${i}`) + ); + }); + + it('concurrent produce() 
calls > all messages are written to the partition once', async () => { + const messagesConsumed = []; + + await Promise.all( + messages.map(m => producer.send({ topic: topicName, messages: [m] })) + ); + + await consumer.run({ eachMessage: async message => messagesConsumed.push(message) }); + + await waitForMessages(messagesConsumed, { number: messages.length }); + expect(messagesConsumed).toHaveLength(messages.length); + }); +}); diff --git a/test/promisified/producer/producingToInvalidTopic.spec.js b/test/promisified/producer/producingToInvalidTopic.spec.js new file mode 100644 index 00000000..2c713d22 --- /dev/null +++ b/test/promisified/producer/producingToInvalidTopic.spec.js @@ -0,0 +1,30 @@ +const { createTopic, createProducer, secureRandom } = require('../testhelpers'); +const { ErrorCodes } = require('../../../lib').KafkaJS; + +describe('Producer > Producing to invalid topics', () => { + let producer, topicName; + + beforeEach(async () => { + topicName = `test-topic-${secureRandom()}`; + + producer = createProducer({ + }); + await producer.connect(); + await createTopic({ topic: topicName }); + }); + + afterEach(async () => { + producer && (await producer.disconnect()); + }); + + it('rejects when producing to an invalid topic name, but is able to subsequently produce to a valid topic', async () => { + const message = { key: `key-${secureRandom()}`, value: `value-${secureRandom()}` }; + const invalidTopicName = `${topicName}-abc)(*&^%`; + await expect(producer.send({ topic: invalidTopicName, messages: [message] })).rejects.toHaveProperty( + 'code', + ErrorCodes.ERR_TOPIC_EXCEPTION, + ); + + await expect(producer.send({ topic: topicName, messages: [message] })).resolves.toBeTruthy(); + }); +}); diff --git a/test/promisified/testhelpers.js b/test/promisified/testhelpers.js new file mode 100644 index 00000000..0331f209 --- /dev/null +++ b/test/promisified/testhelpers.js @@ -0,0 +1,140 @@ +const crypto = require('crypto'); +const process = require('process'); +const { Kafka } = require('../../lib').KafkaJS; +const { DeferredPromise } = require('../../lib/kafkajs/_common'); + +// TODO: pick this up from a file +const clusterInformation = { + kafkaJS: { + brokers: process.env.KAFKA_HOST ? process.env.KAFKA_HOST.split(',') : ['localhost:9092'], + }, + librdkafka: { + 'bootstrap.servers': process.env.KAFKA_HOST ? process.env.KAFKA_HOST : 'localhost:9092', + }, +}; + +const debug = process.env.TEST_DEBUG; + +function makeConfig(config, common) { + const kafkaJS = Object.assign(config, clusterInformation.kafkaJS); + if (debug) { + common['debug'] = debug; + } + + return Object.assign(common, { kafkaJS }); +} + +function createConsumer(config, common = {}) { + const kafka = new Kafka(makeConfig(config, common)); + return kafka.consumer(); +} + +function createProducer(config, common = {}) { + const kafka = new Kafka(makeConfig(config, common)); + return kafka.producer(); +} + +function createAdmin(config, common = {}) { + const kafka = new Kafka(makeConfig(config, common)); + return kafka.admin(); +} + +function secureRandom(length = 10) { + return `${crypto.randomBytes(length).toString('hex')}-${process.pid}-${crypto.randomUUID()}`; +} + +async function createTopic(args) { + const { topic, partitions } = args; + const admin = createAdmin({}); + await admin.connect(); + await admin.createTopics({ + topics: [ + { topic, numPartitions: partitions ?? 
1 } + ] + }); + await admin.disconnect(); +} + +async function waitForConsumerToJoinGroup(/* consumer is passed as the first argument, and ignored */) { + // We don't yet have a deterministic way to test this, so we just wait for a bit. + // TODO: we can probably wait for consumer.assignment() to be not empty, but that only + // works if the assignment exists. + return new Promise(resolve => setTimeout(resolve, 2500)); +} + +async function waitFor(check, resolveValue, { delay = 50 } = {}) { + return new Promise(resolve => { + const interval = setInterval(() => { + if (check()) { + clearInterval(interval); + resolve(resolveValue()); + } + }, delay); + }); +} + +async function waitForMessages(messagesConsumed, { number = 1, delay } = {}) { + return waitFor(() => messagesConsumed.length >= number, () => messagesConsumed, { delay }); +} + +async function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +const generateMessages = options => { + const { prefix, number = 100, partition } = options || {}; + const prefixOrEmpty = prefix ? `-${prefix}` : ''; + + return Array(number) + .fill() + .map((v, i) => { + const value = secureRandom(); + const message = { + key: `key${prefixOrEmpty}-${i}-${value}`, + value: `value${prefixOrEmpty}-${i}-${value}`, + }; + if (partition !== undefined) { + message.partition = partition; + } + return message; + }); +}; + +/** + * Represents a list of promises that can be resolved in sequence or + * in a different order and awaited multiple times. + * Useful for testing particular ordering of async operations without + * relying of timing. + */ +class SequentialPromises { + #promises; + #current = 0; + + constructor(num) { + this.#promises = Array(num).fill().map(() => new DeferredPromise()); + } + + get(index) { + return this.#promises[index]; + } + + resolveNext(value) { + this.#promises[this.#current].resolve(value); + this.#current++; + } +} + +module.exports = { + createConsumer, + createProducer, + createAdmin, + secureRandom, + waitForMessages, + createTopic, + waitForConsumerToJoinGroup, + waitFor, + sleep, + generateMessages, + clusterInformation, + SequentialPromises +}; diff --git a/test/promisified/unit/cache.spec.js b/test/promisified/unit/cache.spec.js new file mode 100644 index 00000000..c217539b --- /dev/null +++ b/test/promisified/unit/cache.spec.js @@ -0,0 +1,293 @@ +const MessageCache = require('../../../lib/kafkajs/_consumer_cache'); + +describe('MessageCache', () => { + const messages = + Array(5000) + .fill() + .map((_, i) => ({ topic: 'topic', partition: i % 3, number: i })); + + let cache; + beforeEach(() => { + cache = new MessageCache(); + }); + + it('caches messages and retrieves them', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let ppc = null, next = null; + for (let i = 0; i < 90; i++) { + next = cache.next(ppc); + expect(next).not.toBeNull(); + [next, ppc] = next; + expect(next).not.toBeNull(); + receivedMessages.push(next); + } + + /* Results are on a per-partition basis and well-ordered */ + expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(61, 30).every((msg, i) => msg.partition === receivedMessages[60].partition && 
(msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + }); + + it('caches messages and retrieves N of them', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let ppc = null, next = null; + const expectedFetchedSizes = [11, 11, 8]; + for (let i = 0; i < (90/11); i++) { + /* We choose to fetch 11 messages together rather than 10 so that we can test the case where + * remaining messages > 0 but less than requested size. */ + next = cache.nextN(ppc, 11); + expect(next).not.toBeNull(); + [next, ppc] = next; + /* There are 30 messages per partition, the first fetch will get 11, the second 11, and the last one + * 8, and then it repeats for each partition. */ + expect(next.length).toBe(expectedFetchedSizes[i % 3]); + expect(next).not.toBeNull(); + receivedMessages.push(...next); + } + + /* Results are on a per-partition basis and well-ordered */ + expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(61, 30).every((msg, i) => msg.partition === receivedMessages[60].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + }); + + it('stops fetching from stale partition', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let ppc = null, next = null; + for (let i = 0; i < 3; i++) { + next = cache.next(null); + expect(next).not.toBeNull(); + [next, ppc] = next; + expect(next).not.toBeNull(); + receivedMessages.push(next); + cache.markStale([{topic: next.topic, partition: next.partition}]); + } + + // We should not be able to get anything more. + expect(cache.next(ppc)).toBeNull(); + // Nothing should be pending, we've returned everything. + expect(cache.assignedSize).toBe(0); + // The first 3 messages from different toppars are what we should get. 
+ expect(receivedMessages).toEqual(expect.arrayContaining(msgs.slice(0, 3))); + }); + + it('caches messages and retrieves 2-at-a-time', () => { + const msgs = messages.slice(0, 90).filter(msg => msg.partition !== 3); + cache.addMessages(msgs); + + const receivedMessages = []; + let next = [null, null]; + let nextPpc = [null, null]; + for (let i = 0; i < 30; i++) { + next[0] = cache.next(nextPpc[0]); + next[1] = cache.next(nextPpc[1]); + expect(next[0]).not.toBeNull(); + expect(next[1]).not.toBeNull(); + [next[0], nextPpc[0]] = next[0]; + [next[1], nextPpc[1]] = next[1]; + receivedMessages.push(next[0]); + receivedMessages.push(next[1]); + } + + expect(receivedMessages.length).toBe(60); + expect(receivedMessages.filter(msg => msg.partition === 0).length).toBe(30); + expect(receivedMessages.filter(msg => msg.partition === 1).length).toBe(30); + }); + + it('caches messages and retrieves N of them 2-at-a-time', () => { + const msgs = messages.slice(0, 90).filter(msg => msg.partition !== 3); + cache.addMessages(msgs); + + const receivedMessages = []; + let next = [null, null]; + let nextPpc = [null, null]; + for (let i = 0; i < 30/11; i++) { + next[0] = cache.nextN(nextPpc[0], 11); + next[1] = cache.nextN(nextPpc[1], 11); + expect(next[0]).not.toBeNull(); + expect(next[1]).not.toBeNull(); + [next[0], nextPpc[0]] = next[0]; + [next[1], nextPpc[1]] = next[1]; + receivedMessages.push(...next[0]); + receivedMessages.push(...next[1]); + } + + expect(receivedMessages.length).toBe(60); + expect(receivedMessages.filter(msg => msg.partition === 0).length).toBe(30); + expect(receivedMessages.filter(msg => msg.partition === 1).length).toBe(30); + }); + + it('does not allow fetching messages more than available partitions at a time', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + let next = cache.next(); + let ppc = next[1]; + expect(next).not.toBeNull(); + next = cache.next(); + expect(next).not.toBeNull(); + next = cache.next(); + expect(next).not.toBeNull(); + next = cache.next(); + expect(next).toBeNull(); + expect(cache.assignedSize).toBe(3); + + // Fetch after returning ppc works. + cache.return(ppc); + next = cache.next(); + expect(next).not.toBeNull(); + }); + + + it('does not allow fetching message sets more than available partitions at a time', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + let next = cache.nextN(null, 11); + let ppc = next[1]; + expect(next).not.toBeNull(); + next = cache.nextN(null, 11); + expect(next).not.toBeNull(); + next = cache.nextN(null, 11); + expect(next).not.toBeNull(); + next = cache.nextN(null, 11); + expect(next).toBeNull(); + expect(cache.assignedSize).toBe(3); + + // Fetch after returning ppc works. + cache.return(ppc); + next = cache.nextN(null, 11); + expect(next).not.toBeNull(); + }); + + it('stops fetching message sets from stale partition', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let next, ppc; + for (let i = 0; i < 3; i++) { + next = cache.nextN(null, 11); + expect(next).not.toBeNull(); + [next, ppc] = next; + receivedMessages.push(...next); + cache.markStale([{topic: next[0].topic, partition: next[0].partition}]); + cache.return(ppc); + } + + // We should not be able to get anything more. + expect(cache.nextN(null, 11)).toBeNull(); + // Nothing should be pending, we've returned everything. + expect(cache.assignedSize).toBe(0); + // The first [11, 11, 11] messages from different toppars. 
+ expect(receivedMessages.length).toBe(33); + expect(receivedMessages).toEqual(expect.arrayContaining(msgs.slice(0, 33))); + }); + + it('one slow processing message should not slow down others', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let next, ppc; + cache.next(ppc); + for (let i = 0; i < 60; i++) { /* 60 - for non-partition 0 msgs */ + next = cache.next(ppc); + expect(next).not.toBeNull(); + [next, ppc] = next; + expect(next).not.toBeNull(); + receivedMessages.push(next); + } + + // We should not be able to get anything more. + expect(cache.next(ppc)).toBeNull(); + // The slowMsg should be pending. + expect(cache.assignedSize).toBe(1); + + /* Messages should be partition-wise and well-ordered. */ + expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + }); + + it('one slow processing message set should not slow down others', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let next, ppc; + cache.nextN(ppc, 11); + for (let i = 0; i < 60/11; i++) { /* 60 - for non-partition 0 msgs */ + next = cache.nextN(ppc, 11); + expect(next).not.toBeNull(); + [next, ppc] = next; + receivedMessages.push(...next); + } + + + // We should not be able to get anything more. + expect(cache.nextN(ppc, 11)).toBeNull(); + // The slowMsg should be pending. + expect(cache.assignedSize).toBe(1); + + /* Messages should be partition-wise and well-ordered. */ + expect(receivedMessages.slice(1, 30).every((msg, i) => msg.partition === receivedMessages[0].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + expect(receivedMessages.slice(31, 30).every((msg, i) => msg.partition === receivedMessages[30].partition && (msg.number - 3) === receivedMessages[i].number)).toBeTruthy(); + }); + + it('should be able to handle cache-clearance in the middle of processing', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let next, ppc; + cache.next(); + for (let i = 0; i < 60; i++) { /* 60 - for non-partition 0 msgs */ + next = cache.next(ppc); + expect(next).not.toBeNull(); + [next, ppc] = next; + expect(next).not.toBeNull(); + receivedMessages.push(next); + } + + // We should not be able to get anything more. + expect(cache.next(ppc)).toBeNull(); + + // The slowMsg should be pending. + expect(cache.assignedSize).toBe(1); + + expect(() => cache.clear()).not.toThrow(); + }); + + it('should be able to handle message adds in the middle of processing', () => { + const msgs = messages.slice(0, 90); + cache.addMessages(msgs); + + const receivedMessages = []; + let next, ppc; + cache.next(); + for (let i = 0; i < 60; i++) { /* 60 - for non-partition 0 msgs */ + next = cache.next(ppc); + expect(next).not.toBeNull(); + [next, ppc] = next; + expect(next).not.toBeNull(); + receivedMessages.push(next); + } + + // We should not be able to get anything more. + expect(cache.next(ppc)).toBeNull(); + + // The slowMsg should be pending. 
+ expect(cache.assignedSize).toBe(1); + + expect(() => cache.addMessages(msgs)).not.toThrow(); + }); +}); \ No newline at end of file diff --git a/test/promisified/unit/common.spec.js b/test/promisified/unit/common.spec.js new file mode 100644 index 00000000..c6d77a15 --- /dev/null +++ b/test/promisified/unit/common.spec.js @@ -0,0 +1,227 @@ +const { Lock } = require('../../../lib/kafkajs/_common'); +const { SequentialPromises } = require('../testhelpers'); + +describe('Lock', () => { + + it('allows multiple concurrent readers', async () => { + let lock = new Lock(); + let sequentialPromises = new SequentialPromises(1); + let events = []; + let tasks = []; + let concurrency = 50; + + for (let i = 0; i < concurrency; i++) { + let task = lock.read(async () => { + events.push(i * 2); + await sequentialPromises.get(0); + events.push(i * 2 + 1); + }); + tasks.push(task); + } + + /* Make sure all tasks can reach the promise. */ + await new Promise((r) => setTimeout(r, 10)); + sequentialPromises.resolveNext(); + await Promise.all(tasks); + + for (let event of events.slice(0, 50)) { + expect(event % 2).toEqual(0); + } + for (let event of events.slice(50)) { + expect(event % 2).toEqual(1); + } + }); + + it('prevents multiple concurrent write locks', async () => { + let lock = new Lock(); + let sequentialPromises = new SequentialPromises(1); + let events = []; + let tasks = []; + let concurrency = 50; + + for (let i = 0; i < concurrency; i++) { + let task = lock.write(async () => { + events.push(i * 2); + await sequentialPromises.get(0); + events.push(i * 2 + 1); + }); + tasks.push(task); + } + + /* Make sure all tasks can reach the promise in case + * the lock wasn't working. */ + await new Promise((r) => setTimeout(r, 10)); + sequentialPromises.resolveNext(); + await Promise.all(tasks); + + for (let i = 0; i < concurrency; i++) { + expect(events[i * 2]).toBe(events[i * 2 + 1] - 1); + } + }); + + it('allows either multiple readers or a single writer', async () => { + let lock = new Lock(); + let sequentialPromises = new SequentialPromises(3); + let events = []; + let promises = []; + sequentialPromises.resolveNext(); + + let read1 = lock.read(async () => { + events.push(0); + await sequentialPromises.get(0); + events.push(1); + sequentialPromises.resolveNext(); + }); + promises.push(read1); + + let read2 = lock.read(async () => { + events.push(2); + await sequentialPromises.get(1); + events.push(3); + sequentialPromises.resolveNext(); + }); + promises.push(read2); + + let write1 = lock.write(async () => { + events.push(4); + await sequentialPromises.get(2); + events.push(5); + }); + promises.push(write1); + + await Promise.all(promises); + + expect(events).toEqual([0, 2, 1, 3, 4, 5]); + }); + + + it('allows reentrant read locks', async () => { + let lock = new Lock(); + let sequentialPromises = new SequentialPromises(2); + let events = []; + let promises = []; + sequentialPromises.resolveNext(); + + let read1 = lock.read(async () => { + events.push(0); + await lock.read(async () => { + events.push(1); + await sequentialPromises.get(0); + events.push(2); + }); + events.push(3); + sequentialPromises.resolveNext(); + }); + promises.push(read1); + + let read2 = lock.read(async () => { + events.push(4); + await lock.read(async () => { + events.push(5); + await sequentialPromises.get(1); + events.push(6); + }); + events.push(7); + }); + promises.push(read2); + + await Promise.all(promises); + + expect(events).toEqual([0, 4, 1, 5, 2, 3, 6, 7]); + }); + + it('allows reentrant write locks', async () => { + 
let lock = new Lock(); + let sequentialPromises = new SequentialPromises(2); + let events = []; + let promises = []; + sequentialPromises.resolveNext(); + + let write1 = lock.write(async () => { + events.push(0); + await lock.write(async () => { + events.push(1); + await sequentialPromises.get(0); + events.push(2); + }); + events.push(3); + sequentialPromises.resolveNext(); + }); + promises.push(write1); + + let write2 = lock.write(async () => { + events.push(4); + await lock.write(async () => { + events.push(5); + await sequentialPromises.get(1); + events.push(6); + }); + events.push(7); + }); + promises.push(write2); + + await Promise.allSettled(promises); + + expect(events).toEqual([0, 1, 2, 3, 4, 5, 6, 7]); + }); + + it('can upgrade to a write lock while holding a read lock', + async () => { + let lock = new Lock(); + await lock.read(async () => { + await lock.read(async () => { + await lock.write(async () => { + await lock.write(async () => { + await lock.read(async () => { + + }); + }); + }); + }); + }); + }); + + it('can acquire a read lock with holding a write lock', async () => { + let lock = new Lock(); + await lock.write(async () => { + await lock.write(async () => { + await lock.read(async () => { + await lock.read(async () => { + await lock.write(async () => { + }); + }); + }); + }); + }); + }); + + it('awaits locks the called function doesn\'t await', async () => { + let lock = new Lock(); + let sequentialPromises = new SequentialPromises(2); + let events = []; + await lock.write(async () => { + events.push(0); + lock.read(async () => { + await sequentialPromises.get(1); + events.push(1); + }); + lock.write(async () => { + await sequentialPromises.get(0); + events.push(2); + sequentialPromises.resolveNext(); + }); + sequentialPromises.resolveNext(); + }); + + expect(events).toEqual([0, 2, 1]); + }); + + it('propagates errors', async () => { + let lock = new Lock(); + let throwing = + lock.read(async () => { + throw new Error('shouldn\'t happen'); + }); + await expect(throwing).rejects.toThrow('shouldn\'t happen'); + }); +}); \ No newline at end of file diff --git a/test/schemaregistry/mock-schemaregistery-client.spec.ts b/test/schemaregistry/mock-schemaregistery-client.spec.ts new file mode 100644 index 00000000..c878ee8a --- /dev/null +++ b/test/schemaregistry/mock-schemaregistery-client.spec.ts @@ -0,0 +1,219 @@ +import { beforeEach, afterEach, describe, expect, it, jest } from '@jest/globals'; +import { MockClient } from '../../schemaregistry/mock-schemaregistry-client'; +import { Compatibility, Metadata, SchemaInfo, SchemaMetadata } from '../../schemaregistry/schemaregistry-client'; + +const schemaString: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + ], +}); + +const schemaString2: string = JSON.stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' }, + { name: 'email', type: 'string' }, + ], +}); + +const metadata: Metadata = { + properties: { + owner: 'Alice Bob', + email: 'alice@bob.com', + } +}; + +const metadata2: Metadata = { + properties: { + owner: 'Alice Bob2', + email: 'alice@bob2.com' + } +}; + +const metadataKeyValue: { [key: string]: string } = { + owner: 'Alice Bob', + email: 'alice@bob.com' +}; + +const metadataKeyValue2: { [key: string]: string } = { + owner: 'Alice Bob2', + email: 'alice@bob2.com' +}; + +const schemaInfo: SchemaInfo = { + schema: schemaString, + metadata: metadata +}; + +const 
schemaInfo2: SchemaInfo = { + schema: schemaString2, + metadata: metadata2 +}; + +const testSubject = 'test-subject'; +const testSubject2 = 'test-subject2'; + + +describe('MockClient-tests', () => { + let mockClient: MockClient; + + beforeEach(() => { + mockClient = new MockClient(); + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return schemaId when calling register', async () => { + const response: number = await mockClient.register(testSubject, schemaInfo); + expect(response).toBe(1); + }); + + it('Should return SchemaMetadata when calling registerFullResponse', async () => { + const response: SchemaMetadata = await mockClient.registerFullResponse(testSubject, schemaInfo); + expect(response.id).toBe(1); + }); + + it('Should return SchemaInfo when getting with subject and Id', async () => { + await mockClient.register(testSubject, schemaInfo); + const response: SchemaInfo = await mockClient.getBySubjectAndId(testSubject, 1); + expect(response.schema).toBe(schemaString); + }); + + it('Should throw error when getBySubjectAndId is called with non-existing schemaId', async () => { + await mockClient.register(testSubject, schemaInfo); + await expect(mockClient.getBySubjectAndId(testSubject, 2)).rejects.toThrowError(); + }); + + it('Should return schemaId when calling getId', async () => { + await mockClient.register(testSubject, schemaInfo); + const response: number = await mockClient.getId(testSubject, schemaInfo); + expect(response).toBe(1); + }); + + it('Should throw error when getId is called with non-existing schema', async () => { + await expect(mockClient.getId(testSubject, schemaInfo)).rejects.toThrowError(); + }); + + it('Should return latest schema metadata when calling getLatestSchemaMetadata', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + const response: SchemaMetadata = await mockClient.getLatestSchemaMetadata(testSubject); + expect(response.id).toBe(2); + expect(response.schema).toBe(schemaString2); + }); + + it('Should return latest Schema with metadata when calling getLatestWithMetadata', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + const response = await mockClient.getLatestWithMetadata(testSubject, metadataKeyValue); + expect(response.schema).toBe(schemaString); + expect(response.version).toBe(1); + const response2 = await mockClient.getLatestWithMetadata(testSubject, metadataKeyValue2); + expect(response2.schema).toBe(schemaString2); + expect(response2.version).toBe(2); + }); + + it('Should return specific schemaMetadata version when calling getSchemaMetadata', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + const response: SchemaMetadata = await mockClient.getSchemaMetadata(testSubject, 1); + expect(response.id).toBe(1); + expect(response.schema).toBe(schemaString); + const response2: SchemaMetadata = await mockClient.getSchemaMetadata(testSubject, 2); + expect(response2.id).toBe(2); + expect(response2.schema).toBe(schemaString2); + }); + + it('Should return the correct version when calling getVersion', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + const response: number = await mockClient.getVersion(testSubject, schemaInfo2); + expect(response).toBe(2); + }); + + it('Should throw error when getVersion is called with non-existing schema', 
async () => { + await expect(mockClient.getVersion(testSubject, schemaInfo)).rejects.toThrowError(); + }); + + it('Should return all versions when calling getAllVersions', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + const response: number[] = await mockClient.getAllVersions(testSubject); + expect(response).toEqual([1, 2]); + }); + + it('Should update compatibility when calling updateCompatibility', async () => { + const response: Compatibility = await mockClient.updateCompatibility(testSubject, Compatibility.BACKWARD_TRANSITIVE); + expect(response).toBe(Compatibility.BACKWARD_TRANSITIVE); + }); + + it('Should return compatibility when calling getCompatibility', async () => { + await mockClient.updateCompatibility(testSubject, Compatibility.BACKWARD_TRANSITIVE); + const response: Compatibility = await mockClient.getCompatibility(testSubject); + expect(response).toBe(Compatibility.BACKWARD_TRANSITIVE); + }); + + it('Should throw error when getCompatibility is called with non-existing subject', async () => { + await expect(mockClient.getCompatibility(testSubject)).rejects.toThrowError(); + }); + + it('Should update default compatibility when calling updateDefaultCompatibility', async () => { + const response: Compatibility = await mockClient.updateDefaultCompatibility(Compatibility.BACKWARD_TRANSITIVE); + expect(response).toBe(Compatibility.BACKWARD_TRANSITIVE); + }); + + it('Should return default compatibility when calling getDefaultCompatibility', async () => { + await mockClient.updateDefaultCompatibility(Compatibility.BACKWARD_TRANSITIVE); + const response: Compatibility = await mockClient.getDefaultCompatibility(); + expect(response).toBe(Compatibility.BACKWARD_TRANSITIVE); + }); + + it('Should throw error when getDefaultCompatibility is called with non-existing default compatibility', async () => { + await expect(mockClient.getDefaultCompatibility()).rejects.toThrowError(); + }); + + it('Should get all subjects when calling getAllSubjects', async () => { + expect(await mockClient.getAllSubjects()).toEqual([]); + + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject2, schemaInfo); + const response: string[] = await mockClient.getAllSubjects(); + expect(response).toEqual([testSubject, testSubject2]); + }); + + it('Should soft delete subject when calling deleteSubject', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.deleteSubject(testSubject); + await expect(mockClient.getId(testSubject, schemaInfo)).rejects.toThrowError(); + await expect(mockClient.getVersion(testSubject, schemaInfo)).rejects.toThrowError(); + const response: SchemaInfo = await mockClient.getBySubjectAndId(testSubject, 1); + await expect(response.schema).toBe(schemaString); + }); + + it('Should permanent delete subject when calling deleteSubject with permanent flag', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.deleteSubject(testSubject, true); + await expect(mockClient.getId(testSubject, schemaInfo)).rejects.toThrowError(); + await expect(mockClient.getVersion(testSubject, schemaInfo)).rejects.toThrowError(); + await expect(mockClient.getBySubjectAndId(testSubject, 1)).rejects.toThrowError(); + }); + + it('Should soft delete subject version when calling deleteSubjectVersion', async () => { + await mockClient.register(testSubject, schemaInfo); + await mockClient.register(testSubject, schemaInfo2); + await 
mockClient.deleteSubjectVersion(testSubject, 1);
+    await expect(mockClient.getId(testSubject, schemaInfo)).rejects.toThrowError();
+    await expect(mockClient.getVersion(testSubject, schemaInfo)).rejects.toThrowError();
+    const response: SchemaInfo = await mockClient.getBySubjectAndId(testSubject, 1);
+    await expect(response.schema).toBe(schemaString);
+    const response2: SchemaInfo = await mockClient.getBySubjectAndId(testSubject, 2);
+    await expect(response2.schema).toBe(schemaString2);
+  });
+});
diff --git a/test/schemaregistry/rules/encryption/dekregistry/dekregistry-client.spec.ts b/test/schemaregistry/rules/encryption/dekregistry/dekregistry-client.spec.ts
new file mode 100644
index 00000000..5dc3da20
--- /dev/null
+++ b/test/schemaregistry/rules/encryption/dekregistry/dekregistry-client.spec.ts
@@ -0,0 +1,106 @@
+import { DekRegistryClient, Dek, Kek } from "../../../../../schemaregistry/rules/encryption/dekregistry/dekregistry-client";
+import { RestService } from "../../../../../schemaregistry/rest-service";
+import { AxiosResponse } from 'axios';
+import { beforeEach, afterEach, describe, expect, it, jest } from '@jest/globals';
+import { TEST_KEK, TEST_KEK_2, TEST_KEK_NAME, TEST_KEK_NAME_2, TEST_KMS_TYPE, TEST_KMS_KEY_ID,
+  TEST_KMS_PROPS, TEST_DOC, TEST_DEK, TEST_DEK_2, TEST_ALGORITHM,
+  TEST_ENCRYPTED_KEY_MATERIAL, TEST_SUBJECT, TEST_VERSION,
+  TEST_DEK_LATEST} from "./test-constants";
+import { mockClientConfig } from "../../../test-constants";
+
+jest.mock('../../../../../schemaregistry/rest-service');
+
+
+let client: DekRegistryClient;
+let restService: jest.Mocked<RestService>;
+
+describe('DekRegistryClient', () => {
+
+  beforeEach(() => {
+    restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked<RestService>;
+    client = new DekRegistryClient(mockClientConfig);
+    (client as any).restService = restService;
+  });
+
+  afterEach(() => {
+    jest.clearAllMocks();
+  });
+
+  it('Should register kek when registerKek is called', async () => {
+    restService.handleRequest.mockResolvedValue({ data: TEST_KEK } as AxiosResponse);
+    const response: Kek = await client.registerKek(
+      TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+
+    expect(response).toEqual(TEST_KEK);
+    expect(restService.handleRequest).toHaveBeenCalledTimes(1);
+  });
+
+  it('Should return kek from cache when registerKek is called with same kek name', async () => {
+    restService.handleRequest.mockResolvedValue({ data: TEST_KEK } as AxiosResponse);
+    await client.registerKek(TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+    restService.handleRequest.mockResolvedValue({ data: TEST_KEK_2 } as AxiosResponse);
+    await client.registerKek(TEST_KEK_NAME_2, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+
+    const response: Kek = await client.registerKek(
+      TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+    const response2: Kek = await client.registerKek(
+      TEST_KEK_NAME_2, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+
+    expect(response).toEqual(TEST_KEK);
+    expect(response2).toEqual(TEST_KEK_2);
+    expect(restService.handleRequest).toHaveBeenCalledTimes(2);
+  });
+
+  it('Should return kek from cache when getKek is called with same kek name', async () => {
+    restService.handleRequest.mockResolvedValue({ data: TEST_KEK } as AxiosResponse);
+    await client.registerKek(TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC);
+    const response: Kek = await client.getKek(TEST_KEK_NAME);
+
+    
expect(response).toEqual(TEST_KEK); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should register dek when registerDek is called', async () => { + restService.handleRequest.mockResolvedValue({ data: TEST_DEK } as AxiosResponse); + const response: Dek = await client.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + expect(response).toEqual(TEST_DEK); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return dek from cache when registerDek is called with same kek name, subject, algorithm, and version', async () => { + restService.handleRequest.mockResolvedValue({ data: TEST_DEK } as AxiosResponse); + await client.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + restService.handleRequest.mockResolvedValue({ data: TEST_DEK_2 } as AxiosResponse); + await client.registerDek(TEST_KEK_NAME_2, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + + const response: Dek = await client.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + const response2: Dek = await client.registerDek(TEST_KEK_NAME_2, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + + expect(response).toEqual(TEST_DEK); + expect(response2).toEqual(TEST_DEK_2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); + + it('Should return dek from cache when getDek is called with same kek name, subject, algorithm, and version', async () => { + restService.handleRequest.mockResolvedValue({ data: TEST_DEK } as AxiosResponse); + await client.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + const response: Dek = await client.getDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION); + + expect(response).toEqual(TEST_DEK); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should delete dek with version -1 when registerDek is called', async () => { + restService.handleRequest.mockResolvedValue({ data: TEST_DEK_LATEST } as AxiosResponse); + const getDekResponse: Dek = await client.getDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, -1); + expect(getDekResponse).toEqual(TEST_DEK_LATEST); + expect(await client.checkLatestDekInCache(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM)).toBe(true); + + restService.handleRequest.mockResolvedValue({ data: TEST_DEK } as AxiosResponse); + await client.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + const getDekResponse2: Dek = await client.getDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM); + + expect(getDekResponse2).toEqual(TEST_DEK); + expect(await client.checkLatestDekInCache(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM)).toBe(false); + }); +}); diff --git a/test/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.spec.ts b/test/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.spec.ts new file mode 100644 index 00000000..a5ed3cb2 --- /dev/null +++ b/test/schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client.spec.ts @@ -0,0 +1,54 @@ +import { Dek, Kek } from "../../../../../schemaregistry/rules/encryption/dekregistry/dekregistry-client"; +import { MockDekRegistryClient } from "../../../../../schemaregistry/rules/encryption/dekregistry/mock-dekregistry-client"; +import { beforeEach, afterEach, describe, expect, it, jest } from '@jest/globals'; 
+import { TEST_KEK, TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, + TEST_KMS_PROPS, TEST_DOC, TEST_DEK, TEST_DEK_V2, TEST_ALGORITHM, + TEST_ENCRYPTED_KEY_MATERIAL, TEST_SUBJECT, TEST_VERSION } from "./test-constants"; + +describe('MockClient-tests', () => { + let mockClient: MockDekRegistryClient; + + beforeEach(() => { + mockClient = new MockDekRegistryClient(); + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return kek when registering Kek', async () => { + const registerKekResponse: Kek = await mockClient.registerKek( + TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC); + + expect(registerKekResponse).toEqual(TEST_KEK); + }); + + it('Should return kek when getting Kek', async () => { + await mockClient.registerKek(TEST_KEK_NAME, TEST_KMS_TYPE, TEST_KMS_KEY_ID, true, TEST_KMS_PROPS, TEST_DOC); + const getKekResponse: Kek = await mockClient.getKek(TEST_KEK_NAME); + + expect(getKekResponse).toEqual(TEST_KEK); + }); + + it('Should return dek when registering Dek', async () => { + const registerDekResponse: Dek = await mockClient.registerDek( + TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + + expect(registerDekResponse).toEqual(TEST_DEK); + }); + + it('Should return dek when getting Dek', async () => { + await mockClient.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + const getDekResponse: Dek = await mockClient.getDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION); + + expect(getDekResponse).toEqual(TEST_DEK); + }); + + it('Should return latest dek when getting Dek with version -1', async () => { + await mockClient.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, 2, TEST_ENCRYPTED_KEY_MATERIAL); + await mockClient.registerDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, TEST_VERSION, TEST_ENCRYPTED_KEY_MATERIAL); + const getDekResponse: Dek = await mockClient.getDek(TEST_KEK_NAME, TEST_SUBJECT, TEST_ALGORITHM, -1); + + expect(getDekResponse).toEqual(TEST_DEK_V2); + }); + }); diff --git a/test/schemaregistry/rules/encryption/dekregistry/test-constants.ts b/test/schemaregistry/rules/encryption/dekregistry/test-constants.ts new file mode 100644 index 00000000..b5cae073 --- /dev/null +++ b/test/schemaregistry/rules/encryption/dekregistry/test-constants.ts @@ -0,0 +1,74 @@ +import { MOCK_TS } from "../../../../../schemaregistry/rules/encryption/dekregistry/constants"; +import { Kek, Dek } from "../../../../../schemaregistry/rules/encryption/dekregistry/dekregistry-client"; + +const TEST_KEK_NAME: string = 'test-kek-name'; +const TEST_KEK_NAME_2: string = 'test-kek-name2'; +const TEST_KMS_TYPE: string = 'test-kms-type'; +const TEST_KMS_KEY_ID: string = 'test-kms-key-id'; +const TEST_KMS_PROPS = { testKey: 'testValue' }; +const TEST_DOC: string = 'test-doc'; + +const TEST_SUBJECT: string = 'test-subject'; +const TEST_ALGORITHM: string = 'test-algorithm'; +const TEST_ENCRYPTED_KEY_MATERIAL: string = 'test-encrypted-key-material'; +const TEST_VERSION: number = 1; + +const TEST_KEK: Kek = { + name: TEST_KEK_NAME, + kmsType: TEST_KMS_TYPE, + kmsKeyId: TEST_KMS_KEY_ID, + kmsProps: TEST_KMS_PROPS, + doc: TEST_DOC, + shared: true +}; + +const TEST_KEK_2: Kek = { + name: TEST_KEK_NAME_2, + kmsType: TEST_KMS_TYPE, + kmsKeyId: TEST_KMS_KEY_ID, + kmsProps: TEST_KMS_PROPS, + doc: TEST_DOC, + shared: true +}; + +const TEST_DEK: Dek = { + kekName: TEST_KEK_NAME, + subject: TEST_SUBJECT, + algorithm: TEST_ALGORITHM, + encryptedKeyMaterial: 
TEST_ENCRYPTED_KEY_MATERIAL, + version: TEST_VERSION, + ts: MOCK_TS +}; + +const TEST_DEK_V2: Dek = { + kekName: TEST_KEK_NAME, + subject: TEST_SUBJECT, + algorithm: TEST_ALGORITHM, + encryptedKeyMaterial: TEST_ENCRYPTED_KEY_MATERIAL, + version: 2, + ts: MOCK_TS +}; + +const TEST_DEK_2: Dek = { + kekName: TEST_KEK_NAME_2, + subject: TEST_SUBJECT, + algorithm: TEST_ALGORITHM, + encryptedKeyMaterial: TEST_ENCRYPTED_KEY_MATERIAL, + version: TEST_VERSION, + ts: MOCK_TS +}; + +const TEST_DEK_LATEST: Dek = { + kekName: TEST_KEK_NAME, + subject: TEST_SUBJECT, + algorithm: TEST_ALGORITHM, + encryptedKeyMaterial: TEST_ENCRYPTED_KEY_MATERIAL, + version: -1, + ts: MOCK_TS +}; + +export { + TEST_KEK_NAME, TEST_KEK_NAME_2, TEST_KMS_TYPE, TEST_KMS_KEY_ID, TEST_KMS_PROPS, TEST_DOC, + TEST_SUBJECT, TEST_ALGORITHM, TEST_ENCRYPTED_KEY_MATERIAL, TEST_VERSION, + TEST_KEK, TEST_KEK_2, TEST_DEK, TEST_DEK_V2, TEST_DEK_2, TEST_DEK_LATEST +}; diff --git a/test/schemaregistry/schemaregistry-client.spec.ts b/test/schemaregistry/schemaregistry-client.spec.ts new file mode 100644 index 00000000..14c7e060 --- /dev/null +++ b/test/schemaregistry/schemaregistry-client.spec.ts @@ -0,0 +1,632 @@ +import { + SchemaRegistryClient, + Metadata, + Compatibility, + SchemaInfo, + SchemaMetadata, + ServerConfig +} from '../../schemaregistry/schemaregistry-client'; +import { RestService } from '../../schemaregistry/rest-service'; +import { AxiosResponse } from 'axios'; +import stringify from "json-stringify-deterministic"; +import { beforeEach, afterEach, describe, expect, it, jest } from '@jest/globals'; +import { mockClientConfig } from '../../test/schemaregistry/test-constants'; + +jest.mock('../../schemaregistry/rest-service'); + +let client: SchemaRegistryClient; +let restService: jest.Mocked; +const mockSubject = 'mock-subject'; +const mockSubject2 = 'mock-subject2'; +const schemaString = stringify({ + type: 'record', + name: 'User', + fields: [ + { name: 'name', type: 'string' }, + { name: 'age', type: 'int' } + ] +}); +const schemaString2 = stringify({ + type: 'record', + name: 'User2', + fields: [ + { name: 'name2', type: 'string' }, + { name: 'age2', type: 'int' } + ] +}); +const metadata: Metadata = { + properties: { + owner: 'Alice Bob', + email: 'Alice@bob.com', + } +}; +const metadata2: Metadata = { + properties: { + owner: 'Alice Bob2', + email: 'Alice@bob2.com', + } +}; +const metadataKeyValue = { + 'owner': 'Alice Bob', + 'email': 'Alice@bob.com', +} + +const metadataKeyValue2 = { + 'owner': 'Alice Bob2', + 'email': 'Alice@bob2.com' +}; +const schemaInfo = { + schema: schemaString, + schemaType: 'AVRO', +}; +const schemaInfo2 = { + schema: schemaString, + schemaType: 'AVRO', +}; +const schemaInfoMetadata = { + schema: schemaString, + schemaType: 'AVRO', + metadata: metadata, +}; +const schemaInfoMetadata2 = { + schema: schemaString, + schemaType: 'AVRO', + metadata: metadata2, +}; +const subjects: string[] = [mockSubject, mockSubject2]; +const versions: number[] = [1, 2, 3]; + +describe('SchemaRegistryClient-Register', () => { + + beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return id when Register is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { id: 1 } } as AxiosResponse); + + const response: number = await client.register(mockSubject, schemaInfo); + + 
expect(response).toEqual(1); + + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return from cache when Register is called twice', async () => { + restService.handleRequest.mockResolvedValue({ data: { id: 1 } } as AxiosResponse); + + const response: number = await client.register(mockSubject, schemaInfo); + expect(response).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: { id: 2 } } as AxiosResponse); + + const response2: number = await client.register(mockSubject2, schemaInfo2); + expect(response2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + //Try to create same objects again + + const cachedResponse: number = await client.register(mockSubject, schemaInfo); + expect(cachedResponse).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: number = await client.register(mockSubject2, schemaInfo2); + expect(cachedResponse2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); + + it('Should return id, version, metadata, and schema when RegisterFullResponse is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaMetadata = await client.registerFullResponse(mockSubject, schemaInfoMetadata); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return id, version, metadata, and schema from cache when RegisterFullResponse is called twice', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + const expectedResponse2 = { + id: 2, + version: 1, + schema: schemaString2, + metadata: metadata2, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaMetadata = await client.registerFullResponse(mockSubject, schemaInfoMetadata); + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: expectedResponse2 } as AxiosResponse); + + const response2: SchemaMetadata = await client.registerFullResponse(mockSubject2, schemaInfoMetadata2); + expect(response2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: SchemaMetadata = await client.registerFullResponse(mockSubject, schemaInfoMetadata); + expect(cachedResponse).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: SchemaMetadata = await client.registerFullResponse(mockSubject2, schemaInfoMetadata2); + expect(cachedResponse2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); +}); + +describe('SchemaRegistryClient-Get-ID', () => { + beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return id when GetId is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { id: 1 } } as AxiosResponse); + + const response: 
number = await client.getId(mockSubject, schemaInfo); + + expect(response).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return id from cache when GetId is called twice', async () => { + restService.handleRequest.mockResolvedValue({ data: { id: 1 } } as AxiosResponse); + + const response: number = await client.getId(mockSubject, schemaInfo); + expect(response).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: { id: 2 } } as AxiosResponse); + + const response2: number = await client.getId(mockSubject2, schemaInfo2); + expect(response2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: number = await client.getId(mockSubject, schemaInfo); + expect(cachedResponse).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: number = await client.getId(mockSubject2, schemaInfo2); + expect(cachedResponse2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); + + it('Should return SchemaInfo when GetBySubjectAndId is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaInfo = await client.getBySubjectAndId(mockSubject, 1); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return SchemaInfo from cache when GetBySubjectAndId is called twice', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + const expectedResponse2 = { + id: 2, + version: 1, + schema: schemaString2, + metadata: metadata2, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaInfo = await client.getBySubjectAndId(mockSubject, 1); + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: expectedResponse2 } as AxiosResponse); + + const response2: SchemaInfo = await client.getBySubjectAndId(mockSubject2, 2); + expect(response2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: SchemaInfo = await client.getBySubjectAndId(mockSubject, 1); + expect(cachedResponse).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: SchemaInfo = await client.getBySubjectAndId(mockSubject2, 2); + expect(cachedResponse2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); +}); + +describe('SchemaRegistryClient-Get-Schema-Metadata', () => { + beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return latest schema with metadata when GetLatestWithMetadata is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: 
SchemaMetadata = await client.getLatestWithMetadata(mockSubject, metadataKeyValue); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return latest schema with metadata from cache when GetLatestWithMetadata is called twice', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + const expectedResponse2 = { + id: 2, + version: 1, + schema: schemaString2, + metadata: metadata2, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaMetadata = await client.getLatestWithMetadata(mockSubject, metadataKeyValue); + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: expectedResponse2 } as AxiosResponse); + + const response2: SchemaMetadata = await client.getLatestWithMetadata(mockSubject2, metadataKeyValue2); + expect(response2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: SchemaMetadata = await client.getLatestWithMetadata(mockSubject, metadataKeyValue); + expect(cachedResponse).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: SchemaMetadata = await client.getLatestWithMetadata(mockSubject2, metadataKeyValue2); + expect(cachedResponse2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); + + it('Should return SchemaMetadata when GetSchemaMetadata is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaMetadata = await client.getSchemaMetadata(mockSubject, 1, true); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return SchemaMetadata from cache when GetSchemaMetadata is called twice', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + const expectedResponse2 = { + id: 2, + version: 1, + schema: schemaString2, + metadata: metadata2, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: SchemaMetadata = await client.getSchemaMetadata(mockSubject, 1, true); + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: expectedResponse2 } as AxiosResponse); + + const response2: SchemaMetadata = await client.getSchemaMetadata(mockSubject2, 2, false); + expect(response2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: SchemaMetadata = await client.getSchemaMetadata(mockSubject, 1, true); + expect(cachedResponse).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: SchemaMetadata = await client.getSchemaMetadata(mockSubject2, 2, false); + expect(cachedResponse2).toMatchObject(expectedResponse2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); +}); + +describe('SchemaRegistryClient-Subjects', () => { + 
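+  // This suite covers subject and version listing, version lookups (cached per
+  // subject + schema), and the cache-eviction behaviour of deleteSubject and
+  // deleteSubjectVersion. The addTo*Cache / get*CacheSize helpers are used to
+  // observe the client's internal caches directly.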
beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return all subjects when GetAllSubjects is called', async () => { + restService.handleRequest.mockResolvedValue({ data: subjects } as AxiosResponse); + + const response: string[] = await client.getAllSubjects(); + + expect(response).toEqual(subjects); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return all versions when GetAllVersions is called', async () => { + restService.handleRequest.mockResolvedValue({ data: versions } as AxiosResponse); + + const response: number[] = await client.getAllVersions(mockSubject); + + expect(response).toEqual(versions); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return version when GetVersion is called', async () => { + const schemaInfo = { + schema: schemaString, + schemaType: 'AVRO', + }; + restService.handleRequest.mockResolvedValue({ data: { version: 1 } } as AxiosResponse); + + const response: number = await client.getVersion(mockSubject, schemaInfo, true); + + expect(response).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return version from cache when GetVersion is called twice', async () => { + const schemaInfo = { + schema: schemaString, + schemaType: 'AVRO', + }; + const schemaInfo2 = { + schema: schemaString2, + schemaType: 'AVRO', + }; + + restService.handleRequest.mockResolvedValue({ data: { version: 1 } } as AxiosResponse); + + const response: number = await client.getVersion(mockSubject, schemaInfo, true); + expect(response).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + + restService.handleRequest.mockResolvedValue({ data: { version: 2 } } as AxiosResponse); + + const response2: number = await client.getVersion(mockSubject2, schemaInfo2, false); + expect(response2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse: number = await client.getVersion(mockSubject, schemaInfo, true); + expect(cachedResponse).toEqual(1); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + + const cachedResponse2: number = await client.getVersion(mockSubject2, schemaInfo2, false); + expect(cachedResponse2).toEqual(2); + expect(restService.handleRequest).toHaveBeenCalledTimes(2); + }); + + it('Should delete subject from all caches and registry when deleteSubject is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + await client.addToInfoToSchemaCache(mockSubject, schemaInfo, expectedResponse); + await client.addToSchemaToVersionCache(mockSubject, schemaInfo, 1); + await client.addToVersionToSchemaCache(mockSubject, 1, expectedResponse); + await client.addToIdToSchemaInfoCache(mockSubject, 1, schemaInfo); + + restService.handleRequest.mockResolvedValue({ data: [1] } as AxiosResponse); + + const response: number[] = await client.deleteSubject(mockSubject); + + expect(await client.getInfoToSchemaCacheSize()).toEqual(0); + expect(await client.getSchemaToVersionCacheSize()).toEqual(0); + expect(await client.getVersionToSchemaCacheSize()).toEqual(0); + expect(await client.getIdToSchemaInfoCacheSize()).toEqual(0); + + expect(response).toEqual([1]); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + 
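+  // deleteSubjectVersion is expected to behave like deleteSubject for the affected
+  // entries: every cache referencing the deleted version should be emptied, while the
+  // registry itself is called exactly once.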
it('Should delete subject version from all caches and registry when deleteSubjectVersion is called', async () => { + const expectedResponse = { + id: 1, + version: 1, + schema: schemaString, + metadata: metadata, + }; + await client.addToInfoToSchemaCache(mockSubject, schemaInfo, expectedResponse); + await client.addToSchemaToVersionCache(mockSubject, schemaInfo, 1); + await client.addToVersionToSchemaCache(mockSubject, 1, expectedResponse); + await client.addToIdToSchemaInfoCache(mockSubject, 1, schemaInfo); + + restService.handleRequest.mockResolvedValue({ data: [1] } as AxiosResponse); + + const response: number = await client.deleteSubjectVersion(mockSubject, 1); + + expect(await client.getVersionToSchemaCacheSize()).toEqual(0); + expect(await client.getInfoToSchemaCacheSize()).toEqual(0); + expect(await client.getSchemaToVersionCacheSize()).toEqual(0); + expect(await client.getIdToSchemaInfoCacheSize()).toEqual(0); + + expect(response).toEqual([1]); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); +}); + +describe('SchemaRegistryClient-Compatibility', () => { + beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return compatibility level when GetCompatibility is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { compatibilityLevel: "BACKWARD" } } as AxiosResponse); + + const response: Compatibility = await client.getCompatibility(mockSubject); + + expect(response).toEqual('BACKWARD'); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should update compatibility level when updateCompatibility is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { compatibility: 'BACKWARD' } } as AxiosResponse); + + const response: Compatibility = await client.updateCompatibility(mockSubject, Compatibility.BACKWARD); + + expect(response).toEqual(Compatibility.BACKWARD); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return Compatibility when getDefaultCompatibility is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { compatibilityLevel: 'BACKWARD' } } as AxiosResponse); + + const response: Compatibility = await client.getDefaultCompatibility(); + + expect(response).toEqual(Compatibility.BACKWARD); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should update default compatibility level when updateDefaultCompatibility is called', async () => { + restService.handleRequest.mockResolvedValue({ data: { compatibility: 'BACKWARD' } } as AxiosResponse); + + const response: Compatibility = await client.updateDefaultCompatibility(Compatibility.BACKWARD); + + expect(response).toEqual(Compatibility.BACKWARD); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); +}); + +describe('SchemaRegistryClient-Config', () => { + beforeEach(() => { + restService = new RestService(mockClientConfig.baseURLs) as jest.Mocked; + client = new SchemaRegistryClient(mockClientConfig); + (client as any).restService = restService; + }); + afterEach(() => { + jest.clearAllMocks(); + }); + + it('Should return config when getConfig is called', async () => { + const expectedResponse = { + compatibilityLevel: 'BACKWARD', + alias: 'test-config', + normalize: true, + }; + + restService.handleRequest.mockResolvedValue({ data: 
expectedResponse } as AxiosResponse); + + const response: ServerConfig = await client.getConfig(mockSubject); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should update config when updateConfig is called', async () => { + const request = { + compatibility: Compatibility.BACKWARD, + alias: 'test-config', + normalize: true, + }; + const expectedResponse = { + compatibilityLevel: 'BACKWARD', + alias: 'test-config', + normalize: true, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: ServerConfig = await client.updateConfig(mockSubject, request); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should return config when getDefaultConfig is called', async () => { + const expectedResponse = { + compatibilityLevel: 'BACKWARD', + alias: 'test-config', + normalize: true, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: ServerConfig = await client.getDefaultConfig(); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); + + it('Should update default config when updateDefaultConfig is called', async () => { + const request = { + compatibility: Compatibility.BACKWARD, + alias: 'test-config', + normalize: true, + }; + const expectedResponse = { + compatibilityLevel: 'BACKWARD', + alias: 'test-config', + normalize: true, + }; + + restService.handleRequest.mockResolvedValue({ data: expectedResponse } as AxiosResponse); + + const response: ServerConfig = await client.updateDefaultConfig(request); + + expect(response).toMatchObject(expectedResponse); + expect(restService.handleRequest).toHaveBeenCalledTimes(1); + }); +}); diff --git a/test/schemaregistry/serde/avro.spec.ts b/test/schemaregistry/serde/avro.spec.ts new file mode 100644 index 00000000..1c51c07e --- /dev/null +++ b/test/schemaregistry/serde/avro.spec.ts @@ -0,0 +1,1252 @@ +import {afterEach, describe, expect, it} from '@jest/globals'; +import {ClientConfig} from "../../../schemaregistry/rest-service"; +import { + AvroDeserializer, + AvroDeserializerConfig, + AvroSerializer, + AvroSerializerConfig +} from "../../../schemaregistry/serde/avro"; +import {SerdeType, Serializer} from "../../../schemaregistry/serde/serde"; +import { + Client, + Rule, + RuleMode, + RuleSet, + SchemaInfo, + SchemaRegistryClient +} from "../../../schemaregistry/schemaregistry-client"; +import {LocalKmsDriver} from "../../../schemaregistry/rules/encryption/localkms/local-driver"; +import { + Clock, + FieldEncryptionExecutor +} from "../../../schemaregistry/rules/encryption/encrypt-executor"; +import {GcpKmsDriver} from "../../../schemaregistry/rules/encryption/gcpkms/gcp-driver"; +import {AwsKmsDriver} from "../../../schemaregistry/rules/encryption/awskms/aws-driver"; +import {AzureKmsDriver} from "../../../schemaregistry/rules/encryption/azurekms/azure-driver"; +import {HcVaultDriver} from "../../../schemaregistry/rules/encryption/hcvault/hcvault-driver"; +import {JsonataExecutor} from "@confluentinc/schemaregistry/rules/jsonata/jsonata-executor"; +import stringify from "json-stringify-deterministic"; +import {RuleRegistry} from "@confluentinc/schemaregistry/serde/rule-registry"; +import { + clearKmsClients +} from "@confluentinc/schemaregistry/rules/encryption/kms-registry"; + +const rootSchema = ` +{ + 
"name": "NestedTestRecord", + "type": "record", + "fields": [ + { + "name": "otherField", + "type": "DemoSchema" + } + ] +} +` +const demoSchema = ` +{ + "name": "DemoSchema", + "type": "record", + "fields": [ + { + "name": "intField", + "type": "int" + }, + { + "name": "doubleField", + "type": "double" + }, + { + "name": "stringField", + "type": "string", + "confluent:tags": [ "PII" ] + }, + { + "name": "boolField", + "type": "boolean" + }, + { + "name": "bytesField", + "type": "bytes", + "confluent:tags": [ "PII" ] + } + ] +} +` +const demoSchemaSingleTag = ` +{ + "name": "DemoSchema", + "type": "record", + "fields": [ + { + "name": "intField", + "type": "int" + }, + { + "name": "doubleField", + "type": "double" + }, + { + "name": "stringField", + "type": "string", + "confluent:tags": [ "PII" ] + }, + { + "name": "boolField", + "type": "boolean" + }, + { + "name": "bytesField", + "type": "bytes" + } + ] +} +` +const demoSchemaWithLogicalType = ` +{ + "name": "DemoSchema", + "type": "record", + "fields": [ + { + "name": "intField", + "type": "int" + }, + { + "name": "doubleField", + "type": "double" + }, + { + "name": "stringField", + "type": { + "type": "string", + "logicalType": "uuid" + }, + "confluent:tags": [ "PII" ] + }, + { + "name": "boolField", + "type": "boolean" + }, + { + "name": "bytesField", + "type": "bytes", + "confluent:tags": [ "PII" ] + } + ] +} +` +const rootPointerSchema = ` +{ + "name": "NestedTestPointerRecord", + "type": "record", + "fields": [ + { + "name": "otherField", + "type": ["null", "DemoSchema"] + } +] +} +` +const f1Schema = ` +{ + "name": "F1Schema", + "type": "record", + "fields": [ + { + "name": "f1", + "type": "string", + "confluent:tags": [ "PII" ] + } + ] +} +` +const demoSchemaWithUnion = ` +{ + "name": "DemoSchemaWithUnion", + "type": "record", + "fields": [ + { + "name": "intField", + "type": "int" + }, + { + "name": "doubleField", + "type": "double" + }, + { + "name": "stringField", + "type": ["null", "string"], + "confluent:tags": [ "PII" ] + }, + { + "name": "boolField", + "type": "boolean" + }, + { + "name": "bytesField", + "type": ["null", "bytes"], + "confluent:tags": [ "PII" ] + } + ] +} +` +const schemaEvolution1 = ` +{ + "name": "SchemaEvolution", + "type": "record", + "fields": [ + { + "name": "fieldToDelete", + "type": "string" + } + ] +} +` +const schemaEvolution2 = ` +{ + "name": "SchemaEvolution", + "type": "record", + "fields": [ + { + "name": "newOptionalField", + "type": ["string", "null"], + "default": "optional" + } + ] +} +` +const complexSchema = ` +{ + "name": "ComplexSchema", + "type": "record", + "fields": [ + { + "name": "arrayField", + "type": { + "type": "array", + "items": "string" + }, + "confluent:tags": [ "PII" ] + }, + { + "name": "mapField", + "type": { + "type": "map", + "values": "string" + }, + "confluent:tags": [ "PII" ] + }, + { + "name": "unionField", + "type": ["null", "string"], + "confluent:tags": [ "PII" ] + } + ] +} +` + +class FakeClock extends Clock { + fixedNow: number = 0 + + override now() { + return this.fixedNow + } +} + +const fieldEncryptionExecutor = FieldEncryptionExecutor.registerWithClock(new FakeClock()) +JsonataExecutor.register() +AwsKmsDriver.register() +AzureKmsDriver.register() +GcpKmsDriver.register() +HcVaultDriver.register() +LocalKmsDriver.register() + +//const baseURL = 'http://localhost:8081' +const baseURL = 'mock://' + +const topic = 'topic1' +const subject = topic + '-value' + +describe('AvroSerializer', () => { + afterEach(async () => { + let conf: ClientConfig = { + 
baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + await client.deleteSubject(subject, false) + await client.deleteSubject(subject, true) + }) + it('basic serialization', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new AvroSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let bytes = await ser.serialize(topic, obj) + + let deser = new AvroDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + }) + it('serialize nested', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new AvroSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + + let nested = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let obj = { + otherField: nested + } + let bytes = await ser.serialize(topic, obj) + + let deser = new AvroDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.otherField.intField).toEqual(nested.intField); + expect(obj2.otherField.doubleField).toBeCloseTo(nested.doubleField, 0.001); + expect(obj2.otherField.stringField).toEqual(nested.stringField); + expect(obj2.otherField.boolField).toEqual(nested.boolField); + expect(obj2.otherField.bytesField).toEqual(nested.bytesField); + }) + it('serialize reference', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new AvroSerializer(client, SerdeType.VALUE, {useLatestVersion: true}) + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: demoSchema, + } + await client.register('demo-value', info , false) + + info = { + schemaType: 'AVRO', + schema: rootPointerSchema, + references: [{ + name: 'DemoSchema', + subject: 'demo-value', + version: 1 + }] + } + await client.register(subject, info , false) + + let nested = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let obj = { + otherField: nested + } + let bytes = await ser.serialize(topic, obj) + + let deser = new AvroDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.otherField.intField).toEqual(nested.intField); + expect(obj2.otherField.doubleField).toBeCloseTo(nested.doubleField, 0.001); + expect(obj2.otherField.stringField).toEqual(nested.stringField); + expect(obj2.otherField.boolField).toEqual(nested.boolField); + expect(obj2.otherField.bytesField).toEqual(nested.bytesField); + }) + it('schema evolution', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new AvroSerializer(client, SerdeType.VALUE, {useLatestVersion: true}) + + let obj = { + fieldToDelete: "bye", + } + let info: SchemaInfo = { + schemaType: 'AVRO', + 
schema: schemaEvolution1, + } + + await client.register(subject, info, false) + + let bytes = await ser.serialize(topic, obj) + + info = { + schemaType: 'AVRO', + schema: schemaEvolution2, + } + + await client.register(subject, info, false) + + client.clearLatestCaches() + let deser = new AvroDeserializer(client, SerdeType.VALUE, {useLatestVersion: true}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.fieldToDelete).toEqual(undefined); + expect(obj2.newOptionalField).toEqual("optional"); + }) + it('basic encryption', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: demoSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([1, 2]) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + + clearKmsClients() + let registry = new RuleRegistry() + registry.registerExecutor(new FieldEncryptionExecutor()) + deser = new AvroDeserializer(client, SerdeType.VALUE, {}, registry) + obj2 = await deser.deserialize(topic, bytes) + expect(obj2.stringField).not.toEqual(obj.stringField); + expect(obj2.bytesField).not.toEqual(obj.bytesField); + }) + it('basic encryption with logical type', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
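+    // The ENCRYPT rule below targets every field tagged "PII" in the schema; with
+    // demoSchemaWithLogicalType the tagged stringField carries a "uuid" logical type,
+    // so this test checks that logical types survive the encrypt/decrypt round trip.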
+ + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: demoSchemaWithLogicalType, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([1, 2]) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + }) + it('basic encryption with dek rotation', async () => { + (fieldEncryptionExecutor.clock as FakeClock).fixedNow = Date.now() + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + 'encrypt.dek.expiry.days': '1', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: demoSchemaSingleTag, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + + let dek = await dekClient.getDek("kek1", subject, 'AES256_GCM', -1, false) + expect(1).toEqual(dek.version); + + // advance time by 2 days + (fieldEncryptionExecutor.clock as FakeClock).fixedNow += 2 * 24 * 60 * 60 * 1000 + + bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + + obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + 
expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + + dek = await dekClient.getDek("kek1", subject, 'AES256_GCM', -1, false) + expect(2).toEqual(dek.version); + + // advance time by 2 days + (fieldEncryptionExecutor.clock as FakeClock).fixedNow += 2 * 24 * 60 * 60 * 1000 + + bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + + obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + + dek = await dekClient.getDek("kek1", subject, 'AES256_GCM', -1, false) + expect(3).toEqual(dek.version); + }) + it('basic encryption with preserialized data', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: f1Schema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + f1: 'hello world' + } + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + let dekClient = fieldEncryptionExecutor.client! + + await dekClient.registerKek("kek1", "local-kms", "mykey", false) + const encryptedDek = "07V2ndh02DA73p+dTybwZFm7DKQSZN1tEwQh+FoX1DZLk4Yj2LLu4omYjp/84tAg3BYlkfGSz+zZacJHIE4=" + await dekClient.registerDek("kek1", subject, "AES256_GCM", 1, encryptedDek) + + const bytes = Buffer.from([0, 0, 0, 0, 1, 104, 122, 103, 121, 47, 106, 70, 78, 77, 86, 47, 101, 70, 105, 108, 97, 72, 114, 77, 121, 101, 66, 103, 100, 97, 86, 122, 114, 82, 48, 117, 100, 71, 101, 111, 116, 87, 56, 99, 65, 47, 74, 97, 108, 55, 117, 107, 114, 43, 77, 47, 121, 122]) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.f1).toEqual(obj.f1); + }) + it('deterministic encryption with preserialized data', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + 'encrypt.dek.algorithm': 'AES256_SIV', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: f1Schema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + f1: 'hello world' + } + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + let dekClient = fieldEncryptionExecutor.client! 
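+    // The byte buffer below is a fixed fixture, presumably captured from an earlier
+    // serialization run. Registering the same KEK and the same encrypted DEK material
+    // first lets the deserializer decrypt it; the leading bytes [0, 0, 0, 0, 1] are the
+    // usual Confluent wire-format prefix (magic byte 0 plus the 4-byte schema id, here 1).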
+ + await dekClient.registerKek("kek1", "local-kms", "mykey", false) + const encryptedDek = "YSx3DTlAHrmpoDChquJMifmPntBzxgRVdMzgYL82rgWBKn7aUSnG+WIu9ozBNS3y2vXd++mBtK07w4/W/G6w0da39X9hfOVZsGnkSvry/QRht84V8yz3dqKxGMOK5A==" + await dekClient.registerDek("kek1", subject, "AES256_SIV", 1, encryptedDek) + + const bytes = Buffer.from([0, 0, 0, 0, 1, 72, 68, 54, 89, 116, 120, 114, 108, 66, 110, 107, 84, 87, 87, 57, 78, 54, 86, 98, 107, 51, 73, 73, 110, 106, 87, 72, 56, 49, 120, 109, 89, 104, 51, 107, 52, 100]) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.f1).toEqual(obj.f1); + }) + it('dek rotation encryption with preserialized data', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + 'encrypt.dek.expiry.days': '1', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: f1Schema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + f1: 'hello world' + } + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + let dekClient = fieldEncryptionExecutor.client! + + await dekClient.registerKek("kek1", "local-kms", "mykey", false) + const encryptedDek = "W/v6hOQYq1idVAcs1pPWz9UUONMVZW4IrglTnG88TsWjeCjxmtRQ4VaNe/I5dCfm2zyY9Cu0nqdvqImtUk4=" + await dekClient.registerDek("kek1", subject, "AES256_GCM", 1, encryptedDek) + + const bytes = Buffer.from([0, 0, 0, 0, 1, 120, 65, 65, 65, 65, 65, 65, 71, 52, 72, 73, 54, 98, 49, 110, 88, 80, 88, 113, 76, 121, 71, 56, 99, 73, 73, 51, 53, 78, 72, 81, 115, 101, 113, 113, 85, 67, 100, 43, 73, 101, 76, 101, 70, 86, 65, 101, 78, 112, 83, 83, 51, 102, 120, 80, 110, 74, 51, 50, 65, 61]) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.f1).toEqual(obj.f1); + }) + it('encryption with references', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
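+    // DemoSchema is registered under its own subject ('demo-value') and then pulled in
+    // as a reference from rootSchema, so the PII-tagged fields being encrypted live in
+    // the referenced schema rather than in the top-level record.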
+ + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: demoSchema, + } + + await client.register('demo-value', info, false) + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + info = { + schemaType: 'AVRO', + schema: rootSchema, + references: [{ + name: 'DemoSchema', + subject: 'demo-value', + version: 1 + }], + ruleSet + } + + await client.register(subject, info, false) + + let nested = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let obj = { + otherField: nested + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + nested.stringField = 'hi' + nested.bytesField = Buffer.from([1, 2]) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.otherField.intField).toEqual(nested.intField); + expect(obj2.otherField.doubleField).toBeCloseTo(nested.doubleField, 0.001); + expect(obj2.otherField.stringField).toEqual(nested.stringField); + expect(obj2.otherField.boolField).toEqual(nested.boolField); + expect(obj2.otherField.bytesField).toEqual(nested.bytesField); + }) + it('encryption with union', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
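+    // This variant uses demoSchemaWithUnion, where the tagged fields are declared
+    // as union types; the round trip below verifies that encryption and decryption
+    // still apply, so the encrypted fields are reset before the comparison.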
+ + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info = { + schemaType: 'AVRO', + schema: demoSchemaWithUnion, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([1, 2]), + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([1, 2]) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.intField).toEqual(obj.intField); + expect(obj2.doubleField).toBeCloseTo(obj.doubleField, 0.001); + expect(obj2.stringField).toEqual(obj.stringField); + expect(obj2.boolField).toEqual(obj.boolField); + expect(obj2.bytesField).toEqual(obj.bytesField); + }) + it('complex encryption', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: complexSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + arrayField: [ 'hello' ], + mapField: { 'key': 'world' }, + unionField: 'bye', + } + let bytes = await ser.serialize(topic, obj) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.arrayField).toEqual([ 'hello' ]); + expect(obj2.mapField).toEqual({ 'key': 'world' }); + expect(obj2.unionField).toEqual('bye'); + }) + it('complex encryption with null', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: AvroSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new AvroSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
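+    // Same complexSchema as the previous test, but unionField is left null to
+    // exercise the null branch; the other PII-tagged values still round-trip
+    // through encryption and decryption. ('ERROR,NONE' pairs the write-path
+    // action with the read-path action for this WRITEREAD rule.)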
+ + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: complexSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + arrayField: [ 'hello' ], + mapField: { 'key': 'world' }, + unionField: null + } + let bytes = await ser.serialize(topic, obj) + + let deserConfig: AvroDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new AvroDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.arrayField).toEqual([ 'hello' ]); + expect(obj2.mapField).toEqual({ 'key': 'world' }); + expect(obj2.unionField).toEqual(null); + }) + it('jsonata fully compatible', async () => { + let rule1To2 = "$merge([$sift($, function($v, $k) {$k != 'size'}), {'height': $.'size'}])" + let rule2To1 = "$merge([$sift($, function($v, $k) {$k != 'height'}), {'size': $.'height'}])" + let rule2To3 = "$merge([$sift($, function($v, $k) {$k != 'height'}), {'length': $.'height'}])" + let rule3To2 = "$merge([$sift($, function($v, $k) {$k != 'length'}), {'height': $.'length'}])" + + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + + client.updateConfig(subject, { + compatibilityGroup: 'application.version' + }) + + let widget = { + name: 'alice', + size: 123, + version: 1, + } + let avroSchema = AvroSerializer.messageToSchema(widget) + let info: SchemaInfo = { + schemaType: 'AVRO', + schema: JSON.stringify(avroSchema), + metadata: { + properties: { + "application.version": "v1" + } + } + } + + await client.register(subject, info, false) + + let newWidget = { + name: 'alice', + height: 123, + version: 1, + } + avroSchema = AvroSerializer.messageToSchema(newWidget) + info = { + schemaType: 'AVRO', + schema: JSON.stringify(avroSchema), + metadata: { + properties: { + "application.version": "v2" + } + }, + ruleSet: { + migrationRules: [ + { + name: 'myRule1', + kind: 'TRANSFORM', + mode: RuleMode.UPGRADE, + type: 'JSONATA', + expr: rule1To2, + }, + { + name: 'myRule2', + kind: 'TRANSFORM', + mode: RuleMode.DOWNGRADE, + type: 'JSONATA', + expr: rule2To1, + }, + ] + } + } + + await client.register(subject, info, false) + + let newerWidget = { + name: 'alice', + length: 123, + version: 1, + } + avroSchema = AvroSerializer.messageToSchema(newerWidget) + info = { + schemaType: 'AVRO', + schema: JSON.stringify(avroSchema), + metadata: { + properties: { + "application.version": "v3" + } + }, + ruleSet: { + migrationRules: [ + { + name: 'myRule1', + kind: 'TRANSFORM', + mode: RuleMode.UPGRADE, + type: 'JSONATA', + expr: rule2To3, + }, + { + name: 'myRule2', + kind: 'TRANSFORM', + mode: RuleMode.DOWNGRADE, + type: 'JSONATA', + expr: rule3To2, + }, + ] + } + } + + await client.register(subject, info, false) + + let serConfig1 = { + useLatestWithMetadata: { + "application.version": "v1" + } + } + let ser1 = new AvroSerializer(client, SerdeType.VALUE, serConfig1) + let bytes = await ser1.serialize(topic, widget) + + await deserializeWithAllVersions(client, ser1, bytes, widget, newWidget, newerWidget) + + let serConfig2 = { + useLatestWithMetadata: { + 
"application.version": "v2" + } + } + let ser2 = new AvroSerializer(client, SerdeType.VALUE, serConfig2) + bytes = await ser2.serialize(topic, newWidget) + + await deserializeWithAllVersions(client, ser2, bytes, widget, newWidget, newerWidget) + + let serConfig3 = { + useLatestWithMetadata: { + "application.version": "v3" + } + } + let ser3 = new AvroSerializer(client, SerdeType.VALUE, serConfig3) + bytes = await ser3.serialize(topic, newerWidget) + + await deserializeWithAllVersions(client, ser3, bytes, widget, newWidget, newerWidget) + }) + + async function deserializeWithAllVersions(client: Client, ser: Serializer, bytes: Buffer, + widget: any, newWidget: any, newerWidget: any) { + let deserConfig1: AvroDeserializerConfig = { + useLatestWithMetadata: { + "application.version": "v1" + } + } + let deser1 = new AvroDeserializer(client, SerdeType.VALUE, deserConfig1) + deser1.client = ser.client + + let newobj = await deser1.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(widget)); + + let deserConfig2 = { + useLatestWithMetadata: { + "application.version": "v2" + } + } + let deser2 = new AvroDeserializer(client, SerdeType.VALUE, deserConfig2) + newobj = await deser2.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(newWidget)); + + let deserConfig3 = { + useLatestWithMetadata: { + "application.version": "v3" + } + } + let deser3 = new AvroDeserializer(client, SerdeType.VALUE, deserConfig3) + newobj = await deser3.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(newerWidget)); + } +}) diff --git a/test/schemaregistry/serde/buffer-wrapper.spec.ts b/test/schemaregistry/serde/buffer-wrapper.spec.ts new file mode 100644 index 00000000..f3ac5f76 --- /dev/null +++ b/test/schemaregistry/serde/buffer-wrapper.spec.ts @@ -0,0 +1,26 @@ +import { describe, expect, it } from '@jest/globals'; +import { BufferWrapper, MAX_VARINT_LEN_32 } from "../../../schemaregistry/serde/buffer-wrapper"; + +describe('BufferWrapper', () => { + it('write and read 100', () => { + const buf = Buffer.alloc(MAX_VARINT_LEN_32) + const bw = new BufferWrapper(buf) + bw.writeVarInt(100) + const bw2 = new BufferWrapper(bw.buf.subarray(0, bw.pos)) + expect(bw2.readVarInt()).toBe(100) + }) + it('write and read max pos int', () => { + const buf = Buffer.alloc(MAX_VARINT_LEN_32) + const bw = new BufferWrapper(buf) + bw.writeVarInt(2147483647) + const bw2 = new BufferWrapper(bw.buf.subarray(0, bw.pos)) + expect(bw2.readVarInt()).toBe(2147483647) + }) + it('write and read max neg int', () => { + const buf = Buffer.alloc(MAX_VARINT_LEN_32) + const bw = new BufferWrapper(buf) + bw.writeVarInt(-2147483648) + const bw2 = new BufferWrapper(bw.buf.subarray(0, bw.pos)) + expect(bw2.readVarInt()).toBe(-2147483648) + }) +}) diff --git a/test/schemaregistry/serde/json.spec.ts b/test/schemaregistry/serde/json.spec.ts new file mode 100644 index 00000000..27378f0a --- /dev/null +++ b/test/schemaregistry/serde/json.spec.ts @@ -0,0 +1,926 @@ +import {afterEach, describe, expect, it} from '@jest/globals'; +import {ClientConfig} from "../../../schemaregistry/rest-service"; +import {SerdeType, SerializationError, Serializer} from "../../../schemaregistry/serde/serde"; +import { + Client, + Rule, + RuleMode, + RuleSet, + SchemaInfo, + SchemaRegistryClient +} from "../../../schemaregistry/schemaregistry-client"; +import {LocalKmsDriver} from "../../../schemaregistry/rules/encryption/localkms/local-driver"; +import {FieldEncryptionExecutor} from 
"../../../schemaregistry/rules/encryption/encrypt-executor"; +import { + JsonDeserializer, JsonDeserializerConfig, + JsonSerializer, + JsonSerializerConfig +} from "../../../schemaregistry/serde/json"; +import {RuleRegistry} from "@confluentinc/schemaregistry/serde/rule-registry"; +import stringify from "json-stringify-deterministic"; +import {JsonataExecutor} from "@confluentinc/schemaregistry/rules/jsonata/jsonata-executor"; +import {clearKmsClients} from "@confluentinc/schemaregistry/rules/encryption/kms-registry"; + +const fieldEncryptionExecutor = FieldEncryptionExecutor.register() +JsonataExecutor.register() +LocalKmsDriver.register() + +//const baseURL = 'http://localhost:8081' +const baseURL = 'mock://' + +const topic = 'topic1' +const subject = topic + '-value' + +const rootSchema = ` +{ + "type": "object", + "properties": { + "otherField": { "$ref": "DemoSchema" } + } +} +` +const rootSchema2020_12 = ` +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "otherField": { "$ref": "DemoSchema" } + } +} +` +const demoSchema = ` +{ + "type": "object", + "properties": { + "intField": { "type": "integer" }, + "doubleField": { "type": "number" }, + "stringField": { + "type": "string", + "confluent:tags": [ "PII" ] + }, + "boolField": { "type": "boolean" }, + "bytesField": { + "type": "string", + "contentEncoding": "base64", + "confluent:tags": [ "PII" ] + } + } +} +` +const demoSchemaWithUnion = ` +{ + "type": "object", + "properties": { + "intField": { "type": "integer" }, + "doubleField": { "type": "number" }, + "stringField": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "confluent:tags": [ "PII" ] + }, + "boolField": { "type": "boolean" }, + "bytesField": { + "type": "string", + "contentEncoding": "base64", + "confluent:tags": [ "PII" ] + } + } +} +` +const demoSchema2020_12 = ` +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "type": "object", + "properties": { + "intField": { "type": "integer" }, + "doubleField": { "type": "number" }, + "stringField": { + "type": "string", + "confluent:tags": [ "PII" ] + }, + "boolField": { "type": "boolean" }, + "bytesField": { + "type": "string", + "contentEncoding": "base64", + "confluent:tags": [ "PII" ] + } + } +} +` +const complexSchema = ` +{ + "type": "object", + "properties": { + "arrayField": { + "type": "array", + "items": { + "type": "string" + }, + "confluent:tags": [ "PII" ] + }, + "objectField": { + "type": "object", + "properties": { + "stringField": { "type": "string" } + }, + "confluent:tags": [ "PII" ] + }, + "unionField": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "confluent:tags": [ "PII" ] + } + } +} +` + +describe('JsonSerializer', () => { + afterEach(async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + await client.deleteSubject(subject, false) + await client.deleteSubject(subject, true) + }) + it('basic serialization', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + autoRegisterSchemas: true, + validate: true + }) + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + let deser = new 
JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('basic serialization 2020-12', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + useLatestVersion: true, + validate: true + }) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema2020_12 + } + + await client.register(subject, info, false) + + let bytes = await ser.serialize(topic, obj) + + let deser = new JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize nested', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + autoRegisterSchemas: true, + validate: true + }) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + let deser = new JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize reference', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + useLatestVersion: true, + validate: true + }) + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema + } + await client.register('demo-value', info, false) + + info = { + schemaType: 'JSON', + schema: rootSchema, + references: [{ + name: 'DemoSchema', + subject: 'demo-value', + version: 1 + }] + } + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + let deser = new JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize reference 2020_12', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + useLatestVersion: true, + validate: true + }) + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema2020_12 + } + await client.register('demo-value', info, false) + + info = { + schemaType: 'JSON', + schema: rootSchema2020_12, + references: [{ + name: 'DemoSchema', + subject: 'demo-value', + version: 1 + }] + } + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + let deser = new JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('basic failing validation', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + 
cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new JsonSerializer(client, SerdeType.VALUE, { + useLatestVersion: true, + validate: true + }) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let jsonSchema = JsonSerializer.messageToSchema(obj) + let info: SchemaInfo = { + schemaType: 'JSON', + schema: JSON.stringify(jsonSchema) + } + + await client.register(subject, info, false) + + let bytes = await ser.serialize(topic, obj) + + let deser = new JsonDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + + let diffObj = { + intField: '123', + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + + await expect(() => ser.serialize(topic, diffObj)).rejects.toThrow(SerializationError) + }) + it('basic encryption', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([0, 0, 0, 1]).toString('base64') + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + + clearKmsClients() + let registry = new RuleRegistry() + registry.registerExecutor(new FieldEncryptionExecutor()) + deser = new JsonDeserializer(client, SerdeType.VALUE, {}, registry) + obj2 = await deser.deserialize(topic, bytes) + expect(obj2).not.toEqual(obj); + }) + it('basic encryption 2020-12', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
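+    // Same flow as the basic encryption test, but registering demoSchema2020_12
+    // (declared with the draft 2020-12 "$schema") to confirm that confluent:tags
+    // and the ENCRYPT rule are honored under the newer JSON Schema draft as well.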
+ + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema2020_12, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([0, 0, 0, 1]).toString('base64') + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + + clearKmsClients() + let registry = new RuleRegistry() + registry.registerExecutor(new FieldEncryptionExecutor()) + deser = new JsonDeserializer(client, SerdeType.VALUE, {}, registry) + obj2 = await deser.deserialize(topic, bytes) + expect(obj2).not.toEqual(obj); + }) + it('encryption with union', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchemaWithUnion, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.stringField = 'hi' + obj.bytesField = Buffer.from([0, 0, 0, 1]).toString('base64') + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('encryption with reference', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
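+    // Mirrors the Avro reference test: DemoSchema is registered under 'demo-value',
+    // rootSchema refers to it by subject and version, and the ENCRYPT rule on the
+    // root subject covers the PII-tagged fields of the referenced schema, so
+    // stringField and bytesField are reset after serialization below.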
+ + let info: SchemaInfo = { + schemaType: 'JSON', + schema: demoSchema, + } + await client.register('demo-value', info, false) + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,ERROR' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + info = { + schemaType: 'JSON', + schema: rootSchema, + references: [{ + name: 'DemoSchema', + subject: 'demo-value', + version: 1 + }], + ruleSet + } + await client.register(subject, info, false) + + let nested = { + intField: 123, + doubleField: 45.67, + stringField: 'hi', + boolField: true, + bytesField: Buffer.from([0, 0, 0, 1]).toString('base64') + } + let obj = { + otherField: nested + } + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + nested.stringField = 'hi' + nested.bytesField = Buffer.from([0, 0, 0, 1]).toString('base64') + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('complex encryption', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: complexSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + arrayField: [ 'hello' ], + objectField: { 'stringField': 'world' }, + unionField: 'bye', + } + let bytes = await ser.serialize(topic, obj) + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.arrayField).toEqual([ 'hello' ]); + expect(obj2.objectField.stringField).toEqual('world'); + expect(obj2.unionField).toEqual('bye'); + }) + it('complex encryption with null', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: JsonSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new JsonSerializer(client, SerdeType.VALUE, serConfig) + let dekClient = fieldEncryptionExecutor.client! 
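+    // complexSchema tags the array, object and oneOf members as PII; this variant
+    // leaves unionField null to check that the null branch passes through the
+    // rule untouched while the other tagged values still round-trip.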
+ + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'JSON', + schema: complexSchema, + ruleSet + } + + await client.register(subject, info, false) + + let obj = { + arrayField: [ 'hello' ], + objectField: { 'stringField': 'world' }, + unionField: null, + } + let bytes = await ser.serialize(topic, obj) + + let deserConfig: JsonDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new JsonDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.arrayField).toEqual([ 'hello' ]); + expect(obj2.objectField.stringField).toEqual('world'); + expect(obj2.unionField).toEqual(null); + }) + it('jsonata fully compatible', async () => { + let rule1To2 = "$merge([$sift($, function($v, $k) {$k != 'size'}), {'height': $.'size'}])" + let rule2To1 = "$merge([$sift($, function($v, $k) {$k != 'height'}), {'size': $.'height'}])" + let rule2To3 = "$merge([$sift($, function($v, $k) {$k != 'height'}), {'length': $.'height'}])" + let rule3To2 = "$merge([$sift($, function($v, $k) {$k != 'length'}), {'height': $.'length'}])" + + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + + client.updateConfig(subject, { + compatibilityGroup: 'application.version' + }) + + let widget = { + name: 'alice', + size: 123, + version: 1, + } + let jsonSchema = JsonSerializer.messageToSchema(widget) + let info: SchemaInfo = { + schemaType: 'JSON', + schema: JSON.stringify(jsonSchema), + metadata: { + properties: { + "application.version": "v1" + } + } + } + + await client.register(subject, info, false) + + let newWidget = { + name: 'alice', + height: 123, + version: 1, + } + jsonSchema = JsonSerializer.messageToSchema(newWidget) + info = { + schemaType: 'JSON', + schema: JSON.stringify(jsonSchema), + metadata: { + properties: { + "application.version": "v2" + } + }, + ruleSet: { + migrationRules: [ + { + name: 'myRule1', + kind: 'TRANSFORM', + mode: RuleMode.UPGRADE, + type: 'JSONATA', + expr: rule1To2, + }, + { + name: 'myRule2', + kind: 'TRANSFORM', + mode: RuleMode.DOWNGRADE, + type: 'JSONATA', + expr: rule2To1, + }, + ] + } + } + + await client.register(subject, info, false) + + let newerWidget = { + name: 'alice', + length: 123, + version: 1, + } + jsonSchema = JsonSerializer.messageToSchema(newerWidget) + info = { + schemaType: 'JSON', + schema: JSON.stringify(jsonSchema), + metadata: { + properties: { + "application.version": "v3" + } + }, + ruleSet: { + migrationRules: [ + { + name: 'myRule1', + kind: 'TRANSFORM', + mode: RuleMode.UPGRADE, + type: 'JSONATA', + expr: rule2To3, + }, + { + name: 'myRule2', + kind: 'TRANSFORM', + mode: RuleMode.DOWNGRADE, + type: 'JSONATA', + expr: rule3To2, + }, + ] + } + } + + await client.register(subject, info, false) + + let serConfig1 = { + useLatestWithMetadata: { + "application.version": "v1" + } + } + let ser1 = new JsonSerializer(client, SerdeType.VALUE, serConfig1) + let bytes = await ser1.serialize(topic, widget) + + await deserializeWithAllVersions(client, ser1, bytes, widget, newWidget, newerWidget) + + let serConfig2 = { + useLatestWithMetadata: { + 
"application.version": "v2" + } + } + let ser2 = new JsonSerializer(client, SerdeType.VALUE, serConfig2) + bytes = await ser2.serialize(topic, newWidget) + + await deserializeWithAllVersions(client, ser2, bytes, widget, newWidget, newerWidget) + + let serConfig3 = { + useLatestWithMetadata: { + "application.version": "v3" + } + } + let ser3 = new JsonSerializer(client, SerdeType.VALUE, serConfig3) + bytes = await ser3.serialize(topic, newerWidget) + + await deserializeWithAllVersions(client, ser3, bytes, widget, newWidget, newerWidget) + }) + + async function deserializeWithAllVersions(client: Client, ser: Serializer, bytes: Buffer, + widget: any, newWidget: any, newerWidget: any) { + let deserConfig1: JsonDeserializerConfig = { + useLatestWithMetadata: { + "application.version": "v1" + } + } + let deser1 = new JsonDeserializer(client, SerdeType.VALUE, deserConfig1) + deser1.client = ser.client + + let newobj = await deser1.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(widget)); + + let deserConfig2 = { + useLatestWithMetadata: { + "application.version": "v2" + } + } + let deser2 = new JsonDeserializer(client, SerdeType.VALUE, deserConfig2) + newobj = await deser2.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(newWidget)); + + let deserConfig3 = { + useLatestWithMetadata: { + "application.version": "v3" + } + } + let deser3 = new JsonDeserializer(client, SerdeType.VALUE, deserConfig3) + newobj = await deser3.deserialize(topic, bytes) + expect(stringify(newobj)).toEqual(stringify(newerWidget)); + } +}) diff --git a/test/schemaregistry/serde/protobuf.spec.ts b/test/schemaregistry/serde/protobuf.spec.ts new file mode 100644 index 00000000..4d7e1759 --- /dev/null +++ b/test/schemaregistry/serde/protobuf.spec.ts @@ -0,0 +1,236 @@ +import {afterEach, describe, expect, it} from '@jest/globals'; +import {ClientConfig} from "../../../schemaregistry/rest-service"; +import { + ProtobufDeserializer, ProtobufDeserializerConfig, + ProtobufSerializer, ProtobufSerializerConfig, +} from "../../../schemaregistry/serde/protobuf"; +import {SerdeType} from "../../../schemaregistry/serde/serde"; +import { + Rule, + RuleMode, + RuleSet, + SchemaInfo, + SchemaRegistryClient +} from "../../../schemaregistry/schemaregistry-client"; +import {LocalKmsDriver} from "../../../schemaregistry/rules/encryption/localkms/local-driver"; +import {FieldEncryptionExecutor} from "../../../schemaregistry/rules/encryption/encrypt-executor"; +import {AuthorSchema, file_test_schemaregistry_serde_example, PizzaSchema} from "./test/example_pb"; +import {create, toBinary} from "@bufbuild/protobuf"; +import {FileDescriptorProtoSchema} from "@bufbuild/protobuf/wkt"; +import { + NestedMessage_InnerMessageSchema +} from "./test/nested_pb"; +import {TestMessageSchema} from "./test/test_pb"; +import {DependencyMessageSchema} from "./test/dep_pb"; +import {RuleRegistry} from "@confluentinc/schemaregistry/serde/rule-registry"; +import {LinkedListSchema} from "./test/cycle_pb"; +import {clearKmsClients} from "@confluentinc/schemaregistry/rules/encryption/kms-registry"; + +const fieldEncryptionExecutor = FieldEncryptionExecutor.register() +LocalKmsDriver.register() + +//const baseURL = 'http://localhost:8081' +const baseURL = 'mock://' + +const topic = 'topic1' +const subject = topic + '-value' + +describe('ProtobufSerializer', () => { + afterEach(async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + await 
client.deleteSubject(subject, false) + await client.deleteSubject(subject, true) + }) + it('basic serialization', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new ProtobufSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + ser.registry.add(AuthorSchema) + let obj = create(AuthorSchema, { + name: 'Kafka', + id: 123, + picture: Buffer.from([1, 2]), + works: ['The Castle', 'The Trial'] + }) + let bytes = await ser.serialize(topic, obj) + + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize second messsage', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new ProtobufSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + ser.registry.add(PizzaSchema) + let obj = create(PizzaSchema, { + size: 'Extra extra large', + toppings: ['anchovies', 'mushrooms'] + }) + let bytes = await ser.serialize(topic, obj) + + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize nested messsage', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new ProtobufSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + ser.registry.add(NestedMessage_InnerMessageSchema) + let obj = create(NestedMessage_InnerMessageSchema, { + id: "inner" + }) + let bytes = await ser.serialize(topic, obj) + + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('serialize reference', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new ProtobufSerializer(client, SerdeType.VALUE, {autoRegisterSchemas: true}) + ser.registry.add(TestMessageSchema) + ser.registry.add(DependencyMessageSchema) + let msg = create(TestMessageSchema, { + testString: "hi", + testBool: true, + testBytes: Buffer.from([1, 2]), + testDouble: 1.23, + testFloat: 3.45, + testFixed32: 67, + testFixed64: 89n, + testInt32: 100, + testInt64: 200n, + testSfixed32: 300, + testSfixed64: 400n, + testSint32: 500, + testSint64: 600n, + testUint32: 700, + testUint64: 800n, + }) + let obj = create(DependencyMessageSchema, { + isActive: true, + testMesssage: msg + }) + let bytes = await ser.serialize(topic, obj) + + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2.testMesssage.testString).toEqual(msg.testString); + expect(obj2.testMesssage.testBool).toEqual(msg.testBool); + expect(obj2.testMesssage.testBytes).toEqual(msg.testBytes); + expect(obj2.testMesssage.testDouble).toBeCloseTo(msg.testDouble, 0.001); + expect(obj2.testMesssage.testFloat).toBeCloseTo(msg.testFloat, 0.001); + expect(obj2.testMesssage.testFixed32).toEqual(msg.testFixed32); + expect(obj2.testMesssage.testFixed64).toEqual(msg.testFixed64); + }) + it('serialize cycle', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let ser = new ProtobufSerializer(client, 
SerdeType.VALUE, {autoRegisterSchemas: true}) + ser.registry.add(LinkedListSchema) + let inner = create(LinkedListSchema, { + value: 100, + }) + let obj = create(LinkedListSchema, { + value: 1, + next: inner + }) + let bytes = await ser.serialize(topic, obj) + + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}) + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + }) + it('basic encryption', async () => { + let conf: ClientConfig = { + baseURLs: [baseURL], + cacheCapacity: 1000 + } + let client = SchemaRegistryClient.newClient(conf) + let serConfig: ProtobufSerializerConfig = { + useLatestVersion: true, + ruleConfig: { + secret: 'mysecret' + } + } + let ser = new ProtobufSerializer(client, SerdeType.VALUE, serConfig) + ser.registry.add(AuthorSchema) + let dekClient = fieldEncryptionExecutor.client! + + let encRule: Rule = { + name: 'test-encrypt', + kind: 'TRANSFORM', + mode: RuleMode.WRITEREAD, + type: 'ENCRYPT', + tags: ['PII'], + params: { + 'encrypt.kek.name': 'kek1', + 'encrypt.kms.type': 'local-kms', + 'encrypt.kms.key.id': 'mykey', + }, + onFailure: 'ERROR,NONE' + } + let ruleSet: RuleSet = { + domainRules: [encRule] + } + + let info: SchemaInfo = { + schemaType: 'PROTOBUF', + schema: Buffer.from(toBinary(FileDescriptorProtoSchema, file_test_schemaregistry_serde_example.proto)).toString('base64'), + ruleSet + } + + await client.register(subject, info, false) + + let obj = create(AuthorSchema, { + name: 'Kafka', + id: 123, + picture: Buffer.from([1, 2]), + works: ['The Castle', 'The Trial'] + }) + let bytes = await ser.serialize(topic, obj) + + // reset encrypted field + obj.name = 'Kafka' + obj.picture = Buffer.from([1, 2]) + + let deserConfig: ProtobufDeserializerConfig = { + ruleConfig: { + secret: 'mysecret' + } + } + let deser = new ProtobufDeserializer(client, SerdeType.VALUE, deserConfig) + fieldEncryptionExecutor.client = dekClient + let obj2 = await deser.deserialize(topic, bytes) + expect(obj2).toEqual(obj) + + clearKmsClients() + let registry = new RuleRegistry() + registry.registerExecutor(new FieldEncryptionExecutor()) + deser = new ProtobufDeserializer(client, SerdeType.VALUE, {}, registry) + obj2 = await deser.deserialize(topic, bytes) + expect(obj2).not.toEqual(obj); + }) +}) diff --git a/test/schemaregistry/serde/test/cycle_pb.ts b/test/schemaregistry/serde/test/cycle_pb.ts new file mode 100644 index 00000000..73e60993 --- /dev/null +++ b/test/schemaregistry/serde/test/cycle_pb.ts @@ -0,0 +1,36 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/cycle.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/cycle.proto. + */ +export const file_test_schemaregistry_serde_cycle: GenFile = /*@__PURE__*/ + fileDesc("CiV0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL2N5Y2xlLnByb3RvEgR0ZXN0IjsKCkxpbmtlZExpc3QSDQoFdmFsdWUYASABKAUSHgoEbmV4dBgKIAEoCzIQLnRlc3QuTGlua2VkTGlzdEIJWgcuLi90ZXN0YgZwcm90bzM"); + +/** + * @generated from message test.LinkedList + */ +export type LinkedList = Message<"test.LinkedList"> & { + /** + * @generated from field: int32 value = 1; + */ + value: number; + + /** + * @generated from field: test.LinkedList next = 10; + */ + next?: LinkedList; +}; + +/** + * Describes the message test.LinkedList. 
+ * Use `create(LinkedListSchema)` to create a new message. + */ +export const LinkedListSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_cycle, 0); + diff --git a/test/schemaregistry/serde/test/dep_pb.ts b/test/schemaregistry/serde/test/dep_pb.ts new file mode 100644 index 00000000..80783aee --- /dev/null +++ b/test/schemaregistry/serde/test/dep_pb.ts @@ -0,0 +1,38 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/dep.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { TestMessage } from "./test_pb"; +import { file_test_schemaregistry_serde_test } from "./test_pb"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/dep.proto. + */ +export const file_test_schemaregistry_serde_dep: GenFile = /*@__PURE__*/ + fileDesc("CiN0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL2RlcC5wcm90bxIEdGVzdCJQChFEZXBlbmRlbmN5TWVzc2FnZRIRCglpc19hY3RpdmUYASABKAgSKAoNdGVzdF9tZXNzc2FnZRgCIAEoCzIRLnRlc3QuVGVzdE1lc3NhZ2VCCVoHLi4vdGVzdGIGcHJvdG8z", [file_test_schemaregistry_serde_test]); + +/** + * @generated from message test.DependencyMessage + */ +export type DependencyMessage = Message<"test.DependencyMessage"> & { + /** + * @generated from field: bool is_active = 1; + */ + isActive: boolean; + + /** + * @generated from field: test.TestMessage test_messsage = 2; + */ + testMesssage?: TestMessage; +}; + +/** + * Describes the message test.DependencyMessage. + * Use `create(DependencyMessageSchema)` to create a new message. + */ +export const DependencyMessageSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_dep, 0); + diff --git a/test/schemaregistry/serde/test/example_pb.ts b/test/schemaregistry/serde/test/example_pb.ts new file mode 100644 index 00000000..2120dfd8 --- /dev/null +++ b/test/schemaregistry/serde/test/example_pb.ts @@ -0,0 +1,69 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/example.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import { file_confluent_meta } from "../../../../schemaregistry/confluent/meta_pb"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/example.proto. + */ +export const file_test_schemaregistry_serde_example: GenFile = /*@__PURE__*/ + fileDesc("Cid0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL2V4YW1wbGUucHJvdG8SBHRlc3QiVgoGQXV0aG9yEhYKBG5hbWUYASABKAlCCIJEBRoDUElJEgoKAmlkGAIgASgFEhkKB3BpY3R1cmUYAyABKAxCCIJEBRoDUElJEg0KBXdvcmtzGAQgAygJIicKBVBpenphEgwKBHNpemUYASABKAkSEAoIdG9wcGluZ3MYAiADKAlCCVoHLi4vdGVzdGIGcHJvdG8z", [file_confluent_meta]); + +/** + * @generated from message test.Author + */ +export type Author = Message<"test.Author"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: int32 id = 2; + */ + id: number; + + /** + * @generated from field: bytes picture = 3; + */ + picture: Uint8Array; + + /** + * @generated from field: repeated string works = 4; + */ + works: string[]; +}; + +/** + * Describes the message test.Author. + * Use `create(AuthorSchema)` to create a new message. 
+ */ +export const AuthorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_example, 0); + +/** + * @generated from message test.Pizza + */ +export type Pizza = Message<"test.Pizza"> & { + /** + * @generated from field: string size = 1; + */ + size: string; + + /** + * @generated from field: repeated string toppings = 2; + */ + toppings: string[]; +}; + +/** + * Describes the message test.Pizza. + * Use `create(PizzaSchema)` to create a new message. + */ +export const PizzaSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_example, 1); + diff --git a/test/schemaregistry/serde/test/nested_pb.ts b/test/schemaregistry/serde/test/nested_pb.ts new file mode 100644 index 00000000..a76d1dfb --- /dev/null +++ b/test/schemaregistry/serde/test/nested_pb.ts @@ -0,0 +1,221 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/nested.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/nested.proto. + */ +export const file_test_schemaregistry_serde_nested: GenFile = /*@__PURE__*/ + fileDesc("CiZ0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL25lc3RlZC5wcm90bxIEdGVzdCJsCgZVc2VySWQSFwoNa2Fma2FfdXNlcl9pZBgBIAEoCUgAEhcKDW90aGVyX3VzZXJfaWQYAiABKAVIABIlCgphbm90aGVyX2lkGAMgASgLMg8udGVzdC5NZXNzYWdlSWRIAEIJCgd1c2VyX2lkIhcKCU1lc3NhZ2VJZBIKCgJpZBgBIAEoCSJSCgtDb21wbGV4VHlwZRIQCgZvbmVfaWQYASABKAlIABISCghvdGhlcl9pZBgCIAEoBUgAEhEKCWlzX2FjdGl2ZRgDIAEoCEIKCghzb21lX3ZhbCLcAwoNTmVzdGVkTWVzc2FnZRIdCgd1c2VyX2lkGAEgASgLMgwudGVzdC5Vc2VySWQSEQoJaXNfYWN0aXZlGAIgASgIEhoKEmV4cGVyaW1lbnRzX2FjdGl2ZRgDIAMoCRIuCgp1cGRhdGVkX2F0GAQgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBIcCgZzdGF0dXMYBSABKA4yDC50ZXN0LlN0YXR1cxInCgxjb21wbGV4X3R5cGUYBiABKAsyES50ZXN0LkNvbXBsZXhUeXBlEjIKCG1hcF90eXBlGAcgAygLMiAudGVzdC5OZXN0ZWRNZXNzYWdlLk1hcFR5cGVFbnRyeRIvCgVpbm5lchgIIAEoCzIgLnRlc3QuTmVzdGVkTWVzc2FnZS5Jbm5lck1lc3NhZ2UaLgoMTWFwVHlwZUVudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEaKwoMSW5uZXJNZXNzYWdlEgoKAmlkGAEgASgJEg8KA2lkcxgCIAMoBUICEAEiKAoJSW5uZXJFbnVtEggKBFpFUk8QABINCglBTFNPX1pFUk8QABoCEAFKBAgOEA9KBAgPEBBKBAgJEAxSA2Zvb1IDYmFyKiIKBlN0YXR1cxIKCgZBQ1RJVkUQABIMCghJTkFDVElWRRABQglaBy4uL3Rlc3RiBnByb3RvMw", [file_google_protobuf_timestamp]); + +/** + * @generated from message test.UserId + */ +export type UserId = Message<"test.UserId"> & { + /** + * @generated from oneof test.UserId.user_id + */ + userId: { + /** + * @generated from field: string kafka_user_id = 1; + */ + value: string; + case: "kafkaUserId"; + } | { + /** + * @generated from field: int32 other_user_id = 2; + */ + value: number; + case: "otherUserId"; + } | { + /** + * @generated from field: test.MessageId another_id = 3; + */ + value: MessageId; + case: "anotherId"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message test.UserId. + * Use `create(UserIdSchema)` to create a new message. 
+ */ +export const UserIdSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_nested, 0); + +/** + * @generated from message test.MessageId + */ +export type MessageId = Message<"test.MessageId"> & { + /** + * @generated from field: string id = 1; + */ + id: string; +}; + +/** + * Describes the message test.MessageId. + * Use `create(MessageIdSchema)` to create a new message. + */ +export const MessageIdSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_nested, 1); + +/** + * @generated from message test.ComplexType + */ +export type ComplexType = Message<"test.ComplexType"> & { + /** + * @generated from oneof test.ComplexType.some_val + */ + someVal: { + /** + * @generated from field: string one_id = 1; + */ + value: string; + case: "oneId"; + } | { + /** + * @generated from field: int32 other_id = 2; + */ + value: number; + case: "otherId"; + } | { case: undefined; value?: undefined }; + + /** + * @generated from field: bool is_active = 3; + */ + isActive: boolean; +}; + +/** + * Describes the message test.ComplexType. + * Use `create(ComplexTypeSchema)` to create a new message. + */ +export const ComplexTypeSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_nested, 2); + +/** + * + * Complex message using nested protos and repeated fields + * + * @generated from message test.NestedMessage + */ +export type NestedMessage = Message<"test.NestedMessage"> & { + /** + * @generated from field: test.UserId user_id = 1; + */ + userId?: UserId; + + /** + * @generated from field: bool is_active = 2; + */ + isActive: boolean; + + /** + * @generated from field: repeated string experiments_active = 3; + */ + experimentsActive: string[]; + + /** + * @generated from field: google.protobuf.Timestamp updated_at = 4; + */ + updatedAt?: Timestamp; + + /** + * @generated from field: test.Status status = 5; + */ + status: Status; + + /** + * @generated from field: test.ComplexType complex_type = 6; + */ + complexType?: ComplexType; + + /** + * @generated from field: map map_type = 7; + */ + mapType: { [key: string]: string }; + + /** + * @generated from field: test.NestedMessage.InnerMessage inner = 8; + */ + inner?: NestedMessage_InnerMessage; +}; + +/** + * Describes the message test.NestedMessage. + * Use `create(NestedMessageSchema)` to create a new message. + */ +export const NestedMessageSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_nested, 3); + +/** + * @generated from message test.NestedMessage.InnerMessage + */ +export type NestedMessage_InnerMessage = Message<"test.NestedMessage.InnerMessage"> & { + /** + * @generated from field: string id = 1; + */ + id: string; + + /** + * @generated from field: repeated int32 ids = 2 [packed = true]; + */ + ids: number[]; +}; + +/** + * Describes the message test.NestedMessage.InnerMessage. + * Use `create(NestedMessage_InnerMessageSchema)` to create a new message. + */ +export const NestedMessage_InnerMessageSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_nested, 3, 0); + +/** + * @generated from enum test.NestedMessage.InnerEnum + */ +export enum NestedMessage_InnerEnum { + /** + * @generated from enum value: ZERO = 0; + */ + ZERO = 0, + + /** + * @generated from enum value: ALSO_ZERO = 0; + */ + ALSO_ZERO = 0, +} + +/** + * Describes the enum test.NestedMessage.InnerEnum. 
+ */ +export const NestedMessage_InnerEnumSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_test_schemaregistry_serde_nested, 3, 0); + +/** + * @generated from enum test.Status + */ +export enum Status { + /** + * @generated from enum value: ACTIVE = 0; + */ + ACTIVE = 0, + + /** + * @generated from enum value: INACTIVE = 1; + */ + INACTIVE = 1, +} + +/** + * Describes the enum test.Status. + */ +export const StatusSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_test_schemaregistry_serde_nested, 0); + diff --git a/test/schemaregistry/serde/test/newerwidget_pb.ts b/test/schemaregistry/serde/test/newerwidget_pb.ts new file mode 100644 index 00000000..69749e4d --- /dev/null +++ b/test/schemaregistry/serde/test/newerwidget_pb.ts @@ -0,0 +1,41 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/newerwidget.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/newerwidget.proto. + */ +export const file_test_schemaregistry_serde_newerwidget: GenFile = /*@__PURE__*/ + fileDesc("Cit0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL25ld2Vyd2lkZ2V0LnByb3RvEgR0ZXN0IjwKC05ld2VyV2lkZ2V0EgwKBG5hbWUYASABKAkSDgoGbGVuZ3RoGAIgASgFEg8KB3ZlcnNpb24YAyABKAVCCVoHLi4vdGVzdGIGcHJvdG8z"); + +/** + * @generated from message test.NewerWidget + */ +export type NewerWidget = Message<"test.NewerWidget"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: int32 length = 2; + */ + length: number; + + /** + * @generated from field: int32 version = 3; + */ + version: number; +}; + +/** + * Describes the message test.NewerWidget. + * Use `create(NewerWidgetSchema)` to create a new message. + */ +export const NewerWidgetSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_newerwidget, 0); + diff --git a/test/schemaregistry/serde/test/newwidget_pb.ts b/test/schemaregistry/serde/test/newwidget_pb.ts new file mode 100644 index 00000000..a24ad209 --- /dev/null +++ b/test/schemaregistry/serde/test/newwidget_pb.ts @@ -0,0 +1,41 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/newwidget.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/newwidget.proto. + */ +export const file_test_schemaregistry_serde_newwidget: GenFile = /*@__PURE__*/ + fileDesc("Cil0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL25ld3dpZGdldC5wcm90bxIEdGVzdCI6CglOZXdXaWRnZXQSDAoEbmFtZRgBIAEoCRIOCgZoZWlnaHQYAiABKAUSDwoHdmVyc2lvbhgDIAEoBUIJWgcuLi90ZXN0YgZwcm90bzM"); + +/** + * @generated from message test.NewWidget + */ +export type NewWidget = Message<"test.NewWidget"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: int32 height = 2; + */ + height: number; + + /** + * @generated from field: int32 version = 3; + */ + version: number; +}; + +/** + * Describes the message test.NewWidget. + * Use `create(NewWidgetSchema)` to create a new message. 
+ */ +export const NewWidgetSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_newwidget, 0); + diff --git a/test/schemaregistry/serde/test/test_pb.ts b/test/schemaregistry/serde/test/test_pb.ts new file mode 100644 index 00000000..09126158 --- /dev/null +++ b/test/schemaregistry/serde/test/test_pb.ts @@ -0,0 +1,102 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/test.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import { file_google_protobuf_descriptor } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/test.proto. + */ +export const file_test_schemaregistry_serde_test: GenFile = /*@__PURE__*/ + fileDesc("CiR0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL3Rlc3QucHJvdG8SBHRlc3QiyAIKC1Rlc3RNZXNzYWdlEhMKC3Rlc3Rfc3RyaW5nGAEgASgJEhEKCXRlc3RfYm9vbBgCIAEoCBISCgp0ZXN0X2J5dGVzGAMgASgMEhMKC3Rlc3RfZG91YmxlGAQgASgBEhIKCnRlc3RfZmxvYXQYBSABKAISFAoMdGVzdF9maXhlZDMyGAYgASgHEhQKDHRlc3RfZml4ZWQ2NBgHIAEoBhISCgp0ZXN0X2ludDMyGAggASgFEhIKCnRlc3RfaW50NjQYCSABKAMSFQoNdGVzdF9zZml4ZWQzMhgKIAEoDxIVCg10ZXN0X3NmaXhlZDY0GAsgASgQEhMKC3Rlc3Rfc2ludDMyGAwgASgREhMKC3Rlc3Rfc2ludDY0GA0gASgSEhMKC3Rlc3RfdWludDMyGA4gASgNEhMKC3Rlc3RfdWludDY0GA8gASgEQglaBy4uL3Rlc3RiBnByb3RvMw", [file_google_protobuf_descriptor]); + +/** + * @generated from message test.TestMessage + */ +export type TestMessage = Message<"test.TestMessage"> & { + /** + * @generated from field: string test_string = 1; + */ + testString: string; + + /** + * @generated from field: bool test_bool = 2; + */ + testBool: boolean; + + /** + * @generated from field: bytes test_bytes = 3; + */ + testBytes: Uint8Array; + + /** + * @generated from field: double test_double = 4; + */ + testDouble: number; + + /** + * @generated from field: float test_float = 5; + */ + testFloat: number; + + /** + * @generated from field: fixed32 test_fixed32 = 6; + */ + testFixed32: number; + + /** + * @generated from field: fixed64 test_fixed64 = 7; + */ + testFixed64: bigint; + + /** + * @generated from field: int32 test_int32 = 8; + */ + testInt32: number; + + /** + * @generated from field: int64 test_int64 = 9; + */ + testInt64: bigint; + + /** + * @generated from field: sfixed32 test_sfixed32 = 10; + */ + testSfixed32: number; + + /** + * @generated from field: sfixed64 test_sfixed64 = 11; + */ + testSfixed64: bigint; + + /** + * @generated from field: sint32 test_sint32 = 12; + */ + testSint32: number; + + /** + * @generated from field: sint64 test_sint64 = 13; + */ + testSint64: bigint; + + /** + * @generated from field: uint32 test_uint32 = 14; + */ + testUint32: number; + + /** + * @generated from field: uint64 test_uint64 = 15; + */ + testUint64: bigint; +}; + +/** + * Describes the message test.TestMessage. + * Use `create(TestMessageSchema)` to create a new message. 
+ */ +export const TestMessageSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_test, 0); + diff --git a/test/schemaregistry/serde/test/widget_pb.ts b/test/schemaregistry/serde/test/widget_pb.ts new file mode 100644 index 00000000..a8924d86 --- /dev/null +++ b/test/schemaregistry/serde/test/widget_pb.ts @@ -0,0 +1,41 @@ +// @generated by protoc-gen-es v2.0.0 with parameter "target=ts" +// @generated from file test/schemaregistry/serde/widget.proto (package test, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file test/schemaregistry/serde/widget.proto. + */ +export const file_test_schemaregistry_serde_widget: GenFile = /*@__PURE__*/ + fileDesc("CiZ0ZXN0L3NjaGVtYXJlZ2lzdHJ5L3NlcmRlL3dpZGdldC5wcm90bxIEdGVzdCI1CgZXaWRnZXQSDAoEbmFtZRgBIAEoCRIMCgRzaXplGAIgASgFEg8KB3ZlcnNpb24YAyABKAVCCVoHLi4vdGVzdGIGcHJvdG8z"); + +/** + * @generated from message test.Widget + */ +export type Widget = Message<"test.Widget"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: int32 size = 2; + */ + size: number; + + /** + * @generated from field: int32 version = 3; + */ + version: number; +}; + +/** + * Describes the message test.Widget. + * Use `create(WidgetSchema)` to create a new message. + */ +export const WidgetSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_test_schemaregistry_serde_widget, 0); + diff --git a/test/schemaregistry/serde/wildcard-matcher.spec.ts b/test/schemaregistry/serde/wildcard-matcher.spec.ts new file mode 100644 index 00000000..4e76f79e --- /dev/null +++ b/test/schemaregistry/serde/wildcard-matcher.spec.ts @@ -0,0 +1,86 @@ +import { match } from '../../../schemaregistry/serde/wildcard-matcher'; +import { describe, expect, it } from '@jest/globals'; + +describe('WildcardMatcher', () => { + it('when match', () => { + expect(match('', 'Foo')).toBe(false) + }) + it('when match', () => { + expect(match('Foo', '')).toBe(false) + }) + it('when match', () => { + expect(match('', '')).toBe(true) + }) + it('when match', () => { + expect(match('Foo', 'Foo')).toBe(true) + }) + it('when match', () => { + expect(match('', '*')).toBe(true) + }) + it('when match', () => { + expect(match('', '?')).toBe(false) + }) + it('when match', () => { + expect(match('Foo', 'Fo*')).toBe(true) + }) + it('when match', () => { + expect(match('Foo', 'Fo?')).toBe(true) + }) + it('when match', () => { + expect(match('Foo Bar and Catflag', 'Fo*')).toBe(true) + }) + it('when match', () => { + expect(match('New Bookmarks', 'N?w ?o?k??r?s')).toBe(true) + }) + it('when match', () => { + expect(match('Foo', 'Bar')).toBe(false) + }) + it('when match', () => { + expect(match('Foo Bar Foo', 'F*o Bar*')).toBe(true) + }) + it('when match', () => { + expect(match('Adobe Acrobat Installer', 'Ad*er')).toBe(true) + }) + it('when match', () => { + expect(match('Foo', '*Foo')).toBe(true) + }) + it('when match', () => { + expect(match('BarFoo', '*Foo')).toBe(true) + }) + it('when match', () => { + expect(match('Foo', 'Foo*')).toBe(true) + }) + it('when match', () => { + expect(match('FOO', '*Foo')).toBe(false) + }) + it('when match', () => { + expect(match('BARFOO', '*Foo')).toBe(false) + }) + it('when match', () => { + expect(match('FOO', 'Foo*')).toBe(false) + }) + it('when match', () => { + expect(match('FOOBAR', 'Foo*')).toBe(false) 
+ }) + it('when match', () => { + expect(match('eve', 'eve*')).toBe(true) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'a*.bob.eve')).toBe(true) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'a*.bob.e*')).toBe(true) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'a*')).toBe(false) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'a**')).toBe(true) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'alice.bob*')).toBe(false) + }) + it('when match', () => { + expect(match('alice.bob.eve', 'alice.bob**')).toBe(true) + }) +}) diff --git a/test/schemaregistry/test-constants.ts b/test/schemaregistry/test-constants.ts new file mode 100644 index 00000000..fe5096c5 --- /dev/null +++ b/test/schemaregistry/test-constants.ts @@ -0,0 +1,35 @@ +import { CreateAxiosDefaults } from 'axios'; +import { ClientConfig, BasicAuthCredentials } from '../../schemaregistry/rest-service'; + +const baseUrls = ['http://localhost:8081']; + +const mockBaseUrls = ['http://mocked-url']; + +const createAxiosDefaults: CreateAxiosDefaults = { + timeout: 10000 +}; + +const basicAuthCredentials: BasicAuthCredentials = { + credentialsSource: 'USER_INFO', + userInfo: 'RBACAllowedUser-lsrc1:nohash', +}; + +const clientConfig: ClientConfig = { + baseURLs: baseUrls, + createAxiosDefaults: createAxiosDefaults, + isForward: false, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials, +}; + +const mockClientConfig: ClientConfig = { + baseURLs: mockBaseUrls, + createAxiosDefaults: createAxiosDefaults, + isForward: false, + cacheCapacity: 512, + cacheLatestTtlSecs: 60, + basicAuthCredentials: basicAuthCredentials +}; + +export { clientConfig, mockClientConfig }; diff --git a/test/topic-partition.spec.js b/test/topic-partition.spec.js index 4d9eab38..b3030aad 100644 --- a/test/topic-partition.spec.js +++ b/test/topic-partition.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/test/util.spec.js b/test/util.spec.js index dfa11440..7185a800 100644 --- a/test/util.spec.js +++ b/test/util.spec.js @@ -1,5 +1,5 @@ /* - * confluent-kafka-js - Node.js wrapper for RdKafka C/C++ library + * confluent-kafka-javascript - Node.js wrapper for RdKafka C/C++ library * * Copyright (c) 2016-2023 Blizzard Entertainment * diff --git a/tsconfig.json b/tsconfig.json index d33b7a90..c9e6c6fe 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -6,7 +6,7 @@ "noImplicitThis": true, "strictNullChecks": true, "baseUrl": ".", - "types": ["node_modules/@types/node"], + "types": ["./node_modules/@types/node"], "typeRoots": ["."], "noEmit": true, "forceConsistentCasingInFileNames": true, diff --git a/types/config.d.ts b/types/config.d.ts index e78a4141..f0b1f708 100644 --- a/types/config.d.ts +++ b/types/config.d.ts @@ -1,4 +1,4 @@ -// ====== Generated from librdkafka 2.3.0 file CONFIGURATION.md ====== +// ====== Generated from librdkafka master file CONFIGURATION.md ====== // Code that generated this is a derivative work of the code from Nam Nguyen // https://gist.github.com/ntgn81/066c2c8ec5b4238f85d1e9168a04e3fb @@ -762,7 +762,7 @@ export interface ProducerGlobalConfig extends GlobalConfig { /** * Delivery report callback (set with rd_kafka_conf_set_dr_cb()) */ - "dr_cb"?: boolean | Function; + "dr_cb"?: boolean | ((...args: any[]) => any); /** * Delivery report 
callback (set with rd_kafka_conf_set_dr_msg_cb()) @@ -810,7 +810,7 @@ export interface ConsumerGlobalConfig extends GlobalConfig { "heartbeat.interval.ms"?: number; /** - * Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`. + * Group protocol type for the `generic` group protocol. NOTE: Currently, the only supported group protocol type is `consumer`. * * @default consumer */ @@ -936,12 +936,12 @@ export interface ConsumerGlobalConfig extends GlobalConfig { /** * Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) */ - "rebalance_cb"?: boolean | Function; + "rebalance_cb"?: boolean | ((...args: any[]) => any); /** * Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) */ - "offset_commit_cb"?: boolean | Function; + "offset_commit_cb"?: boolean | ((...args: any[]) => any); /** * Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. diff --git a/types/errors.d.ts b/types/errors.d.ts index 439ec72b..333b17dc 100644 --- a/types/errors.d.ts +++ b/types/errors.d.ts @@ -1,4 +1,4 @@ -// ====== Generated from librdkafka 2.3.0 file src-cpp/rdkafkacpp.h ====== +// ====== Generated from librdkafka master file src-cpp/rdkafkacpp.h ====== export const CODES: { ERRORS: { /* Internal errors to rdkafka: */ /** Begin internal error codes (**-200**) */ diff --git a/types/kafkajs.d.ts b/types/kafkajs.d.ts index 97e5e506..259fe07f 100644 --- a/types/kafkajs.d.ts +++ b/types/kafkajs.d.ts @@ -1,15 +1,34 @@ -import * as tls from 'tls' - -export type BrokersFunction = () => string[] | Promise - -export type Mechanism = { - mechanism: string +import { ConsumerGlobalConfig, GlobalConfig, ProducerGlobalConfig } from './config' +import { + ConsumerGroupStates, + GroupOverview, + LibrdKafkaError, + GroupDescriptions, + DeleteGroupsResult +} from './rdkafka' + +// Admin API related interfaces, types etc; and Error types are common, so +// just re-export them from here too. +export { + ConsumerGroupStates, + GroupOverview, + LibrdKafkaError, + GroupDescriptions, + DeleteGroupsResult +} from './rdkafka' + +export interface OauthbearerProviderResponse { + value: string, + principal: string, + lifetime: number, // Lifetime must be in milliseconds. 
+ extensions?: Map | { [key: string]: string }, } type SASLMechanismOptionsMap = { plain: { username: string; password: string } 'scram-sha-256': { username: string; password: string } 'scram-sha-512': { username: string; password: string } + oauthbearer: { oauthBearerProvider: () => Promise } } export type SASLMechanism = keyof SASLMechanismOptionsMap @@ -18,16 +37,68 @@ type SASLMechanismOptions = T extends SASLMechanism : never export type SASLOptions = SASLMechanismOptions +export interface RetryOptions { + maxRetryTime?: number + initialRetryTime?: number + retries?: number +} + +export enum logLevel { + NOTHING = 0, + ERROR = 1, + WARN = 2, + INFO = 3, + DEBUG = 4, +} + +export type Logger = { + info: (message: string, extra?: object) => void + error: (message: string, extra?: object) => void + warn: (message: string, extra?: object) => void + debug: (message: string, extra?: object) => void + + namespace: (namespace: string, logLevel?: logLevel) => Logger + setLogLevel: (logLevel: logLevel) => void +} + export interface KafkaConfig { - brokers: string[] | BrokersFunction - ssl?: boolean - sasl?: SASLOptions | Mechanism + brokers: string[], + ssl?: boolean, + sasl?: SASLOptions, clientId?: string connectionTimeout?: number authenticationTimeout?: number - reauthenticationThreshold?: number requestTimeout?: number - enforceRequestTimeout?: boolean + enforceRequestTimeout?: boolean, + retry?: RetryOptions, + logLevel?: logLevel, + logger?: Logger, +} + +export interface CommonConstructorConfig extends GlobalConfig { + kafkaJS?: KafkaConfig; +} + +export class Kafka { + constructor(config: CommonConstructorConfig) + producer(config?: ProducerConstructorConfig): Producer + consumer(config: ConsumerConstructorConfig): Consumer + admin(config?: AdminConstructorConfig): Admin +} + +type Client = { + connect(): Promise + disconnect(): Promise + logger(): Logger + setSaslCredentialProvider(authInfo: { username: string, password: string }): void +} + +export enum CompressionTypes { + None = 'none', + GZIP = 'gzip', + Snappy = 'snappy', + LZ4 = 'lz4', + ZSTD = 'zstd', } export interface ProducerConfig { @@ -37,6 +108,16 @@ export interface ProducerConfig { transactionalId?: string transactionTimeout?: number maxInFlightRequests?: number + acks?: number + compression?: CompressionTypes + timeout?: number, + retry?: RetryOptions, + logLevel?: logLevel, + logger?: Logger, +} + +export interface ProducerConstructorConfig extends ProducerGlobalConfig { + kafkaJS?: ProducerConfig; } export interface IHeaders { @@ -51,27 +132,18 @@ export interface Message { timestamp?: string } -export enum CompressionTypes { - None = 0, - GZIP = 1, - Snappy = 2, - LZ4 = 3, - ZSTD = 4, -} - -export var CompressionCodecs: { - [CompressionTypes.GZIP]: () => any - [CompressionTypes.Snappy]: () => any - [CompressionTypes.LZ4]: () => any - [CompressionTypes.ZSTD]: () => any +export interface ProducerRecord { + topic: string + messages: Message[] } -export interface ProducerRecord { +export interface TopicMessages { topic: string messages: Message[] - acks?: number - timeout?: number - compression?: CompressionTypes +} + +export interface ProducerBatch { + topicMessages?: TopicMessages[] } export type RecordMetadata = { @@ -85,28 +157,31 @@ export type RecordMetadata = { logStartOffset?: string } -export class Kafka { - constructor(config: KafkaConfig) - producer(config?: ProducerConfig): Producer - consumer(config: ConsumerConfig): Consumer -} +export type Transaction = Producer; -type Sender = { +export type Producer = 
Client & { send(record: ProducerRecord): Promise + sendBatch(batch: ProducerBatch): Promise + flush(args?: { timeout?: number }): Promise + + // Transactional producer-only methods. + transaction(): Promise + commit(): Promise + abort(): Promise + sendOffsets(args: { consumer: Consumer, topics: TopicOffsets[] }): Promise + isActive(): boolean } -export type Producer = Sender & { - connect(): Promise - disconnect(): Promise +export enum PartitionAssigners { + roundRobin = 'roundrobin', + range = 'range', + cooperativeSticky = 'cooperative-sticky' } -export interface RetryOptions { - maxRetryTime?: number - initialRetryTime?: number - factor?: number - multiplier?: number - retries?: number - restartOnFailure?: (e: Error) => Promise +export enum PartitionAssignors { + roundRobin = 'roundrobin', + range = 'range', + cooperativeSticky = 'cooperative-sticky' } export interface ConsumerConfig { @@ -119,169 +194,24 @@ export interface ConsumerConfig { minBytes?: number maxBytes?: number maxWaitTimeInMs?: number - retry?: RetryOptions & { restartOnFailure?: (err: Error) => Promise } + retry?: RetryOptions, + logLevel?: logLevel, + logger?: Logger, allowAutoTopicCreation?: boolean maxInFlightRequests?: number readUncommitted?: boolean rackId?: string + fromBeginning?: boolean + autoCommit?: boolean + autoCommitInterval?: number, + partitionAssigners?: PartitionAssigners[], + partitionAssignors?: PartitionAssignors[], } -export type ConsumerEvents = { - HEARTBEAT: 'consumer.heartbeat' - COMMIT_OFFSETS: 'consumer.commit_offsets' - GROUP_JOIN: 'consumer.group_join' - FETCH_START: 'consumer.fetch_start' - FETCH: 'consumer.fetch' - START_BATCH_PROCESS: 'consumer.start_batch_process' - END_BATCH_PROCESS: 'consumer.end_batch_process' - CONNECT: 'consumer.connect' - DISCONNECT: 'consumer.disconnect' - STOP: 'consumer.stop' - CRASH: 'consumer.crash' - REBALANCING: 'consumer.rebalancing' - RECEIVED_UNSUBSCRIBED_TOPICS: 'consumer.received_unsubscribed_topics' - REQUEST: 'consumer.network.request' - REQUEST_TIMEOUT: 'consumer.network.request_timeout' - REQUEST_QUEUE_SIZE: 'consumer.network.request_queue_size' -} - - -export enum logLevel { - NOTHING = 0, - ERROR = 1, - WARN = 2, - INFO = 4, - DEBUG = 5, -} - -export type Logger = { - info: (message: string, extra?: object) => void - error: (message: string, extra?: object) => void - warn: (message: string, extra?: object) => void - debug: (message: string, extra?: object) => void - - namespace: (namespace: string, logLevel?: logLevel) => Logger - setLogLevel: (logLevel: logLevel) => void -} - -type ValueOf = T[keyof T] - -export interface InstrumentationEvent { - id: string - type: string - timestamp: number - payload: T -} - -export type RemoveInstrumentationEventListener = () => void - -export type ConsumerFetchStartEvent = InstrumentationEvent<{ nodeId: number }> -export type ConsumerFetchEvent = InstrumentationEvent<{ - numberOfBatches: number - duration: number - nodeId: number -}> - -export type ConsumerHeartbeatEvent = InstrumentationEvent<{ - groupId: string - memberId: string - groupGenerationId: number -}> - -export type ConsumerCommitOffsetsEvent = InstrumentationEvent<{ - groupId: string - memberId: string - groupGenerationId: number - topics: TopicOffsets[] -}> - -export interface IMemberAssignment { - [key: string]: number[] -} - -export type ConsumerGroupJoinEvent = InstrumentationEvent<{ - duration: number - groupId: string - isLeader: boolean - leaderId: string - groupProtocol: string - memberId: string - memberAssignment: IMemberAssignment 
-}> - -interface IBatchProcessEvent { - topic: string - partition: number - highWatermark: string - offsetLag: string - offsetLagLow: string - batchSize: number - firstOffset: string - lastOffset: string +export interface ConsumerConstructorConfig extends ConsumerGlobalConfig { + kafkaJS?: ConsumerConfig; } -export type ConsumerStartBatchProcessEvent = InstrumentationEvent - -export type ConsumerEndBatchProcessEvent = InstrumentationEvent< - IBatchProcessEvent & { duration: number } -> - -export type ConnectEvent = InstrumentationEvent - -export type DisconnectEvent = InstrumentationEvent - -export type ConsumerCrashEvent = InstrumentationEvent<{ - error: Error - groupId: string - restart: boolean -}> - -export type ConsumerRebalancingEvent = InstrumentationEvent<{ - groupId: string - memberId: string -}> - -export type ConsumerReceivedUnsubcribedTopicsEvent = InstrumentationEvent<{ - groupId: string - generationId: number - memberId: string - assignedTopics: string[] - topicsSubscribed: string[] - topicsNotSubscribed: string[] -}> - -export type RequestEvent = InstrumentationEvent<{ - apiKey: number - apiName: string - apiVersion: number - broker: string - clientId: string - correlationId: number - createdAt: number - duration: number - pendingDuration: number - sentAt: number - size: number -}> - -export type RequestTimeoutEvent = InstrumentationEvent<{ - apiKey: number - apiName: string - apiVersion: number - broker: string - clientId: string - correlationId: number - createdAt: number - pendingDuration: number - sentAt: number -}> - -export type RequestQueueSizeEvent = InstrumentationEvent<{ - broker: string - clientId: string - queueSize: number -}> - interface MessageSetEntry { key: Buffer | null value: Buffer | null @@ -290,6 +220,7 @@ interface MessageSetEntry { offset: string size: number headers?: never + leaderEpoch?: number } interface RecordBatchEntry { @@ -300,6 +231,7 @@ interface RecordBatchEntry { offset: string headers: IHeaders size?: never + leaderEpoch?: number } export type Batch = { @@ -310,8 +242,6 @@ export type Batch = { isEmpty(): boolean firstOffset(): string | null lastOffset(): string - offsetLag(): string - offsetLagLow(): string } export type KafkaMessage = MessageSetEntry | RecordBatchEntry @@ -324,13 +254,22 @@ export interface EachMessagePayload { pause(): () => void } +export interface PartitionOffset { + partition: number + offset: string +} + +export interface TopicOffsets { + topic: string + partitions: PartitionOffset[] +} + export interface EachBatchPayload { batch: Batch resolveOffset(offset: string): void heartbeat(): Promise pause(): () => void - commitOffsetsIfNecessary(offsets?: Offsets): Promise - uncommittedOffsets(): OffsetsByTopicPartition + commitOffsetsIfNecessary(): Promise isRunning(): boolean isStale(): boolean } @@ -339,29 +278,18 @@ export type EachBatchHandler = (payload: EachBatchPayload) => Promise export type EachMessageHandler = (payload: EachMessagePayload) => Promise -export type ConsumerSubscribeTopics = { topics: (string | RegExp)[]; fromBeginning?: boolean } +/** + * @deprecated Replaced by ConsumerSubscribeTopics + */ +export type ConsumerSubscribeTopic = { topic: string | RegExp; replace?: boolean } + +export type ConsumerSubscribeTopics = { topics: (string | RegExp)[]; replace?: boolean } export type ConsumerRunConfig = { - autoCommit?: boolean - autoCommitInterval?: number | null - autoCommitThreshold?: number | null - partitionsConsumedConcurrently?: number + eachBatchAutoResolve?: boolean, + 
partitionsConsumedConcurrently?: number, eachMessage?: EachMessageHandler -} - -export interface Offsets { - topics: TopicOffsets[] -} - -export interface TopicOffsets { - topic: string - partitions: PartitionOffset[] -} - - -export interface PartitionOffset { - partition: number - offset: string + eachBatch?: EachBatchHandler } export type TopicPartitions = { topic: string; partitions: number[] } @@ -369,6 +297,7 @@ export type TopicPartitions = { topic: string; partitions: number[] } export type TopicPartition = { topic: string partition: number + leaderEpoch?: number } export type TopicPartitionOffset = TopicPartition & { offset: string @@ -382,112 +311,62 @@ export interface OffsetsByTopicPartition { topics: TopicOffsets[] } -export type MemberDescription = { - clientHost: string - clientId: string - memberId: string - memberAssignment: Buffer - memberMetadata: Buffer +export type Consumer = Client & { + subscribe(subscription: ConsumerSubscribeTopics | ConsumerSubscribeTopic): Promise + stop(): Promise + run(config?: ConsumerRunConfig): Promise + storeOffsets(topicPartitions: Array): void + commitOffsets(topicPartitions?: Array): Promise + committed(topicPartitions?: Array, timeout?: number): Promise + seek(topicPartitionOffset: TopicPartitionOffset): void + pause(topics: Array<{ topic: string; partitions?: number[] }>): void + paused(): TopicPartitions[] + resume(topics: Array<{ topic: string; partitions?: number[] }>): void + assignment(): TopicPartition[] } -export type ConsumerGroupState = - | 'Unknown' - | 'PreparingRebalance' - | 'CompletingRebalance' - | 'Stable' - | 'Dead' - | 'Empty' +export interface AdminConfig { + retry?: RetryOptions + logLevel?: logLevel, + logger?: Logger, +} -export type GroupDescription = { - groupId: string - members: MemberDescription[] - protocol: string - protocolType: string - state: ConsumerGroupState +export interface AdminConstructorConfig extends GlobalConfig { + kafkaJS?: AdminConfig; +} + +export interface ReplicaAssignment { + partition: number + replicas: Array } -export type Consumer = { +export interface IResourceConfigEntry { + name: string + value: string +} + +export interface ITopicConfig { + topic: string + numPartitions?: number + replicationFactor?: number + configEntries?: IResourceConfigEntry[] +} + +export type Admin = { connect(): Promise disconnect(): Promise - subscribe(subscription: ConsumerSubscribeTopics ): Promise - stop(): Promise - run(config?: ConsumerRunConfig): Promise - commitOffsets(topicPartitions: Array): Promise - seek(topicPartitionOffset: TopicPartitionOffset): Promise - describeGroup(): Promise - pause(topics: Array<{ topic: string; partitions?: number[] }>): void - paused(): TopicPartitions[] - assignment(): TopicPartitions[] - resume(topics: Array<{ topic: string; partitions?: number[] }>): void - on( - eventName: ConsumerEvents['HEARTBEAT'], - listener: (event: ConsumerHeartbeatEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['COMMIT_OFFSETS'], - listener: (event: ConsumerCommitOffsetsEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['GROUP_JOIN'], - listener: (event: ConsumerGroupJoinEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['FETCH_START'], - listener: (event: ConsumerFetchStartEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['FETCH'], - listener: (event: ConsumerFetchEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: 
ConsumerEvents['START_BATCH_PROCESS'], - listener: (event: ConsumerStartBatchProcessEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['END_BATCH_PROCESS'], - listener: (event: ConsumerEndBatchProcessEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['CONNECT'], - listener: (event: ConnectEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['DISCONNECT'], - listener: (event: DisconnectEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['STOP'], - listener: (event: InstrumentationEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['CRASH'], - listener: (event: ConsumerCrashEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['REBALANCING'], - listener: (event: ConsumerRebalancingEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['RECEIVED_UNSUBSCRIBED_TOPICS'], - listener: (event: ConsumerReceivedUnsubcribedTopicsEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['REQUEST'], - listener: (event: RequestEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['REQUEST_TIMEOUT'], - listener: (event: RequestTimeoutEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ConsumerEvents['REQUEST_QUEUE_SIZE'], - listener: (event: RequestQueueSizeEvent) => void - ): RemoveInstrumentationEventListener - on( - eventName: ValueOf, - listener: (event: InstrumentationEvent) => void - ): RemoveInstrumentationEventListener - logger(): Logger - readonly events: ConsumerEvents + createTopics(options: { + timeout?: number + topics: ITopicConfig[] + }): Promise + deleteTopics(options: { topics: string[]; timeout?: number }): Promise + listTopics(options?: { timeout?: number }): Promise + listGroups(options?: { + timeout?: number, + matchConsumerGroupStates?: ConsumerGroupStates[] + }): Promise<{ groups: GroupOverview[], errors: LibrdKafkaError[] }> + describeGroups( + groups: string[], + options?: { timeout?: number, includeAuthorizedOperations?: boolean }): Promise + deleteGroups(groupIds: string[], options?: { timeout?: number }): Promise } - diff --git a/types/rdkafka.d.ts b/types/rdkafka.d.ts new file mode 100644 index 00000000..6a46da26 --- /dev/null +++ b/types/rdkafka.d.ts @@ -0,0 +1,469 @@ +import { Readable, ReadableOptions, Writable, WritableOptions } from 'stream'; +import { EventEmitter } from 'events'; +import { + GlobalConfig, + TopicConfig, + ConsumerGlobalConfig, + ConsumerTopicConfig, + ProducerGlobalConfig, + ProducerTopicConfig, +} from './config'; + +export * from './config'; +export * from './errors'; +import { Kafka } from './kafkajs'; +import * as errors from './errors'; + +export interface LibrdKafkaError { + message: string; + code: number; + errno: number; + origin: string; + stack?: string; + isFatal?: boolean; + isRetriable?: boolean; + isTxnRequiresAbort?: boolean; +} + +export interface ReadyInfo { + name: string; +} + +export interface ClientMetrics { + connectionOpened: number; +} + +export interface MetadataOptions { + topic?: string; + allTopics?: boolean; + timeout?: number; +} + +export interface BrokerMetadata { + id: number; + host: string; + port: number; +} + +export interface PartitionMetadata { + id: number; + leader: number; + replicas: number[]; + isrs: number[]; +} + +export interface TopicMetadata { + name: string; + 
partitions: PartitionMetadata[]; +} + +export interface Metadata { + orig_broker_id: number; + orig_broker_name: string; + topics: TopicMetadata[]; + brokers: BrokerMetadata[]; +} + +export interface WatermarkOffsets{ + lowOffset: number; + highOffset: number; +} + +export interface TopicPartition { + topic: string; + partition: number; + error?: LibrdKafkaError; + leaderEpoch?: number; +} + +export interface TopicPartitionOffset extends TopicPartition { + offset: number; +} + +export interface TopicPartitionOffsetAndMetadata extends TopicPartitionOffset { + metadata?: string | null; +} + +export type TopicPartitionTime = TopicPartitionOffset; + +export type EofEvent = TopicPartitionOffset; + +export type Assignment = TopicPartition | TopicPartitionOffset; + +export interface DeliveryReport extends TopicPartitionOffset { + value?: MessageValue; + size: number; + key?: MessageKey; + timestamp?: number; + opaque?: any; +} + +export type NumberNullUndefined = number | null | undefined; + +export type MessageKey = Buffer | string | null | undefined; +export type MessageHeader = { [key: string]: string | Buffer }; +export type MessageValue = Buffer | null; +export type SubscribeTopic = string | RegExp; +export type SubscribeTopicList = SubscribeTopic[]; + +export interface Message extends TopicPartitionOffset { + value: MessageValue; + size: number; + topic: string; + key?: MessageKey; + timestamp?: number; + headers?: MessageHeader[]; + opaque?: any; +} + +export interface ReadStreamOptions extends ReadableOptions { + topics: SubscribeTopicList | SubscribeTopic | ((metadata: Metadata) => SubscribeTopicList); + waitInterval?: number; + fetchSize?: number; + objectMode?: boolean; + highWaterMark?: number; + autoClose?: boolean; + streamAsBatch?: boolean; + connectOptions?: any; +} + +export interface WriteStreamOptions extends WritableOptions { + encoding?: string; + objectMode?: boolean; + topic?: string; + autoClose?: boolean; + pollInterval?: number; + connectOptions?: any; +} + +export interface ProducerStream extends Writable { + producer: Producer; + connect(metadataOptions?: MetadataOptions): void; + close(cb?: () => void): void; +} + +export interface ConsumerStream extends Readable { + consumer: KafkaConsumer; + connect(options: ConsumerGlobalConfig): void; + close(cb?: () => void): void; +} + +type KafkaClientEvents = 'disconnected' | 'ready' | 'connection.failure' | 'event.error' | 'event.stats' | 'event.log' | 'event.event' | 'event.throttle'; +type KafkaConsumerEvents = 'data' | 'partition.eof' | 'rebalance' | 'rebalance.error' | 'subscribed' | 'unsubscribed' | 'unsubscribe' | 'offset.commit' | KafkaClientEvents; +type KafkaProducerEvents = 'delivery-report' | KafkaClientEvents; + +type EventListenerMap = { + // ### Client + // connectivity events + 'disconnected': (metrics: ClientMetrics) => void, + 'ready': (info: ReadyInfo, metadata: Metadata) => void, + 'connection.failure': (error: LibrdKafkaError, metrics: ClientMetrics) => void, + // event messages + 'event.error': (error: LibrdKafkaError) => void, + 'event.stats': (eventData: any) => void, + 'event.log': (eventData: any) => void, + 'event.event': (eventData: any) => void, + 'event.throttle': (eventData: any) => void, + // ### Consumer only + // domain events + 'data': (arg: Message) => void, + 'partition.eof': (arg: EofEvent) => void, + 'rebalance': (err: LibrdKafkaError, assignments: TopicPartition[]) => void, + 'rebalance.error': (err: Error) => void, + // connectivity events + 'subscribed': (topics: SubscribeTopicList) => 
void, + 'unsubscribe': () => void, + 'unsubscribed': () => void, + // offsets + 'offset.commit': (error: LibrdKafkaError, topicPartitions: TopicPartitionOffset[]) => void, + // ### Producer only + // delivery + 'delivery-report': (error: LibrdKafkaError, report: DeliveryReport) => void, +} + +type EventListener = K extends keyof EventListenerMap ? EventListenerMap[K] : never; + +export abstract class Client extends EventEmitter { + constructor(globalConf: GlobalConfig, SubClientType: any, topicConf: TopicConfig); + + connect(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): this; + + getClient(): any; + + connectedTime(): number; + + getLastError(): LibrdKafkaError; + + disconnect(cb?: (err: any, data: ClientMetrics) => any): this; + disconnect(timeout: number, cb?: (err: any, data: ClientMetrics) => any): this; + + isConnected(): boolean; + + getMetadata(metadataOptions?: MetadataOptions, cb?: (err: LibrdKafkaError, data: Metadata) => any): any; + + queryWatermarkOffsets(topic: string, partition: number, timeout: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any; + queryWatermarkOffsets(topic: string, partition: number, cb?: (err: LibrdKafkaError, offsets: WatermarkOffsets) => any): any; + + setSaslCredentials(username: string, password: string): void; + + on(event: E, listener: EventListener): this; + once(event: E, listener: EventListener): this; +} + +export class KafkaConsumer extends Client { + constructor(conf: ConsumerGlobalConfig | ConsumerTopicConfig, topicConf?: ConsumerTopicConfig); + + assign(assignments: Assignment[]): this; + + assignments(): Assignment[]; + + commit(topicPartition: TopicPartitionOffsetAndMetadata | TopicPartitionOffsetAndMetadata[]): this; + commit(): this; + + commitMessage(msg: TopicPartitionOffset): this; + + commitMessageSync(msg: TopicPartitionOffset): this; + + commitSync(topicPartition: TopicPartitionOffsetAndMetadata | TopicPartitionOffsetAndMetadata[]): this; + + committed(toppars: TopicPartition[], timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffsetAndMetadata[]) => void): this; + committed(timeout: number, cb: (err: LibrdKafkaError, topicPartitions: TopicPartitionOffsetAndMetadata[]) => void): this; + + consume(number: number, cb?: (err: LibrdKafkaError, messages: Message[]) => void): void; + consume(cb: (err: LibrdKafkaError, messages: Message[]) => void): void; + consume(): void; + + getWatermarkOffsets(topic: string, partition: number): WatermarkOffsets; + + offsetsStore(topicPartitions: TopicPartitionOffsetAndMetadata[]): any; + + pause(topicPartitions: TopicPartition[]): any; + + position(toppars?: TopicPartition[]): TopicPartitionOffset[]; + + resume(topicPartitions: TopicPartition[]): any; + + seek(toppar: TopicPartitionOffset, timeout: number | null, cb: (err: LibrdKafkaError) => void): this; + + setDefaultConsumeTimeout(timeoutMs: number): void; + + setDefaultConsumeLoopTimeoutDelay(timeoutMs: number): void; + + subscribe(topics: SubscribeTopicList): this; + + subscription(): string[]; + + unassign(): this; + + unsubscribe(): this; + + offsetsForTimes(topicPartitions: TopicPartitionTime[], timeout: number, cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void; + offsetsForTimes(topicPartitions: TopicPartitionTime[], cb?: (err: LibrdKafkaError, offsets: TopicPartitionOffset[]) => any): void; + + static createReadStream(conf: ConsumerGlobalConfig, topicConfig: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): 
ConsumerStream; +} + +export class Producer extends Client { + constructor(conf: ProducerGlobalConfig | ProducerTopicConfig, topicConf?: ProducerTopicConfig); + + flush(timeout?: NumberNullUndefined, cb?: (err: LibrdKafkaError) => void): this; + + poll(): this; + + produce(topic: string, partition: NumberNullUndefined, message: MessageValue, key?: MessageKey, timestamp?: NumberNullUndefined, opaque?: any, headers?: MessageHeader[]): any; + + setPollInterval(interval: number): this; + setPollInBackground(set: boolean): void; + + static createWriteStream(conf: ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream; + + initTransactions(cb: (err: LibrdKafkaError) => void): void; + initTransactions(timeout: number, cb: (err: LibrdKafkaError) => void): void; + beginTransaction(cb: (err: LibrdKafkaError) => void): void; + commitTransaction(cb: (err: LibrdKafkaError) => void): void; + commitTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void; + abortTransaction(cb: (err: LibrdKafkaError) => void): void; + abortTransaction(timeout: number, cb: (err: LibrdKafkaError) => void): void; + sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, cb: (err: LibrdKafkaError) => void): void; + sendOffsetsToTransaction(offsets: TopicPartitionOffset[], consumer: KafkaConsumer, timeout: number, cb: (err: LibrdKafkaError) => void): void; +} + +export class HighLevelProducer extends Producer { + produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, callback: (err: any, offset?: NumberNullUndefined) => void): any; + produce(topic: string, partition: NumberNullUndefined, message: any, key: any, timestamp: NumberNullUndefined, headers: MessageHeader[], callback: (err: any, offset?: NumberNullUndefined) => void): any; + + setKeySerializer(serializer: (key: any, cb: (err: any, key: MessageKey) => void) => void): void; + setKeySerializer(serializer: (key: any) => MessageKey | Promise): void; + setValueSerializer(serializer: (value: any, cb: (err: any, value: MessageValue) => void) => void): void; + setValueSerializer(serializer: (value: any) => MessageValue | Promise): void; + setTopicKeySerializer(serializer: (topic: string, key: any, cb: (err: any, key: MessageKey) => void) => void): void; + setTopicKeySerializer(serializer: (topic: string, key: any) => MessageKey | Promise): void; + setTopicValueSerializer(serializer: (topic: string, value: any, cb: (err: any, value: MessageValue) => void) => void): void; + setTopicValueSerializer(serializer: (topic: string, value: any) => MessageValue | Promise): void; +} + +export const features: string[]; + +export const librdkafkaVersion: string; + +export function createReadStream(conf: ConsumerGlobalConfig, topicConf: ConsumerTopicConfig, streamOptions: ReadStreamOptions | number): ConsumerStream; + +export function createWriteStream(conf: ProducerGlobalConfig, topicConf: ProducerTopicConfig, streamOptions: WriteStreamOptions): ProducerStream; + +export interface NewTopic { + topic: string; + num_partitions: number; + replication_factor: number; + config?: { + 'cleanup.policy'?: 'delete' | 'compact' | 'delete,compact' | 'compact,delete'; + 'compression.type'?: 'gzip' | 'snappy' | 'lz4' | 'zstd' | 'uncompressed' | 'producer'; + 'delete.retention.ms'?: string; + 'file.delete.delay.ms'?: string; + 'flush.messages'?: string; + 'flush.ms'?: string; + 'follower.replication.throttled.replicas'?: string; + 'index.interval.bytes'?: 
string; + 'leader.replication.throttled.replicas'?: string; + 'max.compaction.lag.ms'?: string; + 'max.message.bytes'?: string; + 'message.format.version'?: string; + 'message.timestamp.difference.max.ms'?: string; + 'message.timestamp.type'?: string; + 'min.cleanable.dirty.ratio'?: string; + 'min.compaction.lag.ms'?: string; + 'min.insync.replicas'?: string; + 'preallocate'?: string; + 'retention.bytes'?: string; + 'retention.ms'?: string; + 'segment.bytes'?: string; + 'segment.index.bytes'?: string; + 'segment.jitter.ms'?: string; + 'segment.ms'?: string; + 'unclean.leader.election.enable'?: string; + 'message.downconversion.enable'?: string; + } | { [cfg: string]: string; }; +} + +export enum ConsumerGroupStates { + UNKNOWN = 0, + PREPARING_REBALANCE = 1, + COMPLETING_REBALANCE = 2, + STABLE = 3, + DEAD = 4, + EMPTY = 5, +} + +export interface GroupOverview { + groupId: string; + protocolType: string; + isSimpleConsumerGroup: boolean; + state: ConsumerGroupStates; +} + +export enum AclOperationTypes { + UNKNOWN = 0, + ANY = 1, + ALL = 2, + READ = 3, + WRITE = 4, + CREATE = 5, + DELETE = 6, + ALTER = 7, + DESCRIBE = 8, + CLUSTER_ACTION = 9, + DESCRIBE_CONFIGS = 10, + ALTER_CONFIGS = 11, + IDEMPOTENT_WRITE = 12, +} + +export type MemberDescription = { + clientHost: string + clientId: string + memberId: string + memberAssignment: Buffer + memberMetadata: Buffer + groupInstanceId?: string, + assignment: TopicPartition[] +} + +export type Node = { + id: number + host: string + port: number + rack?: string +} + +export type GroupDescription = { + groupId: string + error?: LibrdKafkaError + members: MemberDescription[] + protocol: string + isSimpleConsumerGroup: boolean; + protocolType: string + partitionAssignor: string + state: ConsumerGroupStates + coordinator: Node + authorizedOperations?: AclOperationTypes[] +} + +export type GroupDescriptions = { + groups: GroupDescription[], +} + +export type DeleteGroupsResult = { + groupId: string + errorCode?: number + error?: LibrdKafkaError +} + +export interface IAdminClient { + createTopic(topic: NewTopic, cb?: (err: LibrdKafkaError) => void): void; + createTopic(topic: NewTopic, timeout?: number, cb?: (err: LibrdKafkaError) => void): void; + + deleteTopic(topic: string, cb?: (err: LibrdKafkaError) => void): void; + deleteTopic(topic: string, timeout?: number, cb?: (err: LibrdKafkaError) => void): void; + + createPartitions(topic: string, desiredPartitions: number, cb?: (err: LibrdKafkaError) => void): void; + createPartitions(topic: string, desiredPartitions: number, timeout?: number, cb?: (err: LibrdKafkaError) => void): void; + + listTopics(cb?: (err: LibrdKafkaError, topics: string[]) => any): void; + listTopics(options?: { timeout?: number }, cb?: (err: LibrdKafkaError, topics: string[]) => any): void; + + listGroups(cb?: (err: LibrdKafkaError, result: { groups: GroupOverview[], errors: LibrdKafkaError[] }) => any): void; + listGroups(options?: { timeout?: number, matchConsumerGroupStates?: ConsumerGroupStates[] }, + cb?: (err: LibrdKafkaError, result: { groups: GroupOverview[], errors: LibrdKafkaError[] }) => any): void; + + describeGroups(groupIds: string[], cb?: (err: LibrdKafkaError, result: GroupDescriptions) => any): void; + describeGroups(groupIds: string[], + options?: { timeout?: number, includeAuthorizedOperations?: boolean }, + cb?: (err: LibrdKafkaError, result: GroupDescriptions) => any): void; + + deleteGroups(groupIds: string[], cb?: (err: LibrdKafkaError, result: DeleteGroupsResult[]) => any): void; + deleteGroups(groupIds: 
string[], + options?: { timeout?: number }, + cb?: (err: LibrdKafkaError, result: DeleteGroupsResult[]) => any): void; + + disconnect(): void; +} + +export type EventHandlers = { + [event_key: string]: (...args: any[]) => void; +}; + +export abstract class AdminClient { + static create(conf: GlobalConfig, eventHandlers?: EventHandlers): IAdminClient; +} + +export type RdKafka = { + Consumer: KafkaConsumer, + Producer: Producer, + HighLevelProducer: HighLevelProducer, + AdminClient: AdminClient, + KafkaConsumer: KafkaConsumer, + createReadStream: typeof KafkaConsumer.createReadStream, + createWriteStream: typeof Producer.createWriteStream, + CODES: typeof errors.CODES, + Topic: (name: string) => string, + features: typeof features, + librdkafkaVersion: typeof librdkafkaVersion, +} diff --git a/util/configure.js b/util/configure.js index 7fd97f86..dfedc223 100644 --- a/util/configure.js +++ b/util/configure.js @@ -7,7 +7,6 @@ var path = require('path'); var baseDir = path.resolve(__dirname, '../'); var releaseDir = path.join(baseDir, 'build', 'deps'); - var isWin = /^win/.test(process.platform); // Skip running this if we are running on a windows system
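
The generated schemas above are protobuf-es v2 descriptors; as their headers note, messages are built with `create()`. A minimal sketch, assuming the generated widget_pb.ts module is imported via a relative path appropriate to the caller:

import { create } from '@bufbuild/protobuf';
import { WidgetSchema } from './test/schemaregistry/serde/test/widget_pb'; // adjust the path to your layout

// Build a test.Widget message (fields: name, size, version) from the generated schema.
const widget = create(WidgetSchema, { name: 'sample-widget', size: 2, version: 1 });
console.log(widget.name, widget.size, widget.version);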
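
The expectations in wildcard-matcher.spec.ts summarize the matcher's semantics; a short sketch using the same relative import as the spec (adjust to your layout), with results mirroring the test expectations:

import { match } from '../../../schemaregistry/serde/wildcard-matcher';

// Matching is case-sensitive; '?' matches a single character, '*' matches within
// a dot-delimited segment, and '**' also crosses '.' boundaries.
match('Foo', 'Fo*');                     // true
match('FOO', 'Foo*');                    // false (case-sensitive)
match('New Bookmarks', 'N?w ?o?k??r?s'); // true
match('alice.bob.eve', 'a*');            // false ('*' stops at '.')
match('alice.bob.eve', 'a**');           // true  ('**' crosses '.')
match('alice.bob.eve', 'alice.bob*');    // false
match('alice.bob.eve', 'alice.bob**');   // true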
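
types/kafkajs.d.ts declares the promisified client surface (Kafka, Producer.send, Consumer.run). A hedged sketch of how those declarations compose; the import specifier, broker address, topic, and group id are assumptions for illustration only:

import { Kafka } from '@confluentinc/kafka-javascript'; // illustrative import; see the package docs for the actual entry point

async function roundTrip(): Promise<void> {
  // The kafkajs-style options sit under the `kafkaJS` key of the constructor config.
  const kafka = new Kafka({ kafkaJS: { brokers: ['localhost:9092'] } });

  const producer = kafka.producer();
  await producer.connect();
  await producer.send({ topic: 'test-topic', messages: [{ value: 'hello' }] });
  await producer.disconnect();

  const consumer = kafka.consumer({ kafkaJS: { groupId: 'test-group', fromBeginning: true } });
  await consumer.connect();
  await consumer.subscribe({ topics: ['test-topic'] });
  await consumer.run({
    eachMessage: async ({ topic, partition, message }) => {
      console.log(topic, partition, message.value?.toString());
    },
  });
}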