From 4452cae39f50ffa9300abfd1dce7bb3541a6aaff Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Wed, 19 Nov 2025 22:02:52 -0600 Subject: [PATCH 1/6] Update docu and CBv5 Signed-off-by: Andy Tael --- cloudbank-v5/account/helm/values.yaml | 8 +- cloudbank-v5/chatbot/helm/values.yaml | 8 +- cloudbank-v5/checks/helm/values.yaml | 8 +- cloudbank-v5/creditscore/helm/values.yaml | 8 +- cloudbank-v5/customer/helm/values.yaml | 8 +- cloudbank-v5/testrunner/helm/values.yaml | 8 +- cloudbank-v5/transfer/helm/values.yaml | 8 +- .../site/docs/platform/apacheapisix.md | 1147 ++++++++++++++++- docs-source/site/docs/platform/coherence.md | 17 + docs-source/site/docs/platform/dboperator.md | 158 +++ docs-source/site/docs/platform/esooperator.md | 449 +++++++ .../site/docs/relnotes/_category_.json | 8 + docs-source/site/docs/relnotes/relnotes.md | 169 +++ .../site/docs/setup/setup_dev/_category_.json | 8 + .../site/docs/setup/setup_dev/setup.md | 5 + .../docs/setup/setup_prod/_category_.json | 8 + .../docs/setup/{ => setup_prod}/database.md | 6 +- .../setup/{ => setup_prod}/media/image6.png | Bin .../docs/setup/{ => setup_prod}/namespace.md | 0 .../site/docs/setup/{ => setup_prod}/obaas.md | 6 +- .../setup/{ => setup_prod}/observability.md | 6 +- .../docs/setup/{ => setup_prod}/obtaining.md | 0 .../setup/{ => setup_prod}/prereq-chart.md | 2 +- .../docs/setup/{ => setup_prod}/prereqs.md | 0 .../docs/setup/{ => setup_prod}/secrets.md | 0 .../site/docs/setup/{ => setup_prod}/setup.md | 0 docs-source/site/package-lock.json | 30 +- 27 files changed, 2018 insertions(+), 57 deletions(-) create mode 100644 docs-source/site/docs/relnotes/_category_.json create mode 100644 docs-source/site/docs/relnotes/relnotes.md create mode 100644 docs-source/site/docs/setup/setup_dev/_category_.json create mode 100644 docs-source/site/docs/setup/setup_dev/setup.md create mode 100644 docs-source/site/docs/setup/setup_prod/_category_.json rename docs-source/site/docs/setup/{ => setup_prod}/database.md (96%) rename docs-source/site/docs/setup/{ => setup_prod}/media/image6.png (100%) rename docs-source/site/docs/setup/{ => setup_prod}/namespace.md (100%) rename docs-source/site/docs/setup/{ => setup_prod}/obaas.md (98%) rename docs-source/site/docs/setup/{ => setup_prod}/observability.md (96%) rename docs-source/site/docs/setup/{ => setup_prod}/obtaining.md (100%) rename docs-source/site/docs/setup/{ => setup_prod}/prereq-chart.md (99%) rename docs-source/site/docs/setup/{ => setup_prod}/prereqs.md (100%) rename docs-source/site/docs/setup/{ => setup_prod}/secrets.md (100%) rename docs-source/site/docs/setup/{ => setup_prod}/setup.md (100%) diff --git a/cloudbank-v5/account/helm/values.yaml b/cloudbank-v5/account/helm/values.yaml index c87fabcfa..1d4e9c7e7 100644 --- a/cloudbank-v5/account/helm/values.yaml +++ b/cloudbank-v5/account/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. 
# Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-adb-wallet + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-adb-wallet # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/chatbot/helm/values.yaml b/cloudbank-v5/chatbot/helm/values.yaml index eccd31e5c..ab124d8b2 100644 --- a/cloudbank-v5/chatbot/helm/values.yaml +++ b/cloudbank-v5/chatbot/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. # Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-tns-admin + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-tns-admin # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/checks/helm/values.yaml b/cloudbank-v5/checks/helm/values.yaml index aeae547b5..3a17d1d52 100644 --- a/cloudbank-v5/checks/helm/values.yaml +++ b/cloudbank-v5/checks/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. # Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-tns-admin + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-tns-admin # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/creditscore/helm/values.yaml b/cloudbank-v5/creditscore/helm/values.yaml index 57a70b852..91cb081b3 100644 --- a/cloudbank-v5/creditscore/helm/values.yaml +++ b/cloudbank-v5/creditscore/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. 
+ namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. # Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: false # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-tns-admin + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-tns-admin # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/customer/helm/values.yaml b/cloudbank-v5/customer/helm/values.yaml index 5c1dbb0e2..1ddeb2281 100644 --- a/cloudbank-v5/customer/helm/values.yaml +++ b/cloudbank-v5/customer/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. # Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-adb-wallet + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-adb-wallet # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/testrunner/helm/values.yaml b/cloudbank-v5/testrunner/helm/values.yaml index 9a59978da..c4e23e164 100644 --- a/cloudbank-v5/testrunner/helm/values.yaml +++ b/cloudbank-v5/testrunner/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. 
# Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-tns-admin + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-tns-admin # Enable OpenTelemetry tracing and metrics otel: diff --git a/cloudbank-v5/transfer/helm/values.yaml b/cloudbank-v5/transfer/helm/values.yaml index 9a59978da..c4e23e164 100644 --- a/cloudbank-v5/transfer/helm/values.yaml +++ b/cloudbank-v5/transfer/helm/values.yaml @@ -148,18 +148,18 @@ affinity: {} obaas: # TODO: Set to your OBaaS installation namespace - namespace: REPLACE_WITH_OBAAS_NAMESPACE # e.g., obaas-cdd, obaas-prod, etc. + namespace: obaas-dev # e.g., obaas-cdd, obaas-prod, etc. # Framework selection: SPRING_BOOT or HELIDON # TODO: Choose your framework (SPRING_BOOT or HELIDON) - framework: REPLACE_WITH_FRAMEWORK # Options: SPRING_BOOT or HELIDON + framework: SPRING_BOOT # Options: SPRING_BOOT or HELIDON database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: REPLACE_WITH_DB_CREDENTIALS_SECRET # e.g., my-app-db-secrets + credentialsSecret: account-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret - walletSecret: REPLACE_WITH_DB_WALLET_SECRET # e.g., my-app-tns-admin + walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-tns-admin # Enable OpenTelemetry tracing and metrics otel: diff --git a/docs-source/site/docs/platform/apacheapisix.md b/docs-source/site/docs/platform/apacheapisix.md index e15040ac9..c1a98ca37 100644 --- a/docs-source/site/docs/platform/apacheapisix.md +++ b/docs-source/site/docs/platform/apacheapisix.md @@ -4,56 +4,1167 @@ sidebar_position: 1 --- ## Apache APISIX -[Apache APISIX](https://apisix.apache.org) is an open source cloud native API platform that supports the full lifecycle of API management including publishing, traffic management, deployment strategies, and circuit breakers. +[Apache APISIX](https://apisix.apache.org) is an open-source cloud-native API gateway that provides rich traffic management capabilities including load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more. It serves as the primary API gateway for Oracle Backend for Microservices and AI, routing external traffic to your microservices. 
+ +### Overview + +Apache APISIX offers: +- **Dynamic Routing**: Configure routes in real-time without restarting Apache APISIX +- **Load Balancing**: Multiple load balancing algorithms including round-robin, consistent hashing, and weighted round-robin +- **Authentication**: Support for multiple authentication methods including API keys, JWT, OAuth2, and more +- **Rate Limiting**: Protect your services from overload with flexible rate limiting policies +- **Circuit Breaking**: Prevent cascading failures with automatic circuit breaker functionality +- **Observability**: Built-in support for metrics, logging, and tracing +- **Plugin Ecosystem**: Extensive [plugin system](https://apisix.apache.org/docs/apisix/plugins/batch-requests/) for extending functionality + +--- + +## Table of Contents + +- [Installing APISIX](#installing-apisix) +- [Prerequisites and Assumptions](#prerequisites-and-assumptions) +- [Accessing Apache APISIX](#accessing-apache-apisix) + - [Port Forwarding to Admin API](#port-forwarding-to-admin-api) + - [Retrieving the Admin Key](#retrieving-the-admin-key) + - [Retrieving the Gateway IP](#retrieving-the-gateway-ip) +- [Working with APISIX REST API](#working-with-apisix-rest-api) + - [Creating an Upstream](#creating-an-upstream) + - [Creating a Route](#creating-a-route) + - [Testing the Route](#testing-the-route) + - [Updating a Route](#updating-a-route) + - [Deleting a Route](#deleting-a-route) + - [Listing All Routes](#listing-all-routes) + - [Creating a Service](#creating-a-service) +- [Common Operations](#common-operations) + - [Enable Authentication on a Route](#enable-authentication-on-a-route) + - [Enable Rate Limiting](#enable-rate-limiting) + - [Configure Load Balancing](#configure-load-balancing) + - [Enable OpenTelemetry Tracing](#enable-opentelemetry-tracing) + - [Configure Health Checks](#configure-health-checks) +- [Managing SSL/TLS Certificates](#managing-ssltls-certificates) +- [Accessing APISIX Dashboard](#accessing-apisix-dashboard) +- [Troubleshooting](#troubleshooting) + - [Debugging Routes](#debugging-routes) +- [Additional Resources](#additional-resources) + +--- ### Installing APISIX Apache APISIX will be installed if the `apisix.enabled` is set to `true` in the `values.yaml` file. The default namespace for Apache APISIX is `apisix`. +**Key Components Installed:** +- **Apache APISIX Gateway**: The main Apache APISIX gateway that handles incoming traffic +- **APISIX Dashboard**: Web UI for managing routes, upstreams, and plugins +- **etcd**: Distributed key-value store used by APISIX for configuration storage + +### Prerequisites and Assumptions + +This guide makes the following assumptions: + +- **Namespace**: All examples use `YOUR-NAMESPACE` as a placeholder. Replace this with your actual target namespace (e.g., `obaas-dev`, `production`, `my-app`, etc.). +- **APISIX Namespace**: All APISIX-related commands in this guide use `apisix` as the namespace where Apache APISIX is deployed. **If you installed APISIX into a different namespace**, replace `apisix` with your actual APISIX namespace in all commands (e.g., `-n apisix` becomes `-n your-apisix-namespace`). You can verify your APISIX namespace with: + ```bash + kubectl get pods -A | grep apisix + ``` +- **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions. 
+- **Command-line Tools**: The following tools are installed and available: + - `kubectl` - Kubernetes command-line tool + - `curl` - For testing API endpoints + - `jq` - JSON processor (optional, for formatting responses) + - `yq` - YAML processor (optional, for retrieving admin key) +- **Port Forwarding**: Examples assume you have an active port-forward to the APISIX admin service. + ### Accessing Apache APISIX -Oracle Backend for Microservices and AI deploys the Apache APISIX Gateway and Dashboard in the `apisix` namespace by default. The gateway is exposed via an external load balancer and an ingress controller. +Oracle Backend for Microservices and AI deploys the Apache APISIX Gateway and Dashboard in the configured namespace (default: `apisix`). Apache APISIX is exposed via an external load balancer and an ingress controller for production traffic, while the admin API is accessed through port forwarding for management operations. -To access the Apache APISIX APIs, use kubectl port-forward to create a secure channel to `service/apisix-admin`. Run the following command to establish the secure tunnel (replace the example namespace `obaas-dev` with the namespace where APISIX is deployed): +#### Port Forwarding to Admin API + +To access the Apache APISIX Admin APIs, use kubectl port-forward to create a secure channel to the `apisix-admin` service: + +```shell +kubectl port-forward -n apisix svc/apisix-admin 9180 +``` + +**Note:** Keep this terminal session open while working with the Admin API. The port-forward will terminate if the terminal is closed. + +To run the port-forward in the background: ```shell -kubectl port-forward -n obaas-dev svc/apisix-admin 9180 +kubectl port-forward -n apisix svc/apisix-admin 9180 & ``` -### Retrieving admin key +#### Retrieving the Admin Key -To access the APISIX APIs, you need the admin key. Retrieve it with the following command (replace the example namespace `obaas-dev` with the namespace where APISIX is deployed): +To access the APISIX Admin APIs, you need the admin key. The following commands will retrieve the key, store it in the `ADMIN_KEY` environment variable, and display it: **Option 1 - Using yq:** ```bash -kubectl -n obaas-dev get configmap apisix -o yaml | yq '.data."config.yaml"' | yq '.deployment.admin.admin_key[] | select(.name == "admin") | .key' +export ADMIN_KEY=$(kubectl -n apisix get configmap apisix -o yaml | yq '.data."config.yaml"' | yq '.deployment.admin.admin_key[] | select(.name == "admin") | .key') && echo "Admin Key: $ADMIN_KEY" ``` -**Option 2 - Manual retrieval:** +**Option 2 - Using kubectl and awk:** -If the command above doesn't work: +```bash +export ADMIN_KEY=$(kubectl get configmap apisix -n apisix -o jsonpath='{.data.config\.yaml}' | awk '/admin_key:/,/role: admin/ {if ($1 == "key:") {print $2; exit}}') && echo "Admin Key: $ADMIN_KEY" +``` + +**Option 3 - Manual retrieval:** -1. Run: `kubectl get configmap apisix -n obaas-dev -o yaml` -1. Look for the `config.yaml` section -1. Find `deployment.admin.admin_key` and copy the key value +If the commands above don't work: -Test the admin key by running a simple curl command; it should return the list of configured routes. +1. Run: `kubectl get configmap apisix -n apisix -o yaml` +1. Look for the `config.yaml` section under `data` +1. Find `deployment.admin.admin_key` and locate the admin user's key value +1. 
Copy the key and export it: + +```bash +export ADMIN_KEY="your-admin-key-here" && echo "Admin Key: $ADMIN_KEY" +``` + +**Test the admin key** by running a simple curl command to retrieve the list of available plugins: ```shell -curl http://127.0.0.1:9180/apisix/admin/routes -H "X-API-key: $admin_key" -X GET +curl -sS http://127.0.0.1:9180/apisix/admin/plugins/list -H "X-API-KEY: $ADMIN_KEY" -X GET | jq +``` + +Expected response if successful (shows available plugins): +```json +[ + "api-breaker", + "authz-keycloak", + "basic-auth", + "batch-requests", + "consumer-restriction", + "cors", + "jwt-auth", + "key-auth", + "limit-count", + "limit-req", + "prometheus", + "proxy-rewrite", + "..." +] +``` + +This confirms the admin key is valid and shows the plugins available in your APISIX installation. + +#### Retrieving the Gateway IP + +To test routes through the APISIX gateway, you need the external IP address of the ingress controller. The following command will retrieve the IP, store it in the `GATEWAY_IP` environment variable, and display it: + +```bash +export GATEWAY_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') && echo "Gateway IP: $GATEWAY_IP" +``` + +**Note:** If your load balancer uses a hostname instead of an IP address (common in AWS), use this command instead: + +```bash +export GATEWAY_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') && echo "Gateway Hostname: $GATEWAY_IP" +``` + +**Verify Apache APISIX is accessible:** + +```bash +curl -sS http://$GATEWAY_IP +``` + +You should receive a response (possibly a 404 if no routes are configured yet), confirming Apache APISIX is reachable. + +--- + +### Working with APISIX REST API + +The following examples demonstrate common operations using the APISIX Admin API. These operations allow you to manage routes, upstreams, and other Apache APISIX configurations programmatically. For complete API reference, see the [APISIX Admin API documentation](https://apisix.apache.org/docs/apisix/admin-api/). + +**Understanding Routes, Services, and Upstreams** + +APISIX uses three key concepts to route traffic from clients to backend services: + +- **Route** - The entry point that matches incoming requests based on URI, host, method, headers, etc., and determines where to send them. Think of it as a traffic rule: "If the request matches this pattern, send it here." + +- **Upstream** - Defines one or more backend service instances (nodes) and how to distribute traffic among them (load balancing, health checks). This is where your actual backend services run. + +- **Service** (optional) - An abstraction that groups routes with shared configuration like plugins, upstreams, and common settings. Use services to avoid duplicating configuration across multiple routes. 
+ +**Relationship Diagram:** + +``` +┌─────────────┐ +│ Client │ +│ Request │ +└──────┬──────┘ + │ HTTP/HTTPS + ↓ +┌──────────────────────────────────────────────────────────────┐ +│ APISIX Gateway │ +│ │ +│ ┌────────────────────────────────────────────────────────┐ │ +│ │ Route: /api/users/* │ │ +│ │ ├─ Matches: URI, method, host, headers │ │ +│ │ ├─ Route-specific plugins (optional) │ │ +│ │ └─ Points to: Service OR Upstream │ │ +│ └────────────┬───────────────────────────────────────────┘ │ +│ │ │ +│ ↓ │ +│ ┌────────────────────────────┐ │ +│ │ Service (Optional) │ ← Multiple routes can │ +│ │ ├─ Shared plugins │ share one service │ +│ │ ├─ Common configuration │ │ +│ │ └─ Points to: Upstream │ │ +│ └────────────┬───────────────┘ │ +│ │ │ +│ ↓ │ +│ ┌────────────────────────────┐ │ +│ │ Upstream │ ← Multiple routes/services │ +│ │ ├─ Load balancing │ can share one upstream │ +│ │ ├─ Health checks │ │ +│ │ └─ Backend nodes: │ │ +│ │ • Pod 1 (weight: 3) │ │ +│ │ • Pod 2 (weight: 2) │ │ +│ │ • Pod 3 (weight: 1) │ │ +│ └────────────┬───────────────┘ │ +│ │ │ +└───────────────┼──────────────────────────────────────────────┘ + │ + ┌────────┼────────┐ + │ │ │ + ↓ ↓ ↓ + ┌──────┐ ┌──────┐ ┌──────┐ + │ Pod1 │ │ Pod2 │ │ Pod3 │ ← Your backend services + └──────┘ └──────┘ └──────┘ +``` + +**Configuration Patterns:** + +1. **Direct Route → Upstream** (Simple setup) + ``` + Route ──→ Upstream ──→ Backend Pods + ``` + +2. **Route → Service → Upstream** (Shared configuration) + ``` + Route 1 ──┐ + Route 2 ──┼──→ Service ──→ Upstream ──→ Backend Pods + Route 3 ──┘ + ``` + +3. **Multiple Routes → Same Upstream** (Shared backend) + ``` + Route 1 (/api/users) ──┐ + Route 2 (/api/orders) ──┼──→ Upstream ──→ Backend Pods + Route 3 (/api/products)──┘ + ``` + +**When to use each:** + +- **Upstream only**: Simple deployments with unique backend services per route +- **Service + Upstream**: When multiple routes share common plugins or configuration (e.g., all API endpoints need rate limiting and authentication) +- **Shared Upstream**: When different routes need to reach the same backend service but with different matching rules or plugins + +--- + +#### Creating an Upstream + +An upstream defines the target service(s) that will handle the routed requests. Create an upstream before creating routes that reference it. For detailed upstream configuration options, see the [Upstream documentation](https://apisix.apache.org/docs/apisix/terminology/upstream/). + +**Example: Create an upstream for a backend service** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "type": "roundrobin", + "nodes": { + "backend-service.YOUR-NAMESPACE.svc.cluster.local:8080": 1 + }, + "timeout": { + "connect": 6, + "send": 6, + "read": 6 + }, + "retries": 2, + "keepalive_pool": { + "size": 320, + "idle_timeout": 60, + "requests": 1000 + } + }' | jq +``` + +**Understanding the configuration:** + +- `type: "roundrobin"` - Load balancing algorithm (roundrobin, consistent hashing, weighted roundrobin, etc.) 
+- `nodes` - Backend service endpoints with weights (higher weight = more traffic) +- `timeout` - Connection and read/write timeouts in seconds +- `retries` - Number of retry attempts for failed requests +- `keepalive_pool` - Connection pool settings for performance optimization + +**Expected response:** + +```json +{ + "key": "/apisix/upstreams/1", + "value": { + "type": "roundrobin", + "nodes": { + "backend-service.YOUR-NAMESPACE.svc.cluster.local:8080": 1 + }, + "timeout": { + "connect": 6, + "send": 6, + "read": 6 + }, + "retries": 2, + "keepalive_pool": { + "size": 320, + "idle_timeout": 60, + "requests": 1000 + }, + "create_time": 1234567890, + "update_time": 1234567890 + } +} +``` + +**List all upstreams:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +**Delete an upstream:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq +``` + +**Note:** You must delete or update all routes and services using an upstream before you can delete the upstream. + +#### Creating a Route + +Routes define how incoming requests are matched and forwarded to upstreams. You can match on URI, host, method, headers, and more. For advanced routing options, see the [Route documentation](https://apisix.apache.org/docs/apisix/terminology/route/). + +**Example: Create a basic route** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "my-api-route", + "uri": "/api/v1/*", + "methods": ["GET", "POST", "PUT", "DELETE"], + "upstream_id": "1", + "status": 1, + "desc": "Route for my API service" + }' | jq +``` + +**Understanding the configuration:** + +- `name` - Descriptive name for the route +- `uri` - URI pattern to match (supports wildcards and regex) +- `methods` - HTTP methods allowed for this route +- `upstream_id` - Reference to the upstream created earlier +- `status` - 1 (enabled) or 0 (disabled) +- `desc` - Optional description + +**Example: Create a route with host matching** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/2 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "api-example-com", + "uri": "/api/*", + "host": "api.example.com", + "methods": ["GET", "POST"], + "upstream_id": "1", + "status": 1, + "desc": "Route for api.example.com host" + }' | jq +``` + +**Example: Create a route with inline upstream (no separate upstream resource)** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/3 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "quick-route", + "uri": "/health", + "methods": ["GET"], + "upstream": { + "type": "roundrobin", + "nodes": { + "backend-service.YOUR-NAMESPACE.svc.cluster.local:8080": 1 + } + }, + "status": 1 + }' | jq +``` + +#### Testing the Route + +After creating a route, test it to ensure it's working correctly. Make sure you have retrieved the [Gateway IP](#retrieving-the-gateway-ip) and stored it in the `GATEWAY_IP` environment variable. 
+ +**Test the route through Apache APISIX:** + +```bash +# Using the Apache APISIX gateway IP/hostname +curl -sS http://$GATEWAY_IP/api/v1/users | jq + +# Or port-forward to the Apache APISIX gateway for local testing +kubectl port-forward -n apisix svc/apisix-gateway 9080 +curl -sS http://127.0.0.1:9080/api/v1/users | jq +``` + +**Verify the route exists:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +#### Updating a Route + +Routes can be updated in place using PUT or PATCH methods. + +**Example: Update a route to add more methods** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "methods": ["GET", "POST", "PUT", "DELETE", "PATCH"] + }' | jq +``` + +**Example: Disable a route temporarily** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "status": 0 + }' | jq +``` + +**Example: Update route URI pattern** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "uri": "/api/v2/*" + }' | jq +``` + +#### Deleting a Route + +Remove a route when it's no longer needed. + +**Example: Delete a specific route** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq +``` + +**Expected response:** + +```json +{ + "deleted": "1", + "key": "/apisix/routes/1" +} +``` + +**Example: Delete multiple routes** + +```bash +# Delete route 1 +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq + +# Delete route 2 +curl -sS http://127.0.0.1:9180/apisix/admin/routes/2 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq + +# Delete route 3 +curl -sS http://127.0.0.1:9180/apisix/admin/routes/3 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq +``` + +**Verify deletion:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +Expected response when route doesn't exist: +```json +{ + "error_msg": "failed to find route" +} +``` + +#### Listing All Routes + +View all configured routes to understand your current Apache APISIX configuration. + +**List all routes:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +**List routes with formatted output:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq '.list[] | {id: .value.id, name: .value.name, uri: .value.uri, status: .value.status}' +``` + +**Count total routes:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq '.total' +``` + +#### Creating a Service + +A Service is an abstraction that groups routes with shared configuration such as plugins, upstream settings, and other common properties. Services help organize large deployments by allowing you to configure common behaviors once and reuse them across multiple routes. For more details, see the [Service documentation](https://apisix.apache.org/docs/apisix/terminology/service/). 
+ +**Example: Create a service with common plugin configuration** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/services/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "my-api-service", + "desc": "Shared configuration for my API endpoints", + "upstream_id": "1", + "plugins": { + "limit-count": { + "count": 100, + "time_window": 60, + "rejected_code": 429 + }, + "prometheus": {} + } + }' | jq +``` + +**Understanding the configuration:** + +- `name` - Descriptive name for the service +- `desc` - Optional description +- `upstream_id` - Reference to an upstream (alternatively, you can inline the upstream configuration) +- `plugins` - Plugins that will be applied to all routes using this service + +**Example: Create a route that uses the service** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/10 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "api-endpoint", + "uri": "/api/users/*", + "methods": ["GET", "POST"], + "service_id": "1" + }' | jq +``` + +This route inherits all configuration from the service (upstream, plugins, etc.). You can still add route-specific plugins that will be merged with the service plugins. + +**List all services:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/services \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +**Delete a service:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/services/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq ``` +**Note:** You must delete or update all routes using a service before you can delete the service. + +--- + +### Common Operations + +#### Enable Authentication on a Route + +Protect your routes with various authentication methods. APISIX supports multiple authentication plugins including: + +- **key-auth** - API key authentication (shown in example below) +- **jwt-auth** - JSON Web Token authentication +- **basic-auth** - HTTP Basic authentication +- **oauth2** - OAuth 2.0 authentication +- **hmac-auth** - HMAC authentication +- **ldap-auth** - LDAP authentication + +For complete authentication options, see the [Authentication Plugins documentation](https://apisix.apache.org/docs/apisix/plugins/key-auth/). + +**Example: Enable API Key authentication** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "plugins": { + "key-auth": {} + } + }' | jq +``` + +**Create a consumer with an API key:** + +```bash +# Create consumer +curl -sS http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "username": "my-app", + "plugins": { + "key-auth": { + "key": "my-secret-api-key-12345" + } + } + }' | jq +``` + +**Test the authenticated route:** + +```bash +# Without API key (should fail with 401 error) +curl -sS http://$GATEWAY_IP/api/v1/users | jq + +# With API key (should succeed) +curl -sS http://$GATEWAY_IP/api/v1/users -H "apikey: my-secret-api-key-12345" | jq +``` + +#### Enable Rate Limiting + +Protect your services from overload with rate limiting. For advanced rate limiting configurations and distributed rate limiting with Redis, see the [limit-count plugin documentation](https://apisix.apache.org/docs/apisix/plugins/limit-count/). 
+ +**Example: Limit to 10 requests per minute** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "plugins": { + "limit-count": { + "count": 10, + "time_window": 60, + "rejected_code": 429, + "rejected_msg": "Too many requests", + "policy": "local" + } + } + }' | jq +``` + +**Understanding the configuration:** + +- `count` - Maximum number of requests allowed +- `time_window` - Time window in seconds +- `rejected_code` - HTTP status code when limit is exceeded +- `rejected_msg` - Custom error message +- `policy` - "local" (single node) or "redis" (distributed) + +#### Configure Load Balancing + +Distribute traffic across multiple backend instances. APISIX supports multiple load balancing algorithms. For more details on available algorithms and health checks, see the [Load Balancing documentation](https://apisix.apache.org/docs/apisix/terminology/upstream/#load-balancing). + +**Example: Weighted round-robin load balancing** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "type": "roundrobin", + "nodes": { + "backend-1.YOUR-NAMESPACE.svc.cluster.local:8080": 3, + "backend-2.YOUR-NAMESPACE.svc.cluster.local:8080": 2, + "backend-3.YOUR-NAMESPACE.svc.cluster.local:8080": 1 + } + }' | jq +``` + +This configuration sends 50% of traffic to backend-1, 33% to backend-2, and 17% to backend-3. + +**Example: Consistent hashing (sticky sessions)** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "type": "chash", + "key": "remote_addr", + "nodes": { + "backend-1.YOUR-NAMESPACE.svc.cluster.local:8080": 1, + "backend-2.YOUR-NAMESPACE.svc.cluster.local:8080": 1 + } + }' | jq +``` + +This ensures requests from the same IP address always go to the same backend. + +#### Enable OpenTelemetry Tracing + +Enable distributed tracing with OpenTelemetry to track requests across your microservices. APISIX can export traces to OpenTelemetry collectors for observability and debugging. For complete configuration options, see the [opentelemetry plugin documentation](https://apisix.apache.org/docs/apisix/plugins/opentelemetry/). + +**Example: Enable OpenTelemetry on a route** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_attributes": [ + "http_user_agent", + "http_host" + ] + } + } + }' | jq +``` + +**Understanding the configuration:** + +- `sampler.name` - Sampling strategy, available options: + - `always_on` - Sample all requests + - `always_off` - Don't sample any requests + - `trace_id_ratio_based` - Sample based on a fraction (e.g., 0.1 = 10%) + - `parent_base` - Use parent span's sampling decision +- `additional_attributes` - Extra attributes to include in traces (from request headers or APISIX variables) + +**Example: Configure sampling rate (10% of requests)** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "trace_id_ratio_based", + "options": { + "fraction": 0.1 + } + } + } + } + }' | jq +``` + +**Note:** The OpenTelemetry collector endpoint is configured globally in the APISIX config (typically pointing to your observability stack like SigNoz or Jaeger). 
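To confirm which tracing settings are actually active after these changes, you can read the plugin configuration back from the Admin API. This is a quick verification sketch that assumes the same route ID (`1`) and `ADMIN_KEY` variable used in the examples above:

```bash
# Show only the opentelemetry plugin configuration for route 1
curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X GET | jq '.value.plugins.opentelemetry'
```

If the output is `null`, the plugin is not enabled on that route.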
+ +#### Configure Health Checks + +Configure active and passive health checks to automatically detect and remove unhealthy backend nodes from your upstream. This improves reliability by ensuring traffic only goes to healthy services. For detailed health check options, see the [Health Check documentation](https://apisix.apache.org/docs/apisix/tutorials/health-check/). + +**Example: Configure active HTTP health checks** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "checks": { + "active": { + "type": "http", + "http_path": "/health", + "timeout": 2, + "healthy": { + "interval": 5, + "successes": 2 + }, + "unhealthy": { + "interval": 5, + "http_failures": 3 + } + } + } + }' | jq +``` + +**Understanding the configuration:** + +- `active.type` - Health check type: `http`, `https`, or `tcp` +- `http_path` - Endpoint to check for HTTP/HTTPS health checks +- `timeout` - Health check timeout in seconds +- `healthy.interval` - How often to check healthy nodes (seconds) +- `healthy.successes` - Number of successful checks to mark a node as healthy +- `unhealthy.interval` - How often to check unhealthy nodes (seconds) +- `unhealthy.http_failures` - Number of failures to mark a node as unhealthy + +**Example: Configure passive health checks** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "checks": { + "passive": { + "type": "http", + "healthy": { + "http_statuses": [200, 201, 202, 204, 301, 302], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500, 502, 503, 504], + "http_failures": 3, + "tcp_failures": 3 + } + } + } + }' | jq +``` + +**Understanding passive health checks:** + +- Passive checks monitor actual production traffic (no additional health check requests) +- `healthy.http_statuses` - HTTP status codes considered healthy +- `unhealthy.http_statuses` - HTTP status codes considered unhealthy +- Nodes are marked unhealthy after consecutive failures and automatically recover after consecutive successes + +**Example: Combine active and passive health checks** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PATCH \ + -d '{ + "checks": { + "active": { + "type": "http", + "http_path": "/health", + "timeout": 2, + "healthy": { + "interval": 10, + "successes": 2 + }, + "unhealthy": { + "interval": 5, + "http_failures": 2 + } + }, + "passive": { + "type": "http", + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500, 502, 503, 504], + "http_failures": 3 + } + } + } + }' | jq +``` + +This configuration provides comprehensive health monitoring with both proactive checks and real-time traffic monitoring. + +--- + +### Managing SSL/TLS Certificates + +APISIX supports HTTPS routes by managing SSL/TLS certificates. You can upload certificates and associate them with routes to enable secure communication. For advanced SSL configuration, see the [SSL documentation](https://apisix.apache.org/docs/apisix/terminology/ssl/). 
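If your certificate and key already live in a Kubernetes TLS secret, you can extract the PEM material needed for the upload call shown below. This is a convenience sketch; the secret name `my-tls-secret` and its namespace are assumptions, so substitute your own:

```bash
# Extract certificate and private key from an existing TLS secret (hypothetical secret name)
kubectl get secret my-tls-secret -n YOUR-NAMESPACE -o jsonpath='{.data.tls\.crt}' | base64 -d > tls.crt
kubectl get secret my-tls-secret -n YOUR-NAMESPACE -o jsonpath='{.data.tls\.key}' | base64 -d > tls.key
```

Remember that the Admin API expects the PEM content inline, with newlines escaped as `\n`.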
+ +**Example: Upload an SSL certificate** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/ssls/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "cert": "-----BEGIN CERTIFICATE-----\nMIIDST...\n-----END CERTIFICATE-----", + "key": "-----BEGIN PRIVATE KEY-----\nMIIEv...\n-----END PRIVATE KEY-----", + "snis": ["api.example.com", "*.example.com"] + }' | jq +``` + +**Understanding the configuration:** + +- `cert` - The SSL certificate in PEM format (newlines must be escaped as `\n`) +- `key` - The private key in PEM format (newlines must be escaped as `\n`) +- `snis` - Server Name Indication (SNI) domains this certificate applies to (supports wildcards) + +**Example: Create an HTTPS route** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X PUT \ + -d '{ + "name": "https-route", + "uri": "/api/*", + "methods": ["GET", "POST"], + "host": "api.example.com", + "upstream_id": "1" + }' | jq +``` + +**Note:** HTTPS routes automatically use the SSL certificate that matches the route's `host` SNI. Ensure your Apache APISIX gateway is listening on port 9443 (or your configured HTTPS port). + +**List all SSL certificates:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/ssls \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq +``` + +**Delete an SSL certificate:** + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/ssls/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X DELETE | jq +``` + +**Tip:** For production environments, consider using cert-manager with APISIX for automatic certificate management and renewal. + +--- + ### Accessing APISIX Dashboard :::note - Note that all functionality is not available in the dashboard. You might need to use the REST APIs +Note that all functionality is not available in the dashboard. You might need to use the REST APIs for advanced configurations. ::: -APISIX has an embedded dashboard that can be accessed after a tunnel is established to the `apisix-admin` service. The dashboard is available on this [URL](http://localhost:8190/ui). **NOTE:** you need the Admin key to be able to access the dashboard. +The APISIX Dashboard provides a web-based interface for visual management of routes, upstreams, consumers, and plugins. It offers an alternative to the Admin REST API for users who prefer graphical configuration over command-line operations, making it easier to visualize and manage your Apache APISIX gateway setup at a glance. + +**Prerequisites:** + +1. Ensure you have the [port-forward tunnel](#port-forwarding-to-admin-api) to the APISIX admin service running +2. Retrieve the admin key using the instructions in [Retrieving the Admin Key](#retrieving-the-admin-key) + +**Accessing the Dashboard:** + +With the tunnel established, open your browser and navigate to: + +**URL:** [http://localhost:9180/ui](http://localhost:9180/ui) + +**Login Credentials:** +- **Username**: `admin` +- **API Key**: Use the admin key retrieved from the previous step ![APISIX Dashboard](images/apisix-dashboard.png) -### Configuring APISIX using REST APIs +--- + +### Troubleshooting + +**Problem: Admin API returns "401 Unauthorized"** + +Solution: Verify your admin key is correct and properly set in the X-API-KEY header. 
```bash
# Verify the admin key
kubectl get configmap apisix -n apisix -o yaml | grep -A 10 "admin_key"
```

**Problem: Route not working - returns 404**

Solution: Check the route configuration and verify the URI pattern matches your request:

```bash
# List all routes
curl -sS http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $ADMIN_KEY" | jq

# Check specific route
curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $ADMIN_KEY" | jq
```

**Problem: Route exists but returns 502 Bad Gateway**

Solution: The upstream service may be unavailable. Verify the backend service is running:

```bash
# Check if backend pods are running
kubectl get pods -n YOUR-NAMESPACE

# Test direct connectivity to the service
kubectl run curl-test --image=curlimages/curl --rm -it --restart=Never -- curl http://backend-service.YOUR-NAMESPACE.svc.cluster.local:8080/health
```

**Problem: Changes to routes not taking effect**

Solution: APISIX updates are near real-time, but you can verify etcd synchronization:

```bash
# Check APISIX pods for errors
kubectl logs -n apisix -l app.kubernetes.io/name=apisix --tail=50

# Verify etcd is healthy
kubectl get pods -n apisix -l app.kubernetes.io/name=etcd
```

**Problem: Port-forward keeps disconnecting**

Solution: Run port-forward in the background with auto-reconnect:

```bash
while true; do kubectl port-forward -n apisix svc/apisix-admin 9180; sleep 1; done &
```

#### Debugging Routes

When troubleshooting routing issues, use these techniques to understand how APISIX is processing requests and which routes are matching.

**Check which route matched a request:**

Enable the server-info plugin globally to get detailed information about request processing:

```bash
curl -sS http://127.0.0.1:9180/apisix/admin/global_rules/1 \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X PUT \
  -d '{
    "plugins": {
      "server-info": {}
    }
  }' | jq
```

Then check the response headers when making a request to Apache APISIX. APISIX will include `X-APISIX-Upstream-Status` and other debugging headers.

**View route matching order and priority:**

Routes are matched based on priority. Higher priority routes are matched first. List routes with their priorities:

```bash
curl -sS http://127.0.0.1:9180/apisix/admin/routes \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X GET | jq '.list[] | {id: .value.id, name: .value.name, uri: .value.uri, priority: .value.priority, status: .value.status}'
```

**Note:** Routes without an explicit priority have a default priority of 0. Routes with the same priority are matched in an undefined order.

**Set route priority explicitly:**

```bash
curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X PATCH \
  -d '{
    "priority": 100
  }' | jq
```

**Test route matching without sending real traffic:**

Use the `echo` plugin to inspect how APISIX sees the request:

```bash
curl -sS http://127.0.0.1:9180/apisix/admin/routes/999 \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X PUT \
  -d '{
    "name": "debug-route",
    "uri": "/debug/*",
    "plugins": {
      "echo": {
        "before_body": "matched debug route\n"
      }
    }
  }' | jq
```

Then test:

```bash
curl -sS http://$GATEWAY_IP/debug/test
```

This will return "matched debug route", confirming the route matched.
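Once you are finished debugging, consider removing the temporary debug route (route ID `999` from the sketch above) so it does not linger in the gateway configuration:

```bash
curl -sS http://127.0.0.1:9180/apisix/admin/routes/999 \
  -H "X-API-KEY: $ADMIN_KEY" \
  -X DELETE | jq
```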
+ +**Check upstream health status:** + +View the current health status of upstream nodes: + +```bash +curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $ADMIN_KEY" \ + -X GET | jq '.value.checks' +``` + +**Enable verbose logging for debugging:** + +Temporarily increase APISIX logging to debug level by editing the APISIX ConfigMap and restarting the pods (use with caution in production). + +--- + +### Additional Resources + +**Official APISIX Documentation:** +- [Getting Started Guide](https://apisix.apache.org/docs/apisix/getting-started/) +- [Admin API Reference](https://apisix.apache.org/docs/apisix/admin-api/) +- [Complete Plugins List](https://apisix.apache.org/docs/apisix/plugins/batch-requests/) +- [Terminology and Concepts](https://apisix.apache.org/docs/apisix/terminology/api-gateway/) +- [Architecture Overview](https://apisix.apache.org/docs/apisix/architecture-design/apisix/) + +**Development and Community:** +- [APISIX GitHub Repository](https://github.com/apache/apisix) +- [Dashboard Documentation](https://apisix.apache.org/docs/dashboard/USER_GUIDE/) +- [Community Slack Channel](https://join.slack.com/t/the-asf/shared_invite/zt-vlfbf7ch-HkbNHiU_uDlcH_RvaHv9gQ) -You can configure and update the APISIX gateway using the provided APIs. [API Documentation](https://apisix.apache.org/docs/apisix/getting-started/README/) +**Oracle Resources:** +- [Oracle Backend for Microservices and AI Documentation](https://oracle.github.io/microservices-datadriven/) diff --git a/docs-source/site/docs/platform/coherence.md b/docs-source/site/docs/platform/coherence.md index cdb6070e0..e47d3a8a2 100644 --- a/docs-source/site/docs/platform/coherence.md +++ b/docs-source/site/docs/platform/coherence.md @@ -6,6 +6,23 @@ sidebar_position: 3 The Oracle Coherence Operator is an open-source Kubernetes operator that enables the deployment and management of Oracle Coherence clusters in Kubernetes environments. It provides features to assist with deploying, scaling, and managing Coherence data grid clusters using cloud-native technologies. [Full Documentation can be found here](https://oracle.github.io/coherence-operator/) +--- + +## Table of Contents + +- [Installing the Coherence Operator](#installing-the-coherence-operator) +- [Creating a Coherence Cluster](#creating-a-coherence-cluster) + - [Prerequisites](#prerequisites) + - [Step 1: Create the Coherence Cluster YAML](#step-1-create-the-coherence-cluster-yaml) + - [YAML Configuration Files](#yaml-configuration-files) + - [Step 2: Deploy the Cluster](#step-2-deploy-the-cluster) + - [Step 3: Verify the Deployment](#step-3-verify-the-deployment) +- [Using Coherence with Spring Boot](#using-coherence-with-spring-boot) + - [Dependencies](#dependencies) + - [Spring Boot Configuration](#spring-boot-configuration) + +--- + ### Installing the Coherence Operator Oracle Database Operator for Kubernetes will be installed if the `coherence.enabled` is set to `true` in the `values.yaml` file. The default namespace for Oracle Database Operator is `coherence`. 
diff --git a/docs-source/site/docs/platform/dboperator.md b/docs-source/site/docs/platform/dboperator.md index b0503696f..5168f2092 100644 --- a/docs-source/site/docs/platform/dboperator.md +++ b/docs-source/site/docs/platform/dboperator.md @@ -10,6 +10,164 @@ The Oracle Database Operator for Kubernetes (_OraOperator_, or simply the _opera Learn about using the OraOperator in the Livelab [Microservices and Kubernetes for an Oracle DBA](https://apexapps.oracle.com/pls/apex/dbpm/r/livelabs/view-workshop?wid=3734) +--- + +## Table of Contents + +- [Prerequisites and Assumptions](#prerequisites-and-assumptions) +- [Installing the Oracle Database Operator for Kubernetes](#installing-the-oracle-database-operator-for-kubernetes) +- [Testing the Oracle Database Operator](#testing-the-oracle-database-operator) + - [Verify the operator is running](#verify-the-operator-is-running) + - [Verify CRDs are installed](#verify-crds-are-installed) + - [Validate a sample resource](#validate-a-sample-resource) +- [Monitoring the Oracle Database Operator](#monitoring-the-oracle-database-operator) + - [Check operator logs](#check-operator-logs) + +--- + +### Prerequisites and Assumptions + +This guide makes the following assumptions: + +- **Namespace**: The Oracle Database Operator is installed in the `oracle-database-operator-system` namespace (the default). +- **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions to: + - View operator pods and CRDs + - Create and manage database custom resources + - View logs from the oracle-database-operator-system namespace +- **Command-line Tools**: The following tools are installed and available: + - `kubectl` - Kubernetes command-line tool +- **File References**: Examples reference YAML files that you can create for testing purposes. + ### Installing the Oracle Database Operator for Kubernetes Oracle Database Operator for Kubernetes will be installed if the `oracle-database-operator.enabled` is set to `true` in the `values.yaml` file. The default namespace for Oracle Database Operator is `oracle-database-operator-system`. + +### Testing the Oracle Database Operator + +#### Verify the operator is running + +After installation, verify that the Oracle Database Operator pods are running: + +```shell +kubectl get pods -n oracle-database-operator-system +``` + +You should see output similar to: + +``` +NAME READY STATUS RESTARTS AGE +oracle-database-operator-controller-manager-xxxxxxxxx-xxxxx 1/1 Running 0 5m +``` + +**Understanding the output:** + +- `READY: 1/1` - The operator pod has 1 container and it's ready +- `STATUS: Running` - The pod is running successfully +- `RESTARTS: 0` - No restarts indicates stable operation + +If the pod is not running, check the pod details: + +```shell +kubectl describe pod -n oracle-database-operator-system -l control-plane=controller-manager +``` + +#### Verify CRDs are installed + +The Oracle Database Operator installs several Custom Resource Definitions (CRDs) that extend the Kubernetes API. 
Verify these CRDs are installed: + +```shell +kubectl get crds | grep oracle.db +``` + +You should see output listing the database-related CRDs: + +``` +autonomouscontainerdatabases.database.oracle.com +autonomousdatabases.database.oracle.com +autonomousdatabasebackups.database.oracle.com +autonomousdatabaserestores.database.oracle.com +cdbresources.database.oracle.com +dataguardbrokers.database.oracle.com +dbcssystems.database.oracle.com +oraclerestdataservices.database.oracle.com +pdbresources.database.oracle.com +shardingdatabases.database.oracle.com +singleinstancedatabases.database.oracle.com +``` + +These CRDs enable you to manage Oracle databases as Kubernetes resources. + +To see detailed information about a specific CRD: + +```shell +kubectl get crd singleinstancedatabases.database.oracle.com -o yaml +``` + +#### Validate a sample resource + +To verify the operator can recognize and validate database resources without actually creating a database, you can use kubectl's dry-run feature. + +Create a file named `sidb-test.yaml` with a minimal SingleInstanceDatabase resource: + +```yaml +apiVersion: database.oracle.com/v1alpha1 +kind: SingleInstanceDatabase +metadata: + name: test-sidb + namespace: default +spec: + sid: FREE + edition: free + image: + pullFrom: container-registry.oracle.com/database/free:latest-lite + prebuiltDB: true +``` + +**Understanding the configuration:** + +- `apiVersion`: Uses the database.oracle.com API group installed by the operator +- `kind: SingleInstanceDatabase`: Specifies a single instance Oracle database +- `sid: FREE`: The Oracle System Identifier (must be "FREE" for Free edition) +- `edition: free`: Uses Oracle Database Free edition +- `image.pullFrom`: Container image location +- `image.prebuiltDB`: Uses a prebuilt database to speed up provisioning + +Validate the resource without creating it: + +```shell +kubectl apply -f sidb-test.yaml --dry-run=server +``` + +You should see output similar to: + +``` +singleinstancedatabase.database.oracle.com/test-sidb created (server dry run) +``` + +This confirms that: +- The operator's CRDs are properly installed +- The resource definition is valid +- The operator would accept this resource if actually applied + +**Note:** This command does not create any actual database or resources. It only validates that the manifest is syntactically correct and would be accepted by the operator. The dry-run validation happens at the Kubernetes API server level, so it will not appear in the operator logs. + +To see what the operator would create, you can also use: + +```shell +kubectl apply -f sidb-test.yaml --dry-run=server -o yaml +``` + +This shows the complete resource definition as the operator would see it, including any default values the operator would add. + +### Monitoring the Oracle Database Operator + +#### Check operator logs + +View the operator logs to troubleshoot issues or monitor activity: + +```shell +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager --tail=50 +``` + +This shows the last 50 log lines from the operator controller manager. 
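If you are actively reproducing a problem, it is often more useful to stream the logs in real time or filter for errors. A small sketch, assuming the default label selector used above:

```bash
# Follow operator logs as they are written
kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager -f

# Or keep only error-level entries from recent history
kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager --tail=500 | grep -i error
```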
+ diff --git a/docs-source/site/docs/platform/esooperator.md b/docs-source/site/docs/platform/esooperator.md index bdae06276..913325a08 100644 --- a/docs-source/site/docs/platform/esooperator.md +++ b/docs-source/site/docs/platform/esooperator.md @@ -15,6 +15,455 @@ Full [documentation](https://external-secrets.io/latest/) ![External Secrets Operator](images/diagrams-high-level-simple.png) +--- + +## Table of Contents + +- [Prerequisites and Assumptions](#prerequisites-and-assumptions) +- [Installing External Secrets Operator](#installing-external-secrets-operator) +- [Testing the External Secrets Operator](#testing-the-external-secrets-operator) + - [Apply the config](#apply-the-config) + - [Check the ExternalSecret status](#check-the-externalsecret-status) + - [Verify the secret was created](#verify-the-secret-was-created) + - [Cleanup](#cleanup) +- [Using with Production Secret Stores](#using-with-production-secret-stores) + - [OCI Vault Configuration](#oci-vault-configuration) + - [HashiCorp Vault Configuration](#hashicorp-vault-configuration) +- [Monitoring External Secrets](#monitoring-external-secrets) + - [Check ExternalSecret Status](#check-externalsecret-status) + - [View Detailed Status](#view-detailed-status) + - [Monitor External Secrets Operator Logs](#monitor-external-secrets-operator-logs) + - [Check Secret Sync Events](#check-secret-sync-events) + +--- + +### Prerequisites and Assumptions + +This guide makes the following assumptions: + +- **Namespace**: All examples use the `obaas-dev` namespace. Replace with your target namespace as needed. +- **External Secrets Operator Namespace**: The External Secrets Operator is installed in the `external-secrets` namespace (the default). +- **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions to: + - Create and manage SecretStores and ExternalSecrets + - View secrets in your namespace + - View logs from the external-secrets namespace +- **Command-line Tools**: The following tools are installed and available: + - `kubectl` - Kubernetes command-line tool + - `jq` - JSON processor (used for secret verification examples) +- **File References**: Examples reference `eso-test.yaml` and `eso-cleanup.yaml` files. You'll need to create these files with the YAML content provided in the examples. + ### Installing External Secrets Operator External Secrets Operator will be installed if the `external-secrets.enabled` is set to `true` in the `values.yaml` file. The default namespace for External Secrets Operator is `external-secrets`. + +### Testing the External Secrets Operator + +#### Apply the config + +To test the External Secrets Operator, you'll create two resources: a `SecretStore` and an `ExternalSecret`. + +The `SecretStore` defines where secrets are stored (in this case, using a fake provider for testing purposes). The `ExternalSecret` defines which secrets to retrieve and how to map them into a Kubernetes Secret. 
+ +Create a file named `eso-test.yaml` with the following content: + +```yaml +--- +# SecretStore using the fake provider for testing +apiVersion: external-secrets.io/v1 +kind: SecretStore +metadata: + name: fake-secret-store + namespace: obaas-dev +spec: + provider: + fake: + data: + - key: "test-secret-key" + value: "test-secret-value" + - key: "database-password" + value: "super-secret-password" + - key: "api-token" + value: "fake-api-token-12345" +--- +# ExternalSecret that references the fake SecretStore +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: test-external-secret + namespace: obaas-dev +spec: + refreshInterval: 1h + secretStoreRef: + name: fake-secret-store + kind: SecretStore + target: + name: my-test-secret + creationPolicy: Owner + data: + - secretKey: password + remoteRef: + key: database-password + - secretKey: token + remoteRef: + key: api-token +``` + +**Understanding the configuration:** + +- **SecretStore** (`fake-secret-store`): + - Uses the `fake` provider, which is designed for testing without requiring an external secret management system + - Contains three hardcoded key-value pairs that simulate secrets stored in an external vault + - In production, you would replace this with a real provider like OCI Vault, AWS Secrets Manager, etc. + +- **ExternalSecret** (`test-external-secret`): + - References the `fake-secret-store` SecretStore + - Creates a Kubernetes Secret named `my-test-secret` in the target namespace + - Uses `creationPolicy: Owner` so the Secret is automatically deleted when the ExternalSecret is removed + - Maps two secrets from the store: + - `database-password` → `password` (in the Kubernetes Secret) + - `api-token` → `token` (in the Kubernetes Secret) + - Refreshes every 1 hour (`refreshInterval: 1h`) to sync changes from the external source + +Apply the configuration: + +```shell +kubectl apply -f eso-test.yaml +``` + +You should see output similar to: +``` +secretstore.external-secrets.io/fake-secret-store created +externalsecret.external-secrets.io/test-external-secret created +``` + +The External Secrets Operator will automatically create a Kubernetes Secret named `my-test-secret` containing the mapped secrets. + +#### Check the ExternalSecret status + +After applying the configuration, verify that both the SecretStore and ExternalSecret resources were created successfully and are in the correct state. 
+ +```shell +kubectl get secretstores,externalsecrets -n obaas-dev +``` + +You should see output similar to: + +``` +NAME AGE STATUS CAPABILITIES READY +secretstore.external-secrets.io/fake-secret-store 10s Valid ReadWrite True + +NAME STORE REFRESH INTERVAL STATUS READY +externalsecret.external-secrets.io/test-external-secret fake-secret-store 1h SecretSynced True +``` + +**Understanding the output:** + +- **SecretStore Status**: + - `STATUS: Valid` - The SecretStore configuration is valid and the provider is accessible + - `READY: True` - The SecretStore is ready to be used by ExternalSecrets + - `CAPABILITIES: ReadWrite` - Indicates the store supports both reading and writing secrets + +- **ExternalSecret Status**: + - `STORE: fake-secret-store` - Shows which SecretStore this ExternalSecret is using + - `REFRESH INTERVAL: 1h` - How often the operator will check for updates from the external source + - `STATUS: SecretSynced` - The secret has been successfully synchronized from the external source + - `READY: True` - The ExternalSecret is ready and the target Kubernetes Secret has been created + +**Common status values:** + +- `SecretSynced` - Everything is working correctly, the secret has been created +- `SecretSyncedError` - There was an error syncing the secret (check the ExternalSecret events for details) +- `SecretDeleted` - The target secret was deleted (the operator will recreate it on the next sync) + +If the `READY` column shows `False`, use `kubectl describe` to get more details: + +```shell +kubectl describe externalsecret test-external-secret -n obaas-dev +``` + +#### Verify the secret was created + +The External Secrets Operator should have automatically created a Kubernetes Secret named `my-test-secret` based on the ExternalSecret configuration. Let's verify that the secret exists and contains the expected data. + +First, check that the secret exists: + +```shell +kubectl get secret my-test-secret -n obaas-dev +``` + +You should see output similar to: + +``` +NAME TYPE DATA AGE +my-test-secret Opaque 2 30s +``` + +The `DATA` column shows `2`, indicating the secret contains two key-value pairs (password and token). + +Now, view the secret's contents in a readable format: + +```shell +kubectl get secret my-test-secret -n obaas-dev -o jsonpath='{.data}' | jq -r 'to_entries[] | "\(.key): \(.value | @base64d)"' +``` + +You should see the decoded secret values: + +``` +password: super-secret-password +token: fake-api-token-12345 +``` + +Describe the secret to see metadata and ownership: + +```shell +kubectl describe secret my-test-secret -n obaas-dev +``` + +This will show that the secret is owned by the ExternalSecret resource, confirming it was created by the External Secrets Operator. + +**Troubleshooting:** + +If the secret doesn't exist: +- Check the ExternalSecret status with `kubectl get externalsecret test-external-secret -n obaas-dev` +- View ExternalSecret events with `kubectl describe externalsecret test-external-secret -n obaas-dev` +- Check the operator logs as described in the Monitoring section + +#### Cleanup + +To remove the test resources, delete the ExternalSecret and SecretStore: + +```shell +kubectl delete externalsecret test-external-secret -n obaas-dev +kubectl delete secretstore fake-secret-store -n obaas-dev +``` + +The `my-test-secret` Kubernetes Secret will be automatically deleted when the ExternalSecret is removed due to the `creationPolicy: Owner` setting. 
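+
+To confirm the cleanup completed, check that the generated secret is gone. Once the operator has removed it, this command should return a `NotFound` error:
+
+```shell
+kubectl get secret my-test-secret -n obaas-dev
+```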
+ +Alternatively, you can use a cleanup manifest: + +```yaml +# Cleanup file for External Secrets test resources +# This will remove the ExternalSecret, SecretStore, and the generated secret +--- +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: test-external-secret + namespace: obaas-dev +--- +apiVersion: external-secrets.io/v1 +kind: SecretStore +metadata: + name: fake-secret-store + namespace: obaas-dev +--- +# The my-test-secret will be automatically deleted when the ExternalSecret is deleted +# due to creationPolicy: Owner, but including it here for completeness +apiVersion: v1 +kind: Secret +metadata: + name: my-test-secret + namespace: obaas-dev +``` + +Apply with: +```shell +kubectl delete -f eso-cleanup.yaml +``` + +### Using with Production Secret Stores + +The External Secrets Operator can be configured to use production secret management systems like OCI Vault or HashiCorp Vault. + +#### OCI Vault Configuration + +To use OCI Vault, configure a SecretStore with workload identity authentication. + +For complete configuration details, see the [OCI Vault provider documentation](https://external-secrets.io/latest/provider/oracle-vault/). + +```yaml +apiVersion: external-secrets.io/v1 +kind: SecretStore +metadata: + name: oci-vault-store + namespace: obaas-dev +spec: + provider: + oracle: + vault: ocid1.vault.oc1.. + region: us-phoenix-1 + auth: + workload: + serviceAccountRef: + name: external-secrets-sa +``` + +**Key configuration points:** + +- `vault`: The OCID of your OCI Vault +- `region`: The OCI region where your vault is located +- `auth.workload`: Uses Kubernetes workload identity for authentication (recommended for production). See [Granting Workloads Access to OCI Resources](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm) for workload identity setup +- `serviceAccountRef.name`: The Kubernetes service account configured with OCI Workload Identity. This service account must be mapped to an OCI IAM principal with permissions to read secrets from the vault + +Create an ExternalSecret that references OCI Vault secrets: + +```yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: app-database-secret + namespace: obaas-dev +spec: + refreshInterval: 15m + secretStoreRef: + name: oci-vault-store + kind: SecretStore + target: + name: database-credentials + creationPolicy: Owner + data: + - secretKey: password + remoteRef: + key: ocid1.vaultsecret.oc1.. + - secretKey: username + remoteRef: + key: ocid1.vaultsecret.oc1.. +``` + +**Note:** The `remoteRef.key` must be the OCID of the secret in OCI Vault, not the secret name. + +**Alternative Authentication Methods:** + +In addition to workload identity, OCI Vault supports: +- User principal authentication (using API keys) +- Instance principal authentication (for compute instances) + +Refer to the [OCI Vault provider documentation](https://external-secrets.io/latest/provider/oracle-vault/) for detailed configuration options. + +#### HashiCorp Vault Configuration + +To use HashiCorp Vault, configure a SecretStore with appropriate authentication. + +For complete configuration details, see the [HashiCorp Vault provider documentation](https://external-secrets.io/latest/provider/hashicorp-vault/). 
+ +```yaml +apiVersion: external-secrets.io/v1 +kind: SecretStore +metadata: + name: vault-store + namespace: obaas-dev +spec: + provider: + vault: + server: "https://vault.example.com:8200" + path: "secret" + version: "v2" + auth: + kubernetes: + mountPath: "kubernetes" + role: "external-secrets-role" + serviceAccountRef: + name: external-secrets-sa +``` + +**Key configuration points:** + +- `server`: The URL of your HashiCorp Vault server +- `path`: The mount path where your secrets are stored (e.g., "secret" for KV v2 engine) +- `version`: The KV secrets engine version ("v1" or "v2") +- `auth.kubernetes`: Uses Kubernetes authentication method +- `mountPath`: The mount path of the Kubernetes auth method in Vault +- `role`: The Vault role that grants access to secrets + +Create an ExternalSecret that references HashiCorp Vault secrets: + +```yaml +apiVersion: external-secrets.io/v1 +kind: ExternalSecret +metadata: + name: app-config-secret + namespace: obaas-dev +spec: + refreshInterval: 10m + secretStoreRef: + name: vault-store + kind: SecretStore + target: + name: application-config + creationPolicy: Owner + data: + - secretKey: api-key + remoteRef: + key: myapp/config + property: api_key + - secretKey: db-password + remoteRef: + key: myapp/database + property: password +``` + +**Note:** For HashiCorp Vault: +- `remoteRef.key` is the path to the secret in Vault (e.g., "myapp/config") +- `remoteRef.property` is the specific field within that secret (for KV v2 engine) + +**Alternative Authentication Methods:** + +In addition to Kubernetes authentication, HashiCorp Vault supports: +- AppRole authentication +- Token authentication +- JWT/OIDC authentication +- AWS IAM authentication (when running in AWS) + +Refer to the [HashiCorp Vault provider documentation](https://external-secrets.io/latest/provider/hashicorp-vault/) for detailed configuration options. 
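+
+Whichever provider you configure, it is a good idea to confirm that the operator can reach the backend before wiring ExternalSecrets to it. Using the example store definitions above:
+
+```shell
+# STATUS should report Valid and READY should be True for your production store
+kubectl get secretstores -n obaas-dev
+```
+
+If a store shows `READY: False`, `kubectl describe secretstore <store-name> -n obaas-dev` typically surfaces the underlying authentication or connectivity error.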
+ +### Monitoring External Secrets + +#### Check ExternalSecret Status + +Monitor the sync status of your ExternalSecrets: + +```shell +kubectl get externalsecrets -n obaas-dev +``` + +The output shows the sync status, refresh time, and age: +``` +NAME STORE REFRESH INTERVAL STATUS READY +test-external-secret fake-secret-store 1h SecretSynced True +``` + +#### View Detailed Status + +For detailed information about a specific ExternalSecret: + +```shell +kubectl describe externalsecret test-external-secret -n obaas-dev +``` + +This shows: +- Current sync status +- Last sync time +- Any error messages +- Conditions and events + +#### Monitor External Secrets Operator Logs + +View the operator logs to troubleshoot sync issues: + +```shell +kubectl logs -n external-secrets -l app.kubernetes.io/name=external-secrets +``` + +Filter for specific ExternalSecret logs: + +```shell +kubectl logs -n external-secrets -l app.kubernetes.io/name=external-secrets | grep test-external-secret +``` + +#### Check Secret Sync Events + +Monitor Kubernetes events for your ExternalSecrets: + +```shell +kubectl get events -n obaas-dev --field-selector involvedObject.name=test-external-secret +``` diff --git a/docs-source/site/docs/relnotes/_category_.json b/docs-source/site/docs/relnotes/_category_.json new file mode 100644 index 000000000..0dc4df802 --- /dev/null +++ b/docs-source/site/docs/relnotes/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Release Notes", + "position": 5, + "link": { + "type": "generated-index", + "description": "Release Notes for Oracle Backend as a Service and AI (OBaaS)" + } +} diff --git a/docs-source/site/docs/relnotes/relnotes.md b/docs-source/site/docs/relnotes/relnotes.md new file mode 100644 index 000000000..0253b8269 --- /dev/null +++ b/docs-source/site/docs/relnotes/relnotes.md @@ -0,0 +1,169 @@ +--- +title: Release Notes - 2.0.0-M5 +sidebar_position: 1 +--- +## Overview + +This document provides comprehensive information about the container images used in OBaaS (Oracle Backend as a Service) version 2.0.0-M5. It includes two primary image lists and a comparison to help you understand the differences between them. + +### What's Included + +- **Installed Images**: Images that are automatically deployed when using the production Helm installation +- **Required Images**: Complete list of images needed for OBaaS, useful when mirroring to private registries +- **Differences**: A comparison highlighting what's unique to each list + +--- + +## Table of Contents + +- [Overview](#overview) +- [Image Inventories](#image-inventories) + - [Installed Images (Production Helm Deployment)](#installed-images-production-helm-deployment) + - [Required Images (Complete Set)](#required-images-complete-set) +- [Image Differences Analysis](#image-differences-analysis) + - [Summary](#summary) + - [Detailed Comparison](#detailed-comparison) + - [Additional Images in Required Set](#additional-images-in-required-set) + - [Registry Path Differences](#registry-path-differences) +- [Key Takeaways](#key-takeaways) + +--- + +## Image Inventories + +### Installed Images (Production Helm Deployment) + +The following **29 images** are installed in the Kubernetes cluster when using the production installation via Helm charts. These represent the core components that are automatically deployed. 
+ +| Description | Image Name | Version | +|-------------|------------|---------| +| Observability Exporter | container-registry.oracle.com/database/observability-exporter | 2.2.0 | +| Operator | container-registry.oracle.com/database/operator | 1.2.0 | +| Otmm | container-registry.oracle.com/database/otmm | 24.4.1 | +| Coherence Operator | container-registry.oracle.com/middleware/coherence-operator | 3.5.6 | +| Clickhouse Operator | docker.io/altinity/clickhouse-operator | 0.21.2 | +| Metrics Exporter | docker.io/altinity/metrics-exporter | 0.21.2 | +| Apisix | docker.io/apache/apisix | 3.14.1-debian | +| Etcd | docker.io/bitnamilegacy/etcd | 3.5.10-debian-11-r2 | +| Zookeeper | docker.io/bitnamilegacy/zookeeper | 3.7.1 | +| Busybox | docker.io/busybox | 1.36 | +| Clickhouse Server | docker.io/clickhouse/clickhouse-server | 25.5.6 | +| Opentelemetry Collector Contrib | docker.io/otel/opentelemetry-collector-contrib | 0.109.0 | +| Signoz Otel Collector | docker.io/signoz/signoz-otel-collector | v0.129.4 | +| Signoz Schema Migrator | docker.io/signoz/signoz-schema-migrator | v0.129.4 | +| Signoz | docker.io/signoz/signoz | v0.94.1 | +| External Secrets | oci.external-secrets.io/external-secrets/external-secrets | v1.0.0 | +| Cert Manager Cainjector | quay.io/jetstack/cert-manager-cainjector | v1.16.2 | +| Cert Manager Controller | quay.io/jetstack/cert-manager-controller | v1.16.2 | +| Cert Manager Webhook | quay.io/jetstack/cert-manager-webhook | v1.16.2 | +| Kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.0 | +| Kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.1 | +| Operator | quay.io/strimzi/operator | 0.45.1 | +| Controller | registry.k8s.io/ingress-nginx/controller | v1.11.5 | +| Kube State Metrics | registry.k8s.io/kube-state-metrics/kube-state-metrics | v2.17.0 | +| Metrics Server | registry.k8s.io/metrics-server/metrics-server | v0.7.2 | +| Admin Server | us-phoenix-1.ocir.io/maacloud/mark-artifactory/admin-server | 2.0.0-M5 | +| Conductor Server | us-phoenix-1.ocir.io/maacloud/mark-artifactory/conductor-server | 2.0.0-M5 | +| Eureka | us-phoenix-1.ocir.io/maacloud/mark-artifactory/eureka | 2.0.0-M5 | + +--- + +**Generated on:** Wed Nov 19 20:12:29 CST 2025 + +--- + +### Required Images (Complete Set) + +The following **42 images** represent the complete set of container images required for OBaaS installation. This list is particularly useful when: + +- Setting up OBaaS in air-gapped or restricted environments +- Mirroring images to a private container registry +- Using the `private_repo_helper.sh` script to copy images to your own repository + +**Note:** This list includes all installed images plus additional images needed for build processes, initialization tasks, and optional features. 
+ +| Description | Name | Version | +|-------------|------|---------| +| observability-exporter | container-registry.oracle.com/database/observability-exporter | 2.2.0 | +| operator | container-registry.oracle.com/database/operator | 1.2.0 | +| otmm | container-registry.oracle.com/database/otmm | 24.4.1 | +| coherence-ce | container-registry.oracle.com/middleware/coherence-ce | 25.03.2 | +| coherence-operator | container-registry.oracle.com/middleware/coherence-operator | 3.5.6 | +| adb-free | container-registry.oracle.com/database/adb-free | 25.10.2.1 | +| signoz-histograms | obaas-docker-release.dockerhub-iad.oci.oraclecorp.com/signoz-histograms | v0.0.1 | +| clickhouse-operator | docker.io/altinity/clickhouse-operator | 0.21.2 | +| metrics-exporter | docker.io/altinity/metrics-exporter | 0.21.2 | +| apisix | docker.io/apache/apisix | 3.14.1-debian | +| etcd | docker.io/bitnamilegacy/etcd | 3.5.10-debian-11-r2 | +| zookeeper | docker.io/bitnamilegacy/zookeeper | 3.7.1 | +| busybox | docker.io/busybox | 1.36 | +| clickhouse-server | docker.io/clickhouse/clickhouse-server | 25.5.6 | +| k8s-wait-for | docker.io/groundnuty/k8s-wait-for | v2.0 | +| yq | docker.io/linuxserver/yq | 3.4.3 | +| opentelemetry-collector-contrib | docker.io/otel/opentelemetry-collector-contrib | 0.109.0 | +| signoz | docker.io/signoz/signoz | v0.94.1 | +| signoz-otel-collector | docker.io/signoz/signoz-otel-collector | v0.129.4 | +| signoz-schema-migrator | docker.io/signoz/signoz-schema-migrator | v0.129.4 | +| external-secrets | oci.external-secrets.io/external-secrets/external-secrets | v1.0.0 | +| admin-server | phx.ocir.io/maacloud/mark-artifactory/admin-server | 2.0.0-M5 | +| conductor-server | phx.ocir.io/maacloud/mark-artifactory/conductor-server | 2.0.0-M5 | +| eureka | phx.ocir.io/maacloud/mark-artifactory/eureka | 2.0.0-M5 | +| cert-manager-cainjector | quay.io/jetstack/cert-manager-cainjector | v1.16.2 | +| cert-manager-controller | quay.io/jetstack/cert-manager-controller | v1.16.2 | +| cert-manager-startupapicheck | quay.io/jetstack/cert-manager-startupapicheck | v1.16.2 | +| cert-manager-webhook | quay.io/jetstack/cert-manager-webhook | v1.16.2 | +| kafka-bridge | quay.io/strimzi/kafka-bridge | 0.31.2 | +| kaniko-executor | quay.io/strimzi/kaniko-executor | 0.45.1 | +| maven-builder | quay.io/strimzi/maven-builder | 0.45.1 | +| operator | quay.io/strimzi/operator | 0.45.1 | +| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.0 | +| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.1 | +| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.0 | +| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.1 | +| curl-jq | registry.gitlab.com/gitlab-ci-utils/curl-jq | 3.2.0 | +| controller | registry.k8s.io/ingress-nginx/controller | v1.11.5 | +| kube-webhook-certgen | registry.k8s.io/ingress-nginx/kube-webhook-certgen | v1.5.2 | +| kube-state-metrics | registry.k8s.io/kube-state-metrics/kube-state-metrics | v2.17.0 | +| metrics-server | registry.k8s.io/metrics-server/metrics-server | v0.7.2 | + +--- + +## Image Differences Analysis + +### Summary + +The Required Images list contains **13 additional images** not present in the Installed Images list, plus **3 images with different registry paths**. These differences reflect the additional tooling and optional components needed for certain deployment scenarios. 
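+
+To cross-check these lists against a running installation, you can list the distinct container images currently referenced by pods in your cluster (a quick sketch; narrow it to specific namespaces if you only want the OBaaS components):
+
+```shell
+# List every unique container image in use across all namespaces
+kubectl get pods -A -o jsonpath="{.items[*].spec.containers[*].image}" | tr -s '[[:space:]]' '\n' | sort -u
+```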
+ +### Detailed Comparison + +#### Additional Images in Required Set + +The following images are required for setup, builds, and optional features but are not part of the standard production deployment: + +| Category | Description | Image Name | Version | Purpose | +|----------|-------------|------------|---------|---------| +| **Database** | Coherence CE | container-registry.oracle.com/middleware/coherence-ce | 25.03.2 | In-memory data grid runtime | +| **Database** | ADB Free | container-registry.oracle.com/database/adb-free | 25.10.2.1 | Autonomous Database Free tier | +| **Observability** | Signoz Histograms | obaas-docker-release.dockerhub-iad.oci.oraclecorp.com/signoz-histograms | v0.0.1 | Custom histogram processing | +| **Utilities** | K8s Wait For | docker.io/groundnuty/k8s-wait-for | v2.0 | Init container for waiting on resources | +| **Utilities** | YQ | docker.io/linuxserver/yq | 3.4.3 | YAML processing tool | +| **Utilities** | Curl-JQ | registry.gitlab.com/gitlab-ci-utils/curl-jq | 3.2.0 | HTTP client with JSON processing | +| **Certificate Management** | Cert Manager Startup API Check | quay.io/jetstack/cert-manager-startupapicheck | v1.16.2 | Cert-manager initialization validation | +| **Kafka/Streaming** | Kafka Bridge | quay.io/strimzi/kafka-bridge | 0.31.2 | HTTP bridge for Kafka | +| **Kafka/Streaming** | Kafka (3.8.1) | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.1 | Additional Kafka version | +| **Kafka/Streaming** | Kafka (3.9.0) | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.0 | Additional Kafka version | +| **Build Tools** | Kaniko Executor | quay.io/strimzi/kaniko-executor | 0.45.1 | Container image builder | +| **Build Tools** | Maven Builder | quay.io/strimzi/maven-builder | 0.45.1 | Java build tool | +| **Ingress** | Kube Webhook Certgen | registry.k8s.io/ingress-nginx/kube-webhook-certgen | v1.5.2 | Certificate generation for webhooks | + +#### Registry Path Differences + +The following images use different registry paths between the two lists: + +| Image | Installed Images Registry | Required Images Registry | Version | +|-------|---------------------------|-------------------------|---------| +| admin-server | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | +| conductor-server | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | +| eureka | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | + +**Note:** Both registry paths point to the same Phoenix region but use different URL formats. The `phx.ocir.io` format is the shortened alias for `us-phoenix-1.ocir.io`. diff --git a/docs-source/site/docs/setup/setup_dev/_category_.json b/docs-source/site/docs/setup/setup_dev/_category_.json new file mode 100644 index 000000000..727cb6cb2 --- /dev/null +++ b/docs-source/site/docs/setup/setup_dev/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Setup Development Environment", + "position": 2, + "link": { + "type": "generated-index", + "description": "Oracle Backend for Microservices and AI (OBaaS) setup and installation." 
+ } +} diff --git a/docs-source/site/docs/setup/setup_dev/setup.md b/docs-source/site/docs/setup/setup_dev/setup.md new file mode 100644 index 000000000..83f3e9144 --- /dev/null +++ b/docs-source/site/docs/setup/setup_dev/setup.md @@ -0,0 +1,5 @@ +--- +title: Introduction and Installation Flow +sidebar_position: 1 +--- +## TBD \ No newline at end of file diff --git a/docs-source/site/docs/setup/setup_prod/_category_.json b/docs-source/site/docs/setup/setup_prod/_category_.json new file mode 100644 index 000000000..82f889c93 --- /dev/null +++ b/docs-source/site/docs/setup/setup_prod/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Setup Production Environment", + "position": 1, + "link": { + "type": "generated-index", + "description": "Oracle Backend for Microservices and AI (OBaaS) setup and installation." + } +} diff --git a/docs-source/site/docs/setup/database.md b/docs-source/site/docs/setup/setup_prod/database.md similarity index 96% rename from docs-source/site/docs/setup/database.md rename to docs-source/site/docs/setup/setup_prod/database.md index 81dc350b2..e14820a75 100644 --- a/docs-source/site/docs/setup/database.md +++ b/docs-source/site/docs/setup/setup_prod/database.md @@ -48,7 +48,7 @@ Double-check all values before proceeding. Incorrect values will cause the datab Install the Helm chart using the following command: ```bash -helm --debug install obaas-db \ +helm upgrade --install --debug obaas-db \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -82,7 +82,7 @@ When installing multiple OBaaS instances in your cluster, each instance must hav **Example for development instance:** ```bash -helm --debug install obaas-db \ +helm upgrade --install --debug obaas-db \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -91,7 +91,7 @@ helm --debug install obaas-db \ **Example for production instance:** ```bash -helm --debug install obaas-prod-db \ +helm upgrade --install --debug obaas-prod-db \ --set global.obaasName="obaas-prod" \ --set global.targetNamespace="obaas-prod" \ ./ diff --git a/docs-source/site/docs/setup/media/image6.png b/docs-source/site/docs/setup/setup_prod/media/image6.png similarity index 100% rename from docs-source/site/docs/setup/media/image6.png rename to docs-source/site/docs/setup/setup_prod/media/image6.png diff --git a/docs-source/site/docs/setup/namespace.md b/docs-source/site/docs/setup/setup_prod/namespace.md similarity index 100% rename from docs-source/site/docs/setup/namespace.md rename to docs-source/site/docs/setup/setup_prod/namespace.md diff --git a/docs-source/site/docs/setup/obaas.md b/docs-source/site/docs/setup/setup_prod/obaas.md similarity index 98% rename from docs-source/site/docs/setup/obaas.md rename to docs-source/site/docs/setup/setup_prod/obaas.md index e518aa5de..c0f403bd9 100644 --- a/docs-source/site/docs/setup/obaas.md +++ b/docs-source/site/docs/setup/setup_prod/obaas.md @@ -75,7 +75,7 @@ Configure the following settings: Install the Helm chart using the following command: ```bash -helm --debug install obaas \ +helm upgrade --install --debug obaas \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -114,7 +114,7 @@ When installing multiple OBaaS instances in your cluster, each instance must hav **Example for development instance:** ```bash -helm --debug install obaas \ +helm upgrade --install --debug \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -123,7 +123,7 @@ helm --debug install 
obaas \ **Example for production instance:** ```bash -helm --debug install obaas-prod \ +helm upgrade --install --debug obaas-prod \ --set global.obaasName="obaas-prod" \ --set global.targetNamespace="obaas-prod" \ ./ diff --git a/docs-source/site/docs/setup/observability.md b/docs-source/site/docs/setup/setup_prod/observability.md similarity index 96% rename from docs-source/site/docs/setup/observability.md rename to docs-source/site/docs/setup/setup_prod/observability.md index eaa0f4928..f35cf4d3c 100644 --- a/docs-source/site/docs/setup/observability.md +++ b/docs-source/site/docs/setup/setup_prod/observability.md @@ -48,7 +48,7 @@ To install components into separate namespaces, override the global namespace by Install the Helm chart using the following command: ```bash -helm --debug install obaas-observability \ +helm upgrade --install --debug obaas-observability \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -82,7 +82,7 @@ When installing multiple OBaaS instances in your cluster, each instance must hav **Example for development instance:** ```bash -helm --debug install obaas-observability \ +helm upgrade --install --debug obaas-observability \ --set global.obaasName="obaas-dev" \ --set global.targetNamespace="obaas-dev" \ ./ @@ -91,7 +91,7 @@ helm --debug install obaas-observability \ **Example for production instance:** ```bash -helm --debug install obaas-prod-observability \ +helm upgrade --install --debug obaas-prod-observability \ --set global.obaasName="obaas-prod" \ --set global.targetNamespace="obaas-prod" \ ./ diff --git a/docs-source/site/docs/setup/obtaining.md b/docs-source/site/docs/setup/setup_prod/obtaining.md similarity index 100% rename from docs-source/site/docs/setup/obtaining.md rename to docs-source/site/docs/setup/setup_prod/obtaining.md diff --git a/docs-source/site/docs/setup/prereq-chart.md b/docs-source/site/docs/setup/setup_prod/prereq-chart.md similarity index 99% rename from docs-source/site/docs/setup/prereq-chart.md rename to docs-source/site/docs/setup/setup_prod/prereq-chart.md index 4920d58ca..46c62c352 100644 --- a/docs-source/site/docs/setup/prereq-chart.md +++ b/docs-source/site/docs/setup/setup_prod/prereq-chart.md @@ -51,7 +51,7 @@ Note the following required components: Install the Helm chart using the following command: ```bash -helm --debug install obaas-prereqs ./ +helm upgrade --install --debug obaas-prereqs ./ ``` **Installation notes:** diff --git a/docs-source/site/docs/setup/prereqs.md b/docs-source/site/docs/setup/setup_prod/prereqs.md similarity index 100% rename from docs-source/site/docs/setup/prereqs.md rename to docs-source/site/docs/setup/setup_prod/prereqs.md diff --git a/docs-source/site/docs/setup/secrets.md b/docs-source/site/docs/setup/setup_prod/secrets.md similarity index 100% rename from docs-source/site/docs/setup/secrets.md rename to docs-source/site/docs/setup/setup_prod/secrets.md diff --git a/docs-source/site/docs/setup/setup.md b/docs-source/site/docs/setup/setup_prod/setup.md similarity index 100% rename from docs-source/site/docs/setup/setup.md rename to docs-source/site/docs/setup/setup_prod/setup.md diff --git a/docs-source/site/package-lock.json b/docs-source/site/package-lock.json index 1ae1d1335..23218a706 100644 --- a/docs-source/site/package-lock.json +++ b/docs-source/site/package-lock.json @@ -232,6 +232,7 @@ "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-5.41.0.tgz", "integrity": 
"sha512-G9I2atg1ShtFp0t7zwleP6aPS4DcZvsV4uoQOripp16aR6VJzbEnKFPLW4OFXzX7avgZSpYeBAS+Zx4FOgmpPw==", "license": "MIT", + "peer": true, "dependencies": { "@algolia/client-common": "5.41.0", "@algolia/requester-browser-xhr": "5.41.0", @@ -357,6 +358,7 @@ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz", "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==", "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.3", @@ -2146,6 +2148,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=18" }, @@ -2168,6 +2171,7 @@ } ], "license": "MIT", + "peer": true, "engines": { "node": ">=18" } @@ -2277,6 +2281,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -2698,6 +2703,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -3561,6 +3567,7 @@ "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.9.2.tgz", "integrity": "sha512-C5wZsGuKTY8jEYsqdxhhFOe1ZDjH0uIYJ9T/jebHwkyxqnr4wW0jTkB72OMqNjsoQRcb0JN3PcSeTwFlVgzCZg==", "license": "MIT", + "peer": true, "dependencies": { "@docusaurus/core": "3.9.2", "@docusaurus/logger": "3.9.2", @@ -4264,6 +4271,7 @@ "resolved": "https://registry.npmjs.org/@mdx-js/react/-/react-3.1.1.tgz", "integrity": "sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==", "license": "MIT", + "peer": true, "dependencies": { "@types/mdx": "^2.0.0" }, @@ -4582,6 +4590,7 @@ "resolved": "https://registry.npmjs.org/@svgr/core/-/core-8.1.0.tgz", "integrity": "sha512-8QqtOQT5ACVlmsvKOJNEaWmRPmcojMOzCz4Hs2BGG/toAp/K38LcsMRyLp349glq5AzJbCEeimEoxaX6v/fLrA==", "license": "MIT", + "peer": true, "dependencies": { "@babel/core": "^7.21.3", "@svgr/babel-preset": "8.1.0", @@ -4953,6 +4962,7 @@ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.13.tgz", "integrity": "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ==", "license": "MIT", + "peer": true, "dependencies": { "csstype": "^3.0.2" } @@ -5303,6 +5313,7 @@ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "license": "MIT", + "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -5388,6 +5399,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.3", "fast-uri": "^3.0.1", @@ -5433,6 +5445,7 @@ "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-5.41.0.tgz", "integrity": "sha512-9E4b3rJmYbBkn7e3aAPt1as+VVnRhsR4qwRRgOzpeyz4PAOuwKh0HI4AN6mTrqK0S0M9fCCSTOUnuJ8gPY/tvA==", "license": "MIT", + "peer": true, "dependencies": { "@algolia/abtesting": "1.7.0", "@algolia/client-abtesting": "5.41.0", @@ -5896,6 
+5909,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "baseline-browser-mapping": "^2.8.9", "caniuse-lite": "^1.0.30001746", @@ -6852,6 +6866,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -8225,6 +8240,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -12682,6 +12698,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": "MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -13197,6 +13214,7 @@ } ], "license": "MIT", + "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -14100,6 +14118,7 @@ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.0.tgz", "integrity": "sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==", "license": "MIT", + "peer": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -14894,6 +14913,7 @@ "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", "license": "MIT", + "peer": true, "engines": { "node": ">=0.10.0" } @@ -14903,6 +14923,7 @@ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", "license": "MIT", + "peer": true, "dependencies": { "scheduler": "^0.26.0" }, @@ -14958,6 +14979,7 @@ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-6.0.0.tgz", "integrity": "sha512-YMMxTUQV/QFSnbgrP3tjDzLHRg7vsbMn8e9HAa8o/1iXoiomo48b7sk/kkmWEuWNDPJVlKSJRB6Y2fHqdJk+SQ==", "license": "MIT", + "peer": true, "dependencies": { "@types/react": "*" }, @@ -14986,6 +15008,7 @@ "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", "license": "MIT", + "peer": true, "dependencies": { "@babel/runtime": "^7.12.13", "history": "^4.9.0", @@ -16758,7 +16781,8 @@ "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", - "license": "0BSD" + "license": "0BSD", + "peer": true }, "node_modules/type-fest": { "version": "2.19.0", @@ -16821,6 +16845,7 @@ "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", "devOptional": true, "license": "Apache-2.0", + "peer": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -17162,6 +17187,7 @@ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "license": 
"MIT", + "peer": true, "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -17369,6 +17395,7 @@ "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.101.3.tgz", "integrity": "sha512-7b0dTKR3Ed//AD/6kkx/o7duS8H3f1a4w3BYpIriX4BzIhjkn4teo05cptsxvLesHFKK5KObnadmCHBwGc+51A==", "license": "MIT", + "peer": true, "dependencies": { "@types/eslint-scope": "^3.7.7", "@types/estree": "^1.0.8", @@ -17955,6 +17982,7 @@ "resolved": "https://registry.npmjs.org/zod/-/zod-4.1.12.tgz", "integrity": "sha512-JInaHOamG8pt5+Ey8kGmdcAcg3OL9reK8ltczgHTAwNhMys/6ThXHityHxVV2p3fkw/c+MAvBHFVYHFZDmjMCQ==", "license": "MIT", + "peer": true, "funding": { "url": "https://github.com/sponsors/colinhacks" } From 2bd54f137c813ace98fed5d258b52efe0fec833d Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Wed, 19 Nov 2025 23:11:37 -0600 Subject: [PATCH 2/6] Linting --- docs-source/site/docs/platform/otmm.md | 2 +- docs-source/site/docs/relnotes/relnotes.md | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/docs-source/site/docs/platform/otmm.md b/docs-source/site/docs/platform/otmm.md index 05e5ea01c..0e0564bc4 100644 --- a/docs-source/site/docs/platform/otmm.md +++ b/docs-source/site/docs/platform/otmm.md @@ -6,7 +6,7 @@ sidebar_position: 9 [Oracle Transaction Manager for Microservices](https://www.oracle.com/database/transaction-manager-for-microservices/), also known as "MicroTx", simplifies application development and operations by enabling distributed transactions to ensure consistency across microservices deployed in Kubernetes. -Oracle Backend for Microservices and AI includes the free version of MicroTx, which has all of the functionality of the commercial version, but limits the number of transactions and only persists data in memory. It is recommended for evaluations and application development purposes. +Oracle Backend for Microservices and AI includes the free version of MicroTx, which has all the functionality of the commercial version, but limits the number of transactions and only persists data in memory. It is recommended for evaluations and application development purposes. 
MicroTx supports the following consistency models: diff --git a/docs-source/site/docs/relnotes/relnotes.md b/docs-source/site/docs/relnotes/relnotes.md index 0253b8269..4d83c74f2 100644 --- a/docs-source/site/docs/relnotes/relnotes.md +++ b/docs-source/site/docs/relnotes/relnotes.md @@ -25,7 +25,6 @@ This document provides comprehensive information about the container images used - [Detailed Comparison](#detailed-comparison) - [Additional Images in Required Set](#additional-images-in-required-set) - [Registry Path Differences](#registry-path-differences) -- [Key Takeaways](#key-takeaways) --- From befd3061ab018a1263a29a25958a072e28bbec01 Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Thu, 20 Nov 2025 08:47:55 -0600 Subject: [PATCH 3/6] Bug fix customer --- cloudbank-v5/customer/helm/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudbank-v5/customer/helm/values.yaml b/cloudbank-v5/customer/helm/values.yaml index 1ddeb2281..7973d7ceb 100644 --- a/cloudbank-v5/customer/helm/values.yaml +++ b/cloudbank-v5/customer/helm/values.yaml @@ -157,7 +157,7 @@ obaas: database: enabled: true # If true, database configuration will be injected # TODO: Set the name of your database credentials secret - credentialsSecret: account-db-secrets # e.g., my-app-db-secrets + credentialsSecret: customer-db-secrets # e.g., my-app-db-secrets # TODO: Set the name of your ADB wallet secret walletSecret: obaas-adb-tns-admin-1 # e.g., my-app-adb-wallet From 791c71c1c32dd6eb6254b3305c099b9d0d3db2ec Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Thu, 20 Nov 2025 10:47:04 -0600 Subject: [PATCH 4/6] Documentation updates --- docs-source/site/docs/deploy/buildpushapp.md | 5 + docs-source/site/docs/deploy/dbaccess.md | 14 + docs-source/site/docs/deploy/deployapp.md | 5 + docs-source/site/docs/deploy/introflow.md | 5 + docs-source/site/docs/intro.md | 5 + docs-source/site/docs/observability/acces.md | 18 +- .../site/docs/observability/configure.md | 5 + .../site/docs/observability/dashboards.md | 5 + .../site/docs/observability/dbexporter.md | 46 +- .../docs/observability/metricslogstraces.md | 5 + .../site/docs/observability/overview.md | 5 + .../site/docs/platform/apacheapisix.md | 161 ++++-- docs-source/site/docs/platform/apachekafka.md | 5 + docs-source/site/docs/platform/coherence.md | 15 +- docs-source/site/docs/platform/conductor.md | 18 +- docs-source/site/docs/platform/dbexporter.md | 181 ++++++- docs-source/site/docs/platform/dboperator.md | 498 +++++++++++++++++- docs-source/site/docs/platform/esooperator.md | 18 +- docs-source/site/docs/platform/eureka.md | 18 +- docs-source/site/docs/platform/otmm.md | 5 + .../site/docs/platform/sbadminserver.md | 18 +- .../site/docs/relnotes/_category_.json | 6 +- .../docs/relnotes/{relnotes.md => index.md} | 7 + docs-source/site/docs/setup/_category_.json | 4 +- docs-source/site/docs/setup/index.md | 84 +++ .../site/docs/setup/setup_dev/setup.md | 7 +- .../site/docs/setup/setup_prod/database.md | 14 + .../site/docs/setup/setup_prod/namespace.md | 5 + .../site/docs/setup/setup_prod/obaas.md | 5 + .../docs/setup/setup_prod/observability.md | 14 + .../site/docs/setup/setup_prod/obtaining.md | 5 + .../docs/setup/setup_prod/prereq-chart.md | 21 + .../site/docs/setup/setup_prod/prereqs.md | 5 + .../site/docs/setup/setup_prod/secrets.md | 5 + .../site/docs/setup/setup_prod/setup.md | 5 + 35 files changed, 1163 insertions(+), 79 deletions(-) rename docs-source/site/docs/relnotes/{relnotes.md => index.md} (94%) create mode 100644 
docs-source/site/docs/setup/index.md diff --git a/docs-source/site/docs/deploy/buildpushapp.md b/docs-source/site/docs/deploy/buildpushapp.md index 23b497aba..603e5511f 100644 --- a/docs-source/site/docs/deploy/buildpushapp.md +++ b/docs-source/site/docs/deploy/buildpushapp.md @@ -68,3 +68,8 @@ If the build and push is successful, you should get a message similar to this: [INFO] Finished at: 2025-09-24T12:18:53-05:00 [INFO] ------------------------------------------------------------------------ ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/deploy/dbaccess.md b/docs-source/site/docs/deploy/dbaccess.md index 1fecc9909..c6eefe756 100644 --- a/docs-source/site/docs/deploy/dbaccess.md +++ b/docs-source/site/docs/deploy/dbaccess.md @@ -10,6 +10,15 @@ This step is only necessary if your application is connecting to a database. If your application needs database access, first obtain the database user credentials. Then create a Kubernetes secret containing those credentials. The secret is referenced in your application deployment. +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n obaas-dev` as an example namespace. Replace `obaas-dev` with your actual application namespace in all commands. + +To verify your namespaces, run: +```bash +kubectl get namespaces +``` +::: + ### Create the secret for the application Create a secret with database access information. This secret is used by the application configuration and is injected during deployment. @@ -177,3 +186,8 @@ spec: secret: secretName: obaas-adb-tns-admin-1 ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/deploy/deployapp.md b/docs-source/site/docs/deploy/deployapp.md index 579c669a5..82b796c26 100644 --- a/docs-source/site/docs/deploy/deployapp.md +++ b/docs-source/site/docs/deploy/deployapp.md @@ -59,3 +59,8 @@ obaas: - `EUREKA_CLIENT_FETCH_REGISTRY` - `EUREKA_CLIENT_SERVICE_URL_DEFAULTZONE` - `EUREKA_INSTANCE_HOSTNAME` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/deploy/introflow.md b/docs-source/site/docs/deploy/introflow.md index d1029c2c4..e10bc4717 100644 --- a/docs-source/site/docs/deploy/introflow.md +++ b/docs-source/site/docs/deploy/introflow.md @@ -30,3 +30,8 @@ To deploy an application to OBaaS, follow this high-level workflow: - Update Chart.yaml with the application name. - Update values.yaml to match your configuration. - Install the Helm chart. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
diff --git a/docs-source/site/docs/intro.md b/docs-source/site/docs/intro.md index c90852911..e0561c0db 100644 --- a/docs-source/site/docs/intro.md +++ b/docs-source/site/docs/intro.md @@ -37,3 +37,8 @@ Deploy on Oracle Cloud Infrastructure, other cloud providers, or hybrid environm ### Simplified Operations OBaaS vastly simplifies the task of building, testing, and operating microservices platforms. From development through production, automated workflows and integrated tooling streamline every phase of the application lifecycle. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/observability/acces.md b/docs-source/site/docs/observability/acces.md index ad6e90802..62d9c55b0 100644 --- a/docs-source/site/docs/observability/acces.md +++ b/docs-source/site/docs/observability/acces.md @@ -4,14 +4,23 @@ sidebar_position: 2 --- ## How to access SigNoz -1. Get the _admin_ email and password for SigNoz. Replace the example namespace `observability` with the namespace where SigNoz is deployed. +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n observability` as the default namespace. If SigNoz is installed in a different namespace, replace `observability` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep signoz +``` +::: + +1. Get the _admin_ email and password for SigNoz: ```shell kubectl -n observability get secret signoz-authn -o jsonpath='{.data.email}' | base64 -d kubectl -n observability get secret signoz-authn -o jsonpath='{.data.password}' | base64 -d ``` -1. Expose the SigNoz user interface (UI) using this command. Replace the example namespace `observability` with the namespace where SigNoz is deployed: +1. Expose the SigNoz user interface (UI) using this command: ```shell kubectl -n observability port-forward svc/obaas-signoz-frontend 3301:3301 @@ -20,3 +29,8 @@ sidebar_position: 2 1. Open [SigNoz Login](http://localhost:3301/login) in a browser and login with the _admin_ email and the _password_ you have retrieved. ![SigNoz UI](images/obaas-signoz-ui.png) + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/observability/configure.md b/docs-source/site/docs/observability/configure.md index 15a50fcea..eea5e94fc 100644 --- a/docs-source/site/docs/observability/configure.md +++ b/docs-source/site/docs/observability/configure.md @@ -205,3 +205,8 @@ It also adds the `OTEL_EXPORTER_OTLP_ENDPOINT` to pod environment variables for - name: OTEL_EXPORTER_OTLP_ENDPOINT value: http://obaas-signoz-otel-collector.observability:4318 ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
diff --git a/docs-source/site/docs/observability/dashboards.md b/docs-source/site/docs/observability/dashboards.md index 5b1a23578..174fc7c30 100644 --- a/docs-source/site/docs/observability/dashboards.md +++ b/docs-source/site/docs/observability/dashboards.md @@ -197,3 +197,8 @@ Details about Transaction Manager for Microservices(https://docs.oracle.com/en/d ![MicroTx Dashboard](images/microtx-dashboard.png) +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). + diff --git a/docs-source/site/docs/observability/dbexporter.md b/docs-source/site/docs/observability/dbexporter.md index 70739141a..fcbf6c53b 100644 --- a/docs-source/site/docs/observability/dbexporter.md +++ b/docs-source/site/docs/observability/dbexporter.md @@ -1,9 +1,49 @@ --- -title: Oracle Database Metrics Exporter +title: Oracle Database Metrics Exporter sidebar_position: 6 --- ## Oracle Database Metrics Exporter -Oracle Database Metrics Exporter aims to provide observability for the Oracle Database so that users can understand performance and diagnose issues easily across applications and the database. Oracle Database Metrics Exporter delivers functionality to support both cloud and on-premises databases, including those running in Kubernetes and containers. +The Oracle Database Metrics Exporter is a key observability component that provides comprehensive metrics collection for Oracle Database. It helps users understand database performance and diagnose issues easily across applications and the database, supporting both cloud and on-premises deployments, including databases running in Kubernetes and containers. -See the [documentation](https://oracle.github.io/oracle-db-appdev-monitoring/) for how to install and configure Oracle Database Metrics Exporter. +:::info Complete Documentation Available +This exporter is documented in detail in the **[Platform Components](../platform/dbexporter.md)** section. 
For complete installation instructions, testing procedures, troubleshooting guidance, and dashboard configuration, please refer to the comprehensive guide: + +**[→ Oracle Database Metrics Exporter - Full Documentation](../platform/dbexporter.md)** +::: + +## Quick Links + +- **[Installation Guide](../platform/dbexporter.md#installing-oracle-database-metrics-exporter)** - Enable and deploy the exporter +- **[Testing Guide](../platform/dbexporter.md#testing-oracle-database-metrics-exporter)** - Verify metrics collection +- **[Dashboards](../platform/dbexporter.md#dashboards)** - View pre-built Grafana dashboards +- **[Troubleshooting](../platform/dbexporter.md#troubleshooting)** - Common issues and solutions +- **[Official Documentation](https://github.com/oracle/oracle-db-appdev-monitoring)** - Oracle's GitHub repository + +## What Metrics Are Available + +The exporter provides Prometheus-formatted metrics including: + +- **Database connectivity** - `oracledb_up` - Database instance reachability +- **Session metrics** - `oracledb_sessions_*` - Active sessions and session statistics +- **Tablespace metrics** - `oracledb_tablespace_*` - Tablespace usage and capacity +- **Activity metrics** - `oracledb_activity_*` - Database activity and performance +- **Transactional Event Queue metrics** - TxEventQ throughput and queue statistics + +These metrics are exposed on port 9161 and automatically scraped by Prometheus when properly configured. + +## Integration with OBaaS Observability + +The Database Metrics Exporter integrates seamlessly with the OBaaS observability stack: + +1. **Prometheus** automatically discovers and scrapes metrics from the exporter +2. **SigNoz** provides pre-built dashboards for visualizing database performance +3. **Service Discovery** ensures metrics are collected from all database instances +4. **Alert Manager** can trigger alerts based on database metric thresholds + +For the complete setup and configuration, see the **[full documentation](../platform/dbexporter.md)**. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/observability/metricslogstraces.md b/docs-source/site/docs/observability/metricslogstraces.md index ac47f1ac1..6f3caddd8 100644 --- a/docs-source/site/docs/observability/metricslogstraces.md +++ b/docs-source/site/docs/observability/metricslogstraces.md @@ -37,3 +37,8 @@ Traces can be filtered based on Service, HTTP Routes etc. Click on a trace to se ![SigNoz Traces Details](images/signoz-traces-details.png) Logs for a trace event can directly be accessed using the _Go to related logs_ link. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
diff --git a/docs-source/site/docs/observability/overview.md b/docs-source/site/docs/observability/overview.md index 46e7be4f7..b44251e86 100644 --- a/docs-source/site/docs/observability/overview.md +++ b/docs-source/site/docs/observability/overview.md @@ -23,3 +23,8 @@ More details can be found in the [SigNoz Documentation](https://signoz.io/docs/i ### Oracle Database Monitoring Explorer With Oracle Backend for Microservices and AI, you can choose to install [Oracle Database Monitoring Explorer](https://oracle.github.io/oracle-db-appdev-monitoring/). The tool provides observability for Oracle Database so that users can understand performance and diagnose issues easily across applications and the database. The project also provides dashboards for SigNoz. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/apacheapisix.md b/docs-source/site/docs/platform/apacheapisix.md index c1a98ca37..794e9182e 100644 --- a/docs-source/site/docs/platform/apacheapisix.md +++ b/docs-source/site/docs/platform/apacheapisix.md @@ -26,7 +26,7 @@ Apache APISIX offers: - [Accessing Apache APISIX](#accessing-apache-apisix) - [Port Forwarding to Admin API](#port-forwarding-to-admin-api) - [Retrieving the Admin Key](#retrieving-the-admin-key) - - [Retrieving the Gateway IP](#retrieving-the-gateway-ip) + - [Retrieving the APISIX IP](#retrieving-the-apisix-ip) - [Working with APISIX REST API](#working-with-apisix-rest-api) - [Creating an Upstream](#creating-an-upstream) - [Creating a Route](#creating-a-route) @@ -54,7 +54,7 @@ Apache APISIX offers: Apache APISIX will be installed if the `apisix.enabled` is set to `true` in the `values.yaml` file. The default namespace for Apache APISIX is `apisix`. **Key Components Installed:** -- **Apache APISIX Gateway**: The main Apache APISIX gateway that handles incoming traffic +- **Apache APISIX**: The main Apache APISIX that handles incoming traffic - **APISIX Dashboard**: Web UI for managing routes, upstreams, and plugins - **etcd**: Distributed key-value store used by APISIX for configuration storage @@ -62,11 +62,6 @@ Apache APISIX will be installed if the `apisix.enabled` is set to `true` in the This guide makes the following assumptions: -- **Namespace**: All examples use `YOUR-NAMESPACE` as a placeholder. Replace this with your actual target namespace (e.g., `obaas-dev`, `production`, `my-app`, etc.). -- **APISIX Namespace**: All APISIX-related commands in this guide use `apisix` as the namespace where Apache APISIX is deployed. **If you installed APISIX into a different namespace**, replace `apisix` with your actual APISIX namespace in all commands (e.g., `-n apisix` becomes `-n your-apisix-namespace`). You can verify your APISIX namespace with: - ```bash - kubectl get pods -A | grep apisix - ``` - **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions. - **Command-line Tools**: The following tools are installed and available: - `kubectl` - Kubernetes command-line tool @@ -75,9 +70,20 @@ This guide makes the following assumptions: - `yq` - YAML processor (optional, for retrieving admin key) - **Port Forwarding**: Examples assume you have an active port-forward to the APISIX admin service. 
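+
+Before moving on, you can confirm that the APISIX components are running, assuming the default `apisix` namespace (see the note below if APISIX was installed elsewhere):
+
+```shell
+# Expect the Apache APISIX, APISIX Dashboard, and etcd pods to be Running
+kubectl get pods -n apisix
+```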
+:::note Namespace Configuration +This guide uses two types of namespaces: +- **APISIX namespace** (`-n apisix`): Where Apache APISIX is deployed. All APISIX-related commands use this namespace. If you installed APISIX in a different namespace, replace `apisix` with your actual namespace in all commands. +- **Application namespace** (`YOUR-NAMESPACE` in examples): Where your backend services are deployed. Replace `YOUR-NAMESPACE` with your actual application namespace (e.g., `obaas-dev`, `production`, `my-app`, etc.). + +To find the APISIX namespace, run: +```bash +kubectl get pods -A | grep apisix +``` +::: + ### Accessing Apache APISIX -Oracle Backend for Microservices and AI deploys the Apache APISIX Gateway and Dashboard in the configured namespace (default: `apisix`). Apache APISIX is exposed via an external load balancer and an ingress controller for production traffic, while the admin API is accessed through port forwarding for management operations. +Oracle Backend for Microservices and AI deploys Apache APISIX and the APISIX Dashboard in the configured namespace (default: `apisix`). Apache APISIX is exposed via an external load balancer and an ingress controller for production traffic, while the admin API is accessed through port forwarding for management operations. #### Port Forwarding to Admin API @@ -151,24 +157,24 @@ Expected response if successful (shows available plugins): This confirms the admin key is valid and shows the plugins available in your APISIX installation. -#### Retrieving the Gateway IP +#### Retrieving the APISIX IP -To test routes through the APISIX gateway, you need the external IP address of the ingress controller. The following command will retrieve the IP, store it in the `GATEWAY_IP` environment variable, and display it: +To test routes through APISIX, you need the external IP address of the ingress controller. The following command will retrieve the IP, store it in the `APISIX_IP` environment variable, and display it: ```bash -export GATEWAY_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') && echo "Gateway IP: $GATEWAY_IP" +export APISIX_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}') && echo "APISIX IP: $APISIX_IP" ``` **Note:** If your load balancer uses a hostname instead of an IP address (common in AWS), use this command instead: ```bash -export GATEWAY_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') && echo "Gateway Hostname: $GATEWAY_IP" +export APISIX_IP=$(kubectl get svc ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') && echo "APISIX Hostname: $APISIX_IP" ``` **Verify Apache APISIX is accessible:** ```bash -curl -sS http://$GATEWAY_IP +curl -sS http://$APISIX_IP ``` You should receive a response (possibly a 404 if no routes are configured yet), confirming Apache APISIX is reachable.
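For reference, a fresh installation with no routes configured typically answers with APISIX's default 404 body; the output below is what current releases return and may vary slightly by version:

```bash
curl -sS http://$APISIX_IP
# Expected output on a fresh install (no routes defined yet):
# {"error_msg":"404 Route Not Found"}
```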
@@ -199,33 +205,33 @@ APISIX uses three key concepts to route traffic from clients to backend services │ HTTP/HTTPS ↓ ┌──────────────────────────────────────────────────────────────┐ -│ APISIX Gateway │ +│ APISIX │ │ │ -│ ┌────────────────────────────────────────────────────────┐ │ -│ │ Route: /api/users/* │ │ -│ │ ├─ Matches: URI, method, host, headers │ │ -│ │ ├─ Route-specific plugins (optional) │ │ -│ │ └─ Points to: Service OR Upstream │ │ -│ └────────────┬───────────────────────────────────────────┘ │ +│ ┌────────────────────────────────────────────────────────┐ │ +│ │ Route: /api/users/* │ │ +│ │ ├─ Matches: URI, method, host, headers │ │ +│ │ ├─ Route-specific plugins (optional) │ │ +│ │ └─ Points to: Service OR Upstream │ │ +│ └────────────┬───────────────────────────────────────────┘ │ │ │ │ │ ↓ │ -│ ┌────────────────────────────┐ │ -│ │ Service (Optional) │ ← Multiple routes can │ -│ │ ├─ Shared plugins │ share one service │ -│ │ ├─ Common configuration │ │ -│ │ └─ Points to: Upstream │ │ -│ └────────────┬───────────────┘ │ +│ ┌────────────────────────────┐ │ +│ │ Service (Optional) │ ← Multiple routes can │ +│ │ ├─ Shared plugins │ share one service │ +│ │ ├─ Common configuration │ │ +│ │ └─ Points to: Upstream │ │ +│ └────────────┬───────────────┘ │ │ │ │ │ ↓ │ -│ ┌────────────────────────────┐ │ -│ │ Upstream │ ← Multiple routes/services │ -│ │ ├─ Load balancing │ can share one upstream │ -│ │ ├─ Health checks │ │ -│ │ └─ Backend nodes: │ │ -│ │ • Pod 1 (weight: 3) │ │ -│ │ • Pod 2 (weight: 2) │ │ -│ │ • Pod 3 (weight: 1) │ │ -│ └────────────┬───────────────┘ │ +│ ┌────────────────────────────┐ │ +│ │ Upstream │ ← Multiple routes/services │ +│ │ ├─ Load balancing │ can share one upstream │ +│ │ ├─ Health checks │ │ +│ │ └─ Backend nodes: │ │ +│ │ • Pod 1 (weight: 3) │ │ +│ │ • Pod 2 (weight: 2) │ │ +│ │ • Pod 3 (weight: 1) │ │ +│ └────────────┬───────────────┘ │ │ │ │ └───────────────┼──────────────────────────────────────────────┘ │ @@ -416,17 +422,17 @@ curl -sS http://127.0.0.1:9180/apisix/admin/routes/3 \ #### Testing the Route -After creating a route, test it to ensure it's working correctly. Make sure you have retrieved the [Gateway IP](#retrieving-the-gateway-ip) and stored it in the `GATEWAY_IP` environment variable. +After creating a route, test it to ensure it's working correctly. Make sure you have retrieved the [APISIX IP](#retrieving-the-apisix-ip) and stored it in the `APISIX_IP` environment variable. 
**Test the route through Apache APISIX:** ```bash -# Using the Apache APISIX gateway IP/hostname -curl -sS http://$GATEWAY_IP/api/v1/users | jq +# Using the Apache APISIX IP/hostname +curl -sS http://$APISIX_IP/api/v1/users | jq -# Or port-forward to the Apache APISIX gateway for local testing -kubectl port-forward -n apisix svc/apisix-gateway 9080 -curl -sS http://127.0.0.1:9080/api/v1/users | jq +# Or port-forward to the Apache APISIX gateway service for local testing +kubectl port-forward -n apisix svc/apisix-gateway 9080 +curl -sS http://127.0.0.1:9080/api/v1/users | jq ``` **Verify the route exists:** @@ -674,10 +680,10 @@ curl -sS http://127.0.0.1:9180/apisix/admin/consumers \ ```bash # Without API key (should fail with 401 error) -curl -sS http://$GATEWAY_IP/api/v1/users | jq +curl -sS http://$APISIX_IP/api/v1/users | jq # With API key (should succeed) -curl -sS http://$GATEWAY_IP/api/v1/users -H "apikey: my-secret-api-key-12345" | jq +curl -sS http://$APISIX_IP/api/v1/users -H "apikey: my-secret-api-key-12345" | jq ``` #### Enable Rate Limiting @@ -955,7 +961,7 @@ curl -sS http://127.0.0.1:9180/apisix/admin/routes/1 \ }' | jq ``` -**Note:** HTTPS routes automatically use the SSL certificate that matches the route's `host` SNI. Ensure your Apache APISIX gateway is listening on port 9443 (or your configured HTTPS port). +**Note:** HTTPS routes automatically use the SSL certificate that matches the route's `host` SNI. Ensure your Apache APISIX is listening on port 9443 (or your configured HTTPS port). **List all SSL certificates:** @@ -983,7 +989,7 @@ curl -sS http://127.0.0.1:9180/apisix/admin/ssls/1 \ Note that all functionality is not available in the dashboard. You might need to use the REST APIs for advanced configurations. ::: -The APISIX Dashboard provides a web-based interface for visual management of routes, upstreams, consumers, and plugins. It offers an alternative to the Admin REST API for users who prefer graphical configuration over command-line operations, making it easier to visualize and manage your Apache APISIX gateway setup at a glance. +The APISIX Dashboard provides a web-based interface for visual management of routes, upstreams, consumers, and plugins. It offers an alternative to the Admin REST API for users who prefer graphical configuration over command-line operations, making it easier to visualize and manage your Apache APISIX setup at a glance. **Prerequisites:** @@ -1125,16 +1131,10 @@ curl -sS http://127.0.0.1:9180/apisix/admin/routes/999 \ Then test: ```bash -curl -sS http://$GATEWAY_IP/debug/test +curl -sS http://$APISIX_IP/debug/test ``` -This will return "matched debug route" confirming the route mat - - - - - -hed. +This will return "matched debug route", confirming the route matched. **Check upstream health status:** ```bash @@ -1150,6 +1150,54 @@ curl -sS http://127.0.0.1:9180/apisix/admin/upstreams/1 \ Temporarily increase APISIX logging to debug level by editing the APISIX ConfigMap and restarting the pods (use with caution in production). +**Step 1: Edit the APISIX ConfigMap to enable debug logging** + +```bash +kubectl edit configmap apisix -n apisix +``` + +In the editor that opens, find the `nginx_config` section and modify the `error_log_level` to `debug`: + +```yaml +nginx_config: + error_log_level: "debug" # Change from "warn" or "error" to "debug" +``` + +Save and exit the editor (`:wq` in vi/vim, or `Ctrl+O` then `Ctrl+X` in nano).
+ +**Step 2: Restart APISIX pods to apply changes** + +```bash +kubectl rollout restart deployment/apisix -n apisix +``` + +**Step 3: Monitor the logs** + +```bash +kubectl logs -n apisix -l app.kubernetes.io/name=apisix --tail=100 -f +``` + +**Step 4: Revert to normal logging when done** + +After debugging, restore the original log level to reduce log volume: + +```bash +kubectl edit configmap apisix -n apisix +``` + +Change `error_log_level` back to `warn` or `error`: + +```yaml +nginx_config: + error_log_level: "warn" # Back to normal level +``` + +Then restart the pods again: + +```bash +kubectl rollout restart deployment/apisix -n apisix +``` + --- ### Additional Resources @@ -1167,4 +1215,9 @@ Temporarily increase APISIX logging to debug level by editing the APISIX ConfigM - [Community Slack Channel](https://join.slack.com/t/the-asf/shared_invite/zt-vlfbf7ch-HkbNHiU_uDlcH_RvaHv9gQ) **Oracle Resources:** -- [Oracle Backend for Microservices and AI Documentation](https://oracle.github.io/microservices-datadriven/) +- [Oracle Backend for Microservices and AI Documentation](https://oracle.github.io/microservices-datadriven/obaas/) + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/apachekafka.md b/docs-source/site/docs/platform/apachekafka.md index 3c46e359f..1a054fb37 100644 --- a/docs-source/site/docs/platform/apachekafka.md +++ b/docs-source/site/docs/platform/apachekafka.md @@ -60,3 +60,8 @@ spring: key-serializer: org.apache.kafka.common.serialization.StringSerializer value-serializer: org.apache.kafka.common.serialization.StringSerializer ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/coherence.md b/docs-source/site/docs/platform/coherence.md index e47d3a8a2..ebb01703c 100644 --- a/docs-source/site/docs/platform/coherence.md +++ b/docs-source/site/docs/platform/coherence.md @@ -35,7 +35,15 @@ Follow these steps to create a basic Coherence cluster named `mysample-cluster`: - Coherence Operator is installed and running - `kubectl` is configured to access your Kubernetes cluster -- You have a namespace where you want to deploy the cluster (e.g., `coherence`) + +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n coherence` as the default namespace. If the Coherence Operator is installed in a different namespace, replace `coherence` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep coherence +``` +::: ##### Step 1: Create the Coherence Cluster YAML @@ -192,3 +200,8 @@ coherence: server: startup-timeout: 60s ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
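If you followed the steps above and created the `mysample-cluster` example in the `coherence` namespace, a quick way to confirm the operator reconciled it is shown below. This is a sketch: the StatefulSet name and the `coherenceCluster` label follow the Coherence Operator's usual conventions and may differ in your operator version.

```bash
# The Coherence resource reports desired vs. ready replicas
kubectl get coherence mysample-cluster -n coherence

# The operator backs the deployment with a StatefulSet of the same name
kubectl rollout status statefulset/mysample-cluster -n coherence

# Pods created for the cluster carry the cluster name as a label
kubectl get pods -n coherence -l coherenceCluster=mysample-cluster
```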
diff --git a/docs-source/site/docs/platform/conductor.md b/docs-source/site/docs/platform/conductor.md index e4bce5abc..e027b0ed9 100644 --- a/docs-source/site/docs/platform/conductor.md +++ b/docs-source/site/docs/platform/conductor.md @@ -22,10 +22,19 @@ Conductor will be installed if the `conductor-server.enabled` is set to `true` i ### Accessing Conductor APIs -To access the Conductor APIs, use kubectl port-forward to create a secure channel to `service/conductor-server`. Run the following command to establish the secure tunnel (replace the example namespace `obaas-dev` with the namespace where the Conductor Server is deployed): +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n conductor-server` as the default namespace. If Conductor is installed in a different namespace, replace `conductor-server` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep conductor +``` +::: + +To access the Conductor APIs, use kubectl port-forward to create a secure channel to `service/conductor-server`. Run the following command to establish the secure tunnel: ```shell -kubectl port-forward -n obaas-dev svc/conductor-server 8080 +kubectl port-forward -n conductor-server svc/conductor-server 8080 ``` ### Testing the Conductor service @@ -91,3 +100,8 @@ Check the status of the workflow. This will return the data from https://restcou ```shell curl -s -X GET "http://localhost:8080/api/workflow/$WORKFLOW_ID" | jq ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/dbexporter.md b/docs-source/site/docs/platform/dbexporter.md index 046d8e645..fcd8ce452 100644 --- a/docs-source/site/docs/platform/dbexporter.md +++ b/docs-source/site/docs/platform/dbexporter.md @@ -2,16 +2,188 @@ title: Oracle Database Metrics Exporter sidebar_position: 5 --- -## Oracle Database Metrics Exporter +## Overview -This project provides observability for Oracle Database, helping users understand performance and diagnose issues across applications and the database. Over time, it will expand beyond metrics to include logging, tracing, and integrations with popular frameworks such as Spring Boot. It targets both cloud and on-premises deployments, including databases running in Kubernetes and containers. +The Oracle Database Metrics Exporter helps users understand performance and diagnose issues across applications and the database. It targets both cloud and on-premises deployments, including databases running in Kubernetes and containers. [Full documentation](https://github.com/oracle/oracle-db-appdev-monitoring).
+--- + +## Table of Contents + +- [Installing Oracle Database Metrics Exporter](#installing-oracle-database-metrics-exporter) +- [Dashboards](#dashboards) + - [Oracle Database Grafana Dashboard](#oracle-database-grafana-dashboard) + - [Transactional Event Queue Grafana Dashboard](#transactional-event-queue-grafana-dashboard) +- [Testing Oracle Database Metrics Exporter](#testing-oracle-database-metrics-exporter) + - [Verify the exporter is running](#verify-the-exporter-is-running) + - [Check metrics endpoint](#check-metrics-endpoint) + - [Verify metrics in Grafana](#verify-metrics-in-grafana) +- [Troubleshooting](#troubleshooting) + - [Exporter pod not running](#exporter-pod-not-running) + - [No metrics appearing in Grafana](#no-metrics-appearing-in-grafana) + - [Database connection errors](#database-connection-errors) + +--- + ### Installing Oracle Database Metrics Exporter Oracle Database Metrics Exporter will be installed if the `oracle-database-exporter.enabled` is set to `true` in the `values.yaml` file. The default namespace for Oracle Database Metrics Exporter is `oracle-database-exporter`. +--- + +## Testing Oracle Database Metrics Exporter + +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n oracle-database-exporter` as the default namespace. If the Oracle Database Metrics Exporter is installed in a different namespace, replace `oracle-database-exporter` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep oracle-database-exporter +``` +::: + +### Verify the exporter is running + +After installation, verify that the Oracle Database Metrics Exporter pod is running: + +```shell +kubectl get pods -n oracle-database-exporter +``` + +You should see output similar to: + +``` +NAME READY STATUS RESTARTS AGE +oracle-database-exporter-xxxxxxxxxx-xxxxx 1/1 Running 0 5m +``` + +Check the exporter logs to verify it is collecting metrics: + +```shell +kubectl logs -n oracle-database-exporter -l app=oracle-database-exporter --tail=50 +``` + +Look for successful metric collection messages and no connection errors. + +### Check metrics endpoint + +The exporter exposes Prometheus metrics on port 9161. You can verify metrics are being exported: + +```shell +kubectl port-forward -n oracle-database-exporter svc/oracle-database-exporter 9161:9161 +``` + +In another terminal, query the metrics endpoint: + +```shell +curl -sS http://localhost:9161/metrics +``` + +You should see Prometheus-formatted metrics including: + +- `oracledb_up` - Database connectivity status (1 = up, 0 = down) +- `oracledb_exporter_scrape_duration_seconds` - Scrape duration +- `oracledb_sessions_*` - Session metrics +- `oracledb_tablespace_*` - Tablespace metrics +- `oracledb_activity_*` - Activity metrics + +Example output: + +``` +# HELP oracledb_up Database instance reachable +# TYPE oracledb_up gauge +oracledb_up 1 + +# HELP oracledb_sessions_value Generic counter metric from v$sesstat view +# TYPE oracledb_sessions_value gauge +oracledb_sessions_value{type="ACTIVE"} 15 +``` +--- + +## Troubleshooting + +### Exporter pod not running + +**Check pod status:** + +```shell +kubectl get pods -n oracle-database-exporter +kubectl describe pod -n oracle-database-exporter -l app=oracle-database-exporter +``` + +**Common causes:** + +1. **Image pull errors** - Verify access to the container registry +2. 
**Configuration issues** - Check the ConfigMap for database connection settings: + ```shell + kubectl get configmap -n oracle-database-exporter + kubectl describe configmap oracle-database-exporter-config -n oracle-database-exporter + ``` + +3. **Secret missing** - Verify database credentials secret exists: + ```shell + kubectl get secret -n oracle-database-exporter + ``` + +### Database connection errors + +**Check exporter logs for connection errors:** + +```shell +kubectl logs -n oracle-database-exporter -l app=oracle-database-exporter --tail=100 +``` + +**Common connection errors:** + +1. **"ORA-01017: invalid username/password"** + - Verify database credentials in the secret: + ```shell + kubectl get secret -n oracle-database-exporter -o yaml + ``` + - Ensure the database user has proper privileges: + ```sql + GRANT CREATE SESSION TO ; + GRANT SELECT ON V_$SESSION TO ; + GRANT SELECT ON V_$SESSTAT TO ; + GRANT SELECT ON DBA_TABLESPACES TO ; + ``` + +2. **"Connection refused" or timeout errors** + - Verify database service is accessible: + ```shell + kubectl get svc | grep database + ``` + - Test connectivity from exporter namespace: + ```shell + kubectl run test-db-connection --rm -it --restart=Never \ + --image=container-registry.oracle.com/database/sqlcl:25.3.0 \ + -n oracle-database-exporter \ + -- bash -c "nc -zv 1521" + ``` + +3. **"ORA-12154: TNS:could not resolve the connect identifier"** + - Verify connection string format in configuration + - Check that the database service name is correct + +**Enable debug logging:** + +Edit the exporter deployment to add debug flags: + +```shell +kubectl edit deployment oracle-database-exporter -n oracle-database-exporter +``` + +Add environment variable: + +```yaml +env: +- name: LOG_LEVEL + value: "debug" +``` +## Dashboards + ### Oracle Database Grafana Dashboard ![Oracle Database Dashboard](images/exporter-running-against-basedb.png) @@ -19,3 +191,8 @@ Oracle Database Metrics Exporter will be installed if the `oracle-database-expor ### Transactional Event Queue Grafana Dashboard ![Oracle Database Dashboard](images/txeventq-dashboard-v2.png) + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
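After working through the checks above, a single end-to-end sanity test is to confirm that the exporter reports the database as reachable. A sketch that reuses the default service name and namespace from the testing section:

```bash
# Forward the exporter's metrics port locally (runs in the background)
kubectl port-forward -n oracle-database-exporter svc/oracle-database-exporter 9161:9161 &

# oracledb_up should be 1 when the exporter can reach the database
curl -sS http://localhost:9161/metrics | grep '^oracledb_up'

# Stop the background port-forward when finished
kill %1
```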
\ No newline at end of file diff --git a/docs-source/site/docs/platform/dboperator.md b/docs-source/site/docs/platform/dboperator.md index 5168f2092..91af73212 100644 --- a/docs-source/site/docs/platform/dboperator.md +++ b/docs-source/site/docs/platform/dboperator.md @@ -20,8 +20,19 @@ Learn about using the OraOperator in the Livelab [Microservices and Kubernetes f - [Verify the operator is running](#verify-the-operator-is-running) - [Verify CRDs are installed](#verify-crds-are-installed) - [Validate a sample resource](#validate-a-sample-resource) + - [Create and test a database instance](#create-and-test-a-database-instance) + - [Test database connectivity](#test-database-connectivity) + - [Clean up test resources](#clean-up-test-resources) - [Monitoring the Oracle Database Operator](#monitoring-the-oracle-database-operator) - [Check operator logs](#check-operator-logs) + - [Monitor database resource status](#monitor-database-resource-status) +- [Troubleshooting](#troubleshooting) + - [Operator pod issues](#operator-pod-issues) + - [Database provisioning failures](#database-provisioning-failures) + - [Storage issues](#storage-issues) + - [Database connectivity issues](#database-connectivity-issues) + - [Common error messages](#common-error-messages) + - [Debugging tips](#debugging-tips) --- @@ -29,7 +40,6 @@ Learn about using the OraOperator in the Livelab [Microservices and Kubernetes f This guide makes the following assumptions: -- **Namespace**: The Oracle Database Operator is installed in the `oracle-database-operator-system` namespace (the default). - **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions to: - View operator pods and CRDs - Create and manage database custom resources @@ -38,6 +48,15 @@ This guide makes the following assumptions: - `kubectl` - Kubernetes command-line tool - **File References**: Examples reference YAML files that you can create for testing purposes. +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n oracle-database-operator-system` as the default namespace. If the Oracle Database Operator is installed in a different namespace, replace `oracle-database-operator-system` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep oracle-database-operator +``` +::: + ### Installing the Oracle Database Operator for Kubernetes Oracle Database Operator for Kubernetes will be installed if the `oracle-database-operator.enabled` is set to `true` in the `values.yaml` file. The default namespace for Oracle Database Operator is `oracle-database-operator-system`. @@ -159,6 +178,121 @@ kubectl apply -f sidb-test.yaml --dry-run=server -o yaml This shows the complete resource definition as the operator would see it, including any default values the operator would add. +#### Create and test a database instance + +To fully test the operator's functionality, create an actual database instance. This example uses Oracle Database Free edition which is lightweight and quick to deploy. 
+ +Create a file named `sidb-live-test.yaml`: + +```yaml +apiVersion: database.oracle.com/v1alpha1 +kind: SingleInstanceDatabase +metadata: + name: sidb-test + namespace: default +spec: + sid: FREE + edition: free + adminPassword: + secretName: sidb-admin-secret + image: + pullFrom: container-registry.oracle.com/database/free:latest-lite + prebuiltDB: true + persistence: + size: 20Gi + storageClass: "oci-bv" + accessMode: ReadWriteOnce +``` + +First, create the admin password secret: + +```shell +kubectl create secret generic sidb-admin-secret \ + --from-literal=password='MyTestPassword123#' \ + -n default +``` + +:::warning +Use a strong password in production environments. This is just a test example. +::: + +Apply the database resource: + +```shell +kubectl apply -f sidb-live-test.yaml +``` + +Monitor the database creation: + +```shell +kubectl get singleinstancedatabase sidb-test -w +``` + +**Expected progression:** + +1. Initial state: `STATUS` will show as `Creating` +2. Database provisioning: Pod is created and database initialization begins +3. Ready state: `STATUS` shows `Healthy` and `READY` is `true` + +This process typically takes 5-10 minutes for the Free edition with prebuiltDB enabled. + +#### Test database connectivity + +Once the database is ready, test connectivity using a SQLcl client pod: + +```shell +kubectl run sqlcl-test --rm -it --restart=Never \ + --image=container-registry.oracle.com/database/sqlcl:25.3.0 \ + -- bash +``` + +Inside the pod, connect to the database using SQLcl: + +```shell +sql sys/MyTestPassword123#@sidb-test.default.svc.cluster.local:1521/FREEPDB1 as sysdba +``` + +**Understanding the connection string:** + +- `sys` - Database administrator user +- `MyTestPassword123#` - Password from the secret +- `sidb-test.default.svc.cluster.local` - Kubernetes service DNS name +- `1521` - Oracle listener port +- `FREEPDB1` - Default pluggable database (PDB) name for Free edition + +Run a simple query to verify functionality: + +```sql +SELECT 'Database is working!' as status FROM dual; +EXIT; +``` + +Expected output: + +``` +STATUS +------------------- +Database is working! +``` + +Exit the test pod (it will be automatically deleted due to `--rm` flag). + +#### Clean up test resources + +After testing, remove the test database and secret: + +```shell +kubectl delete singleinstancedatabase sidb-test +kubectl delete secret sidb-admin-secret +``` + +**Note:** The persistent volume claim (PVC) may not be automatically deleted. Check and remove it if needed: + +```shell +kubectl get pvc +kubectl delete pvc sidb-test-pvc # Replace with actual PVC name +``` + ### Monitoring the Oracle Database Operator #### Check operator logs @@ -171,3 +305,365 @@ kubectl logs -n oracle-database-operator-system -l control-plane=controller-mana This shows the last 50 log lines from the operator controller manager. 
+To follow logs in real-time: + +```shell +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager -f +``` + +Filter logs for a specific database resource: + +```shell +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager | grep "sidb-test" +``` + +#### Monitor database resource status + +Check the status of all database resources: + +```shell +# Single Instance Databases +kubectl get singleinstancedatabase --all-namespaces + +# Autonomous Databases +kubectl get autonomousdatabase --all-namespaces + +# Autonomous Container Databases +kubectl get autonomouscontainerdatabase --all-namespaces +``` + +Get detailed status for a specific database: + +```shell +kubectl describe singleinstancedatabase -n +``` + +The status section shows: + +- **Status**: Current state (Creating, Healthy, Unhealthy, etc.) +- **Conditions**: Detailed condition checks +- **Events**: Recent events related to the database +- **Connect String**: Connection information once ready + +Watch for status changes: + +```shell +kubectl get singleinstancedatabase -w +``` + +--- + +## Troubleshooting + +### Operator pod issues + +**Problem: Operator pod is not running** + +Check pod status: + +```shell +kubectl get pods -n oracle-database-operator-system +kubectl describe pod -n oracle-database-operator-system -l control-plane=controller-manager +``` + +Common causes: + +1. **Image pull errors**: Verify image access and credentials +2. **Resource constraints**: Check if the node has sufficient CPU/memory +3. **RBAC permissions**: Ensure service account has correct permissions + +Check events for the operator deployment: + +```shell +kubectl get events -n oracle-database-operator-system --sort-by='.lastTimestamp' +``` + +**Problem: Operator pod is CrashLoopBackOff** + +View recent logs: + +```shell +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager --previous +``` + +The `--previous` flag shows logs from the previous (crashed) container instance. + +Common fixes: + +- Verify webhook certificates are properly configured +- Check for conflicts with other operators +- Ensure all CRDs are properly installed + +### Database provisioning failures + +**Problem: Database stays in "Creating" state** + +Check the database resource status: + +```shell +kubectl describe singleinstancedatabase +``` + +Look for: + +- **Conditions**: Shows specific errors or warnings +- **Events**: Recent activities and failures +- **Status Message**: Detailed error description + +Common issues: + +1. **Pod not starting**: Check pod status + ```shell + kubectl get pods -l app= + kubectl describe pod + ``` + +2. **Initialization errors**: Check database pod logs + ```shell + kubectl logs + ``` + +3. **Insufficient resources**: Verify node resources + ```shell + kubectl describe node + ``` + +**Problem: Database status shows "Unhealthy"** + +This indicates the database pod is running but failing health checks. + +Check pod logs for errors: + +```shell +kubectl logs --tail=100 +``` + +Verify the database listener is running: + +```shell +kubectl exec -- lsnrctl status +``` + +Check database alert log: + +```shell +kubectl exec -- tail -100 /opt/oracle/diag/rdbms/*/*/trace/alert*.log +``` + +### Storage issues + +**Problem: PVC remains in "Pending" state** + +Check PVC status: + +```shell +kubectl describe pvc +``` + +Common causes: + +1. 
**No storage class available** + - List available storage classes: + ```shell + kubectl get storageclass + ``` + - Specify a valid storage class in your database spec: + ```yaml + spec: + persistence: + storageClass: "oci-bv" # or your cluster's storage class + ``` + +2. **Insufficient storage quota** + - Verify namespace resource quota + - Request a smaller volume size + +3. **Storage class not supporting access mode** + - Verify the storage class supports the requested access mode (ReadWriteOnce, ReadWriteMany) + +**Problem: Database fails with volume mount errors** + +Check pod events and logs: + +```shell +kubectl describe pod +kubectl logs +``` + +Verify PVC is bound: + +```shell +kubectl get pvc +``` + +Ensure the persistent volume has correct permissions: + +```shell +kubectl exec -- ls -la /opt/oracle/oradata +``` + +### Database connectivity issues + +**Problem: Cannot connect to database from application** + +1. **Verify database service exists** + ```shell + kubectl get svc | grep + ``` + +2. **Check service endpoints** + ```shell + kubectl get endpoints + ``` + Endpoints should point to the database pod IP. + +3. **Test connectivity from within cluster** + ```shell + kubectl run test-connectivity --rm -it --restart=Never \ + --image=busybox -- sh -c "nc -zv 1521" + ``` + +4. **Verify listener is running** + ```shell + kubectl exec -- lsnrctl status + ``` + +5. **Check network policies** + ```shell + kubectl get networkpolicies -n + ``` + Ensure network policies allow traffic to the database pod. + +**Problem: Connection refused errors** + +- Verify the database is fully initialized and healthy +- Check if listener is on the correct port (default: 1521) +- Ensure firewall rules allow traffic on port 1521 +- Verify the service name and namespace in connection string + +### Common error messages + +**"Insufficient CPU/Memory"** + +The node doesn't have enough resources to schedule the database pod. + +Solution: + +- Reduce resource requests in database spec +- Add more nodes to the cluster +- Remove unnecessary workloads + +**"Failed to create database: Invalid SID"** + +The SID doesn't match the edition requirements. + +Solution: + +- For Free edition, SID must be "FREE" +- For Enterprise/Standard, use alphanumeric SID (max 8 characters) + +**"Webhook call failed"** + +The operator's admission webhook is not responding. + +Solution: + +```shell +# Check webhook configuration +kubectl get validatingwebhookconfigurations +kubectl get mutatingwebhookconfigurations + +# Restart operator +kubectl rollout restart deployment -n oracle-database-operator-system oracle-database-operator-controller-manager +``` + +**"Secret not found"** + +The admin password secret doesn't exist or is in wrong namespace. 
+ +Solution: + +- Create the secret in the same namespace as the database resource +- Verify secret name matches the database spec + +### Debugging tips + +**Enable debug logging** + +Modify the operator deployment to enable debug logs: + +```shell +kubectl edit deployment oracle-database-operator-controller-manager -n oracle-database-operator-system +``` + +Add or modify the `--zap-log-level` argument to `debug`: + +```yaml +spec: + template: + spec: + containers: + - args: + - --zap-log-level=debug +``` + +**Check operator metrics** + +The operator exposes Prometheus metrics: + +```shell +kubectl port-forward -n oracle-database-operator-system \ + svc/oracle-database-operator-controller-manager-metrics-service 8443:8443 +``` + +Access metrics at `https://localhost:8443/metrics` + +**Verify RBAC permissions** + +Check if the operator service account has correct permissions: + +```shell +kubectl get clusterrole | grep oracle-database-operator +kubectl describe clusterrole oracle-database-operator-manager-role +``` + +Verify service account bindings: + +```shell +kubectl get clusterrolebinding | grep oracle-database-operator +kubectl describe clusterrolebinding oracle-database-operator-manager-rolebinding +``` + +**Get operator version** + +```shell +kubectl get deployment oracle-database-operator-controller-manager \ + -n oracle-database-operator-system -o jsonpath='{.spec.template.spec.containers[0].image}' +``` + +**Export database resource for inspection** + +```shell +kubectl get singleinstancedatabase -o yaml > database-export.yaml +``` + +This exports the complete resource including status, allowing offline analysis or sharing for support. + +**Useful log patterns to search for** + +```shell +# Find reconciliation errors +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager | grep "ERROR" + +# Find specific database operations +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager | grep "Reconciling SingleInstanceDatabase" + +# Find webhook validation issues +kubectl logs -n oracle-database-operator-system -l control-plane=controller-manager | grep "webhook" +``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). + diff --git a/docs-source/site/docs/platform/esooperator.md b/docs-source/site/docs/platform/esooperator.md index 913325a08..5f72b5e19 100644 --- a/docs-source/site/docs/platform/esooperator.md +++ b/docs-source/site/docs/platform/esooperator.md @@ -41,8 +41,6 @@ Full [documentation](https://external-secrets.io/latest/) This guide makes the following assumptions: -- **Namespace**: All examples use the `obaas-dev` namespace. Replace with your target namespace as needed. -- **External Secrets Operator Namespace**: The External Secrets Operator is installed in the `external-secrets` namespace (the default). - **Kubectl Access**: You have kubectl configured and authenticated to your Kubernetes cluster with appropriate permissions to: - Create and manage SecretStores and ExternalSecrets - View secrets in your namespace @@ -52,6 +50,17 @@ This guide makes the following assumptions: - `jq` - JSON processor (used for secret verification examples) - **File References**: Examples reference `eso-test.yaml` and `eso-cleanup.yaml` files. You'll need to create these files with the YAML content provided in the examples. 
+:::note Namespace Configuration +This guide uses two types of namespaces: +- **Operator namespace** (`-n external-secrets`): Where the External Secrets Operator is installed. Used when viewing operator logs or troubleshooting the operator itself. +- **Application namespace** (`-n obaas-dev` in examples): Where your SecretStores, ExternalSecrets, and application secrets are created. Replace `obaas-dev` with your actual application namespace. + +To find the operator namespace, run: +```bash +kubectl get pods -A | grep external-secrets +``` +::: + ### Installing External Secrets Operator External Secrets Operator will be installed if the `external-secrets.enabled` is set to `true` in the `values.yaml` file. The default namespace for External Secrets Operator is `external-secrets`. @@ -467,3 +476,8 @@ Monitor Kubernetes events for your ExternalSecrets: ```shell kubectl get events -n obaas-dev --field-selector involvedObject.name=test-external-secret ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/eureka.md b/docs-source/site/docs/platform/eureka.md index 1dbfc9a17..254b6c45c 100644 --- a/docs-source/site/docs/platform/eureka.md +++ b/docs-source/site/docs/platform/eureka.md @@ -12,10 +12,19 @@ Spring Boot Eureka Server will be installed if the `eureka.enabled` is set to `t ### Access Eureka Web User Interface -To access the Eureka Web User Interface, use kubectl port-forward to create a secure channel to `service/eureka`. Run the following command to establish the secure tunnel (replace the example namespace `obaas-dev` with the namespace where the Spring Boot Eureka Server is deployed): +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n eureka` as the default namespace. If the Spring Boot Eureka Server is installed in a different namespace, replace `eureka` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep eureka +``` +::: + +To access the Eureka Web User Interface, use kubectl port-forward to create a secure channel to `service/eureka`. Run the following command to establish the secure tunnel: ```shell -kubectl port-forward -n obaas-dev svc/eureka 8761 +kubectl port-forward -n eureka svc/eureka 8761 ``` Open the [Eureka web user interface](http://localhost:8761) @@ -70,3 +79,8 @@ server.features.eureka.client.register-with-eureka=true server.features.eureka.client.fetch-registry=true server.features.eureka.instance.preferIpAddress=true ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/otmm.md b/docs-source/site/docs/platform/otmm.md index 0e0564bc4..ceb990ea0 100644 --- a/docs-source/site/docs/platform/otmm.md +++ b/docs-source/site/docs/platform/otmm.md @@ -45,3 +45,8 @@ lra: ## Upgrading to the commercial version If you have licensed Oracle Transaction Manager for Microservices Enterprise Edition, please see the [documentation](https://docs.oracle.com/en/database/oracle/transaction-manager-for-microservices/23.4.1/index.html) for details of how to install and configure MicroTx. 
Oracle recommends that you perform a new installation rather than attempting to upgrade the provided MicroTx Free installation to the commercial version. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/platform/sbadminserver.md b/docs-source/site/docs/platform/sbadminserver.md index d5443de1e..8c7180681 100644 --- a/docs-source/site/docs/platform/sbadminserver.md +++ b/docs-source/site/docs/platform/sbadminserver.md @@ -14,12 +14,26 @@ Spring Boot Admin will be installed if the `admin-server.enabled` is set to `tru ### Access Spring Boot Admin Web Interface -To access the Spring Boot Admin Web Interface, use kubectl port-forward to create a secure channel to `service/admin-server`. Run the following command to establish the secure tunnel (replace the example namespace `obaas-dev` with the namespace where the Spring Boot Admin Server is deployed): +:::note Namespace Configuration +All `kubectl` commands in this guide use `-n admin-server` as the default namespace. If the Spring Boot Admin Server is installed in a different namespace, replace `admin-server` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep admin-server +``` +::: + +To access the Spring Boot Admin Web Interface, use kubectl port-forward to create a secure channel to `service/admin-server`. Run the following command to establish the secure tunnel: ```shell -kubectl port-forward -n obaas-dev svc/admin-server 8989 +kubectl port-forward -n admin-server svc/admin-server 8989 ``` Open the [Spring Boot Admin dashboard](http://localhost:8989) ![Spring Boot Admin Server](images/admin-server.png) + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/relnotes/_category_.json b/docs-source/site/docs/relnotes/_category_.json index 0dc4df802..9c5db95f3 100644 --- a/docs-source/site/docs/relnotes/_category_.json +++ b/docs-source/site/docs/relnotes/_category_.json @@ -1,8 +1,4 @@ { "label": "Release Notes", - "position": 5, - "link": { - "type": "generated-index", - "description": "Release Notes for Oracle Backend as a Service and AI (OBaaS)" - } + "position": 5 } diff --git a/docs-source/site/docs/relnotes/relnotes.md b/docs-source/site/docs/relnotes/index.md similarity index 94% rename from docs-source/site/docs/relnotes/relnotes.md rename to docs-source/site/docs/relnotes/index.md index 4d83c74f2..95c659d09 100644 --- a/docs-source/site/docs/relnotes/relnotes.md +++ b/docs-source/site/docs/relnotes/index.md @@ -1,6 +1,8 @@ --- title: Release Notes - 2.0.0-M5 sidebar_position: 1 +description: Comprehensive release notes for Oracle Backend for Microservices and AI (OBaaS) version 2.0.0, including container images, platform components, and deployment information. 
+keywords: [OBaaS, Oracle Backend, microservices, AI, release notes, container images, Kubernetes, deployment, version 2.0.0] --- ## Overview @@ -166,3 +168,8 @@ The following images use different registry paths between the two lists: | eureka | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | **Note:** Both registry paths point to the same Phoenix region but use different URL formats. The `phx.ocir.io` format is the shortened alias for `us-phoenix-1.ocir.io`. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/_category_.json b/docs-source/site/docs/setup/_category_.json index 4ff3b5556..5d9bdcd62 100644 --- a/docs-source/site/docs/setup/_category_.json +++ b/docs-source/site/docs/setup/_category_.json @@ -2,7 +2,7 @@ "label": "Setup", "position": 1, "link": { - "type": "generated-index", - "description": "Oracle Backend for Microservices and AI (OBaaS) setup and installation." + "type": "doc", + "id": "setup/index" } } diff --git a/docs-source/site/docs/setup/index.md b/docs-source/site/docs/setup/index.md new file mode 100644 index 000000000..ef4f62e0a --- /dev/null +++ b/docs-source/site/docs/setup/index.md @@ -0,0 +1,84 @@ +--- +title: Setup Overview +sidebar_position: 0 +description: Complete setup and installation guide for Oracle Backend for Microservices and AI (OBaaS). Compare production vs development environments, understand installation requirements, and choose the right deployment path for your needs. +keywords: [OBaaS, Oracle Backend, microservices, AI, setup, installation, production environment, development environment, Kubernetes, deployment, Helm, APISIX, Kafka, Coherence, Conductor, OTMM, observability] +--- + +# Overview + +Welcome to the OBaaS setup guide. This section will help you install and configure Oracle Backend for Microservices and AI for your environment. + +## Table of Contents + +- [Choosing Your Installation Path](#choosing-your-installation-path) + - [Production Environment](#production-environment) + - [Development Environment](#development-environment) +- [Getting Help](#getting-help) + +--- + +## Choosing Your Installation Path + +OBaaS offers two installation paths tailored to different use cases: + +### Production Environment + +The Production installation uses Helm charts for deployment and assumes that your infrastructure is already in place, including network infrastructure, Kubernetes cluster, and Oracle Database. All prerequisites must be met before beginning the installation. + +**Best for:** +- Production deployments requiring high availability and security +- Multi-tenant environments with resource isolation +- Enterprise deployments with external secrets management +- Comprehensive observability and monitoring requirements +- Teams requiring full platform capabilities + +For complete prerequisite details, see the [Prerequisites Guide](./setup_prod/prereqs.md). + +**What's included:** +- Complete platform component installation (APISIX, Kafka, Coherence, Conductor, etc.) 
+- Oracle Database with Transaction Manager for Microservices (OTMM) +- Full observability stack (metrics, logs, traces) +- External Secrets Operator for credential management +- Production-grade configuration and security settings + +[**→ Go to Production Setup**](./setup_prod/setup.md) + +--- + +### Development Environment -- TBD + +The Development installation uses Terraform to deploy both infrastructure and OBaaS, designed specifically for development and testing environments. This approach offers lower customization compared to Production but provides a faster, automated setup. The Terraform scripts support deployment on Azure, OCI, and AWS cloud platforms. Oracle OCI users can also leverage Oracle Resource Manager (ORM) for infrastructure deployment. + +**Best for:** +- Local development and testing +- Learning OBaaS features and capabilities +- Rapid prototyping and experimentation +- Resource-constrained environments +- Single-developer or small team environments + +**What's included:** +- Complete platform component installation (APISIX, Kafka, Coherence, Conductor, etc.) +- Oracle Database with Transaction Manager for Microservices (OTMM) +- Full observability stack (metrics, logs, traces) +- External Secrets Operator for credential management +- Production-grade configuration and security settings + +[**→ Go to Development Setup**](./setup_dev/setup.md) + +--- + +## Getting Help + +Each installation path includes: +- Detailed step-by-step instructions +- Example commands and configurations +- Verification steps and expected outputs +- Troubleshooting guidance +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). + +Choose the path that matches your requirements and follow the guides in sequence. diff --git a/docs-source/site/docs/setup/setup_dev/setup.md b/docs-source/site/docs/setup/setup_dev/setup.md index 83f3e9144..09ea71512 100644 --- a/docs-source/site/docs/setup/setup_dev/setup.md +++ b/docs-source/site/docs/setup/setup_dev/setup.md @@ -2,4 +2,9 @@ title: Introduction and Installation Flow sidebar_position: 1 --- -## TBD \ No newline at end of file +## TBD + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). \ No newline at end of file diff --git a/docs-source/site/docs/setup/setup_prod/database.md b/docs-source/site/docs/setup/setup_prod/database.md index e14820a75..2284925c1 100644 --- a/docs-source/site/docs/setup/setup_prod/database.md +++ b/docs-source/site/docs/setup/setup_prod/database.md @@ -99,6 +99,15 @@ helm upgrade --install --debug obaas-prod-db \ ## Verification +:::note Namespace Configuration +Commands in this guide use `-n oracle-database-operator-system` as the default namespace for the Oracle Database Operator. If you overrode the namespace during installation, replace `oracle-database-operator-system` with your actual namespace name in all commands.
+ +To find your namespace, run: +```bash +kubectl get pods -A | grep oracle-database-operator +``` +::: + ### View Installed Charts After installation completes, view the installed Helm charts: @@ -125,3 +134,8 @@ kubectl get pods -n oracle-database-operator-system ``` ![DB Operator pods](media/image6.png) + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/namespace.md b/docs-source/site/docs/setup/setup_prod/namespace.md index d99fd5dfc..f6a7bd74b 100644 --- a/docs-source/site/docs/setup/setup_prod/namespace.md +++ b/docs-source/site/docs/setup/setup_prod/namespace.md @@ -147,3 +147,8 @@ Check for resources preventing deletion: kubectl api-resources --verbs=list --namespaced -o name | \ xargs -n 1 kubectl get --show-kind --ignore-not-found -n obaas-dev ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/obaas.md b/docs-source/site/docs/setup/setup_prod/obaas.md index c0f403bd9..5a05c2fe9 100644 --- a/docs-source/site/docs/setup/setup_prod/obaas.md +++ b/docs-source/site/docs/setup/setup_prod/obaas.md @@ -191,3 +191,8 @@ If the installation fails, verify the following: 1. OCI configuration (if using ADB-S) matches the secret created earlier 1. For multiple instances, ensure unique `Release name`s, `obaasName`, and `targetNamespace` values 1. For multiple APISIX instances, verify different host names and/or ports are configured + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/observability.md b/docs-source/site/docs/setup/setup_prod/observability.md index f35cf4d3c..39ea04b42 100644 --- a/docs-source/site/docs/setup/setup_prod/observability.md +++ b/docs-source/site/docs/setup/setup_prod/observability.md @@ -99,6 +99,15 @@ helm upgrade --install --debug obaas-prod-observability \ ## Verification +:::note Namespace Configuration +Commands in this guide use `-n observability` as the default namespace for the observability components. If you overrode the namespace during installation, replace `observability` with your actual namespace name in all commands. + +To find your namespace, run: +```bash +kubectl get pods -A | grep signoz +``` +::: + ### View Installed Charts After installation completes, view the installed Helm charts: @@ -146,3 +155,8 @@ If pods fail to start or remain in a pending state: 2. Review pod logs your observability namespace: `kubectl logs -n observability` 3. Verify resource availability: `kubectl top nodes` 4. Ensure all prerequisite charts are installed and healthy + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). 
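For the log-review step in the observability troubleshooting list above, `kubectl logs` also needs a pod name or a label selector in addition to the namespace. A sketch, assuming the SigNoz components were installed from a Helm release named `signoz` (adjust the selector if your release name differs):

```bash
# List the pods first, then tail the logs of the one that is failing
kubectl get pods -n observability
kubectl logs -n observability <pod-name> --tail=100

# Or stream logs from every pod that belongs to the SigNoz release
kubectl logs -n observability -l app.kubernetes.io/instance=signoz --all-containers --tail=100 -f
```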
diff --git a/docs-source/site/docs/setup/setup_prod/obtaining.md b/docs-source/site/docs/setup/setup_prod/obtaining.md index f0dc099b4..f8e0e65b2 100644 --- a/docs-source/site/docs/setup/setup_prod/obtaining.md +++ b/docs-source/site/docs/setup/setup_prod/obtaining.md @@ -133,3 +133,8 @@ This script helps you: - Configure image pull secrets - Update chart values to point to private repositories - Set up registry authentication + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/prereq-chart.md b/docs-source/site/docs/setup/setup_prod/prereq-chart.md index 46c62c352..889c2a9c8 100644 --- a/docs-source/site/docs/setup/setup_prod/prereq-chart.md +++ b/docs-source/site/docs/setup/setup_prod/prereq-chart.md @@ -78,6 +78,22 @@ The prerequisites chart creates cluster-level resources shared by all OBaaS inst ## Verification +:::note Namespace Configuration +The prerequisite components are installed in multiple namespaces by default: +- `cert-manager` - Certificate management +- `external-secrets` - External secrets operator +- `ingress-nginx` - Ingress controller +- `metrics-server` - Metrics collection +- `kube-state-metrics` - Kubernetes state metrics + +If you overrode component namespaces during installation, replace the default namespace names with your actual namespaces in all commands below. + +To find your component namespaces, run: +```bash +kubectl get pods -A | grep -E "cert-manager|external-secrets|ingress-nginx|metrics-server|kube-state-metrics" +``` +::: + ### View Installed Charts After installation completes, verify the chart was installed successfully: @@ -255,3 +271,8 @@ kubectl top nodes ``` Scale your cluster if needed to provide additional capacity. + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/prereqs.md b/docs-source/site/docs/setup/setup_prod/prereqs.md index 845460c7f..b34359018 100644 --- a/docs-source/site/docs/setup/setup_prod/prereqs.md +++ b/docs-source/site/docs/setup/setup_prod/prereqs.md @@ -126,3 +126,8 @@ kube-node-lease Active 4m52s kube-public Active 4m52s kube-system Active 4m52s ``` + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/secrets.md b/docs-source/site/docs/setup/setup_prod/secrets.md index 9bdfe39b2..071809ce5 100644 --- a/docs-source/site/docs/setup/setup_prod/secrets.md +++ b/docs-source/site/docs/setup/setup_prod/secrets.md @@ -244,3 +244,8 @@ key_file=/app/runtime/.oci/oci_api_key.pem **Important note**: We recommend taking extra care to ensure these are all correct before moving on to the next step. If there are any errors here, the injection of the Database configuration will fail. **Note**: If you are planning to install multiple OBaaS instances, AND you want to use different OCI credentials, you need to create one of these secrets for EACH instance, and they must have different names. 
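One way to take that extra care is to decode the secret and eyeball every entry before installing OBaaS. A sketch; `oci-credentials` and `obaas-dev` are placeholders for whatever secret name and namespace you actually created:

```bash
# Dump and base64-decode every entry in the OCI credentials secret
kubectl get secret oci-credentials -n obaas-dev -o json \
  | jq -r '.data | to_entries[] | "--- \(.key) ---\n\(.value | @base64d)"'
```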
+ +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). diff --git a/docs-source/site/docs/setup/setup_prod/setup.md b/docs-source/site/docs/setup/setup_prod/setup.md index 58eb0882a..30e1f9a82 100644 --- a/docs-source/site/docs/setup/setup_prod/setup.md +++ b/docs-source/site/docs/setup/setup_prod/setup.md @@ -153,3 +153,8 @@ If you encounter issues: 3. **Review configuration** - verify values.yaml settings 4. **Consult documentation** - refer to component-specific guides 5. **Seek assistance** - contact the development team with details + +## Getting Help + +- [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace. +- [Open an issue in GitHub](https://github.com/oracle/microservices-datadriven/issues/new). From 30ddbc9563227325c4f7042a3e2fdf118e80151c Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Thu, 20 Nov 2025 13:25:13 -0600 Subject: [PATCH 5/6] Fix password --- docs-source/site/docs/platform/dboperator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs-source/site/docs/platform/dboperator.md b/docs-source/site/docs/platform/dboperator.md index 91af73212..545656ae0 100644 --- a/docs-source/site/docs/platform/dboperator.md +++ b/docs-source/site/docs/platform/dboperator.md @@ -208,7 +208,7 @@ First, create the admin password secret: ```shell kubectl create secret generic sidb-admin-secret \ - --from-literal=password='MyTestPassword123#' \ + --from-literal=password='MySuperSecretPassword' \ -n default ``` From fd96fa02cd2143188d6b656efeb0847a787583c3 Mon Sep 17 00:00:00 2001 From: Andy Tael Date: Thu, 20 Nov 2025 14:08:17 -0600 Subject: [PATCH 6/6] Updates --- docs-source/site/docs/platform/eureka.md | 174 ++++++++++ .../site/docs/platform/sbadminserver.md | 314 ++++++++++++++++++ docs-source/site/docs/relnotes/index.md | 165 +-------- 3 files changed, 490 insertions(+), 163 deletions(-) diff --git a/docs-source/site/docs/platform/eureka.md b/docs-source/site/docs/platform/eureka.md index 254b6c45c..6b8ece890 100644 --- a/docs-source/site/docs/platform/eureka.md +++ b/docs-source/site/docs/platform/eureka.md @@ -80,6 +80,180 @@ server.features.eureka.client.fetch-registry=true server.features.eureka.instance.preferIpAddress=true ``` +## Testing and Debugging + +### Verify Eureka Server is Running + +Check that the Eureka server pod is running and healthy: + +```bash +kubectl get pods -n eureka +``` + +Expected output: + +```text +NAME READY STATUS RESTARTS AGE +eureka-7b8f9d5c4d-x9k2m 1/1 Running 0 5m +``` + +Check the Eureka service: + +```bash +kubectl get svc -n eureka +``` + +Expected output: + +```text +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +eureka ClusterIP 10.96.123.456 8761/TCP 5m +``` + +### Test Service Registration + +After deploying an application configured to register with Eureka, verify it appears in the Eureka registry: + +**Option 1: Using the Eureka Web UI** + +1. Port-forward to Eureka: + ```bash + kubectl port-forward -n eureka svc/eureka 8761 + ``` + +2. Open [http://localhost:8761](http://localhost:8761) in your browser + +3. Check the "Instances currently registered with Eureka" section. 
+### Test Service Registration
+
+After deploying an application configured to register with Eureka, verify it appears in the Eureka registry:
+
+**Option 1: Using the Eureka Web UI**
+
+1. Port-forward to Eureka:
+   ```bash
+   kubectl port-forward -n eureka svc/eureka 8761
+   ```
+
+2. Open [http://localhost:8761](http://localhost:8761) in your browser
+
+3. Check the "Instances currently registered with Eureka" section. Your application should appear under "Application" with its name in uppercase.
+
+**Option 2: Using Eureka REST API**
+
+Query the Eureka REST API to see all registered services:
+
+```bash
+kubectl port-forward -n eureka svc/eureka 8761 &
+curl -s http://localhost:8761/eureka/apps | grep -i "<application-name>"
+```
+
+Or check a specific application:
+
+```bash
+curl -s http://localhost:8761/eureka/apps/YOUR-APPLICATION-NAME
+```
+
+### Common Issues and Debugging
+
+#### Services Not Appearing in Eureka
+
+**Check application logs for registration errors:**
+
+```bash
+kubectl logs <pod-name> -n <namespace> | grep -i eureka
+```
+
+**Common causes:**
+
+1. **Incorrect Eureka URL:** Verify the `eureka.client.service-url.defaultZone` property points to the correct Eureka service URL (typically `http://eureka.eureka:8761/eureka`)
+
+2. **Network connectivity:** Test connectivity from your application pod to Eureka:
+   ```bash
+   kubectl exec <pod-name> -n <namespace> -- curl -v http://eureka.eureka:8761/eureka/apps
+   ```
+
+3. **Missing dependencies:** Ensure the Eureka client dependency is included in your application
+
+4. **Registration disabled:** Check that `eureka.client.register-with-eureka` is set to `true`
+
+#### Service Registration Delays
+
+Eureka uses a heartbeat mechanism with default intervals:
+- **Registration delay:** Up to 30 seconds after application startup
+- **Discovery delay:** Up to 30 seconds for other services to discover the new instance
+- **Deregistration delay:** Up to 90 seconds after an instance goes down
+
+To reduce these delays for development/testing, add to your application configuration:
+
+```yaml
+eureka:
+  instance:
+    lease-renewal-interval-in-seconds: 5
+    lease-expiration-duration-in-seconds: 10
+  client:
+    registry-fetch-interval-seconds: 5
+```
+
+:::warning
+Do not use these shortened intervals in production as they increase network traffic and server load.
+:::
+
+#### Check Eureka Server Logs
+
+View Eureka server logs to diagnose registration or discovery issues:
+
+```bash
+kubectl logs -n eureka -l app=eureka --tail=100 -f
+```
+
+Look for:
+- Registration events: `Registered instance ... with status UP`
+- Heartbeat failures: `Lease expired for ...`
+- Replication errors (if running multiple Eureka servers)
+
+#### Verify Environment Variables
+
+Check that the Eureka environment variables are correctly injected into your application pod:
+
+```bash
+kubectl exec <pod-name> -n <namespace> -- env | grep -i eureka
+```
+
+Expected variables:
+```text
+EUREKA_SERVICE_URL=http://eureka.eureka:8761/eureka
+```
+
+#### Application Shows as DOWN in Eureka
+
+**Possible causes:**
+
+1. **Health check endpoint failing:** Verify your application's health endpoint is responding:
+   ```bash
+   kubectl exec <pod-name> -n <namespace> -- curl http://localhost:<port>/actuator/health
+   ```
+
+2. **Incorrect health check URL:** Check the `eureka.instance.health-check-url-path` configuration
+
+3. **Application startup incomplete:** The application may still be initializing
+
+#### Multiple Instances Not Load Balancing
+
+If you have multiple instances of a service but requests always go to the same instance:
+
+1. **Verify all instances are registered:**
+   ```bash
+   curl -s http://localhost:8761/eureka/apps/YOUR-SERVICE-NAME | grep instanceId
+   ```
+
+2. **Check load balancer configuration:** Ensure your client is using a load-balanced RestTemplate or WebClient (for Spring Boot)
+
+3. **Verify client-side load balancing:** Spring Cloud LoadBalancer should be on the classpath
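+
+To see at a glance which instances Eureka knows about and whether they are `UP`, you can ask the REST API for JSON and filter it. This is a minimal sketch, assuming `jq` is installed locally and the port-forward from Option 2 above is still running; the service name is a placeholder, and depending on the Eureka version `instance` may be a single object rather than an array when only one instance is registered.
+
+```bash
+# Replace YOUR-SERVICE-NAME with the registered application name (uppercase).
+curl -s -H 'Accept: application/json' \
+  http://localhost:8761/eureka/apps/YOUR-SERVICE-NAME \
+  | jq -r '.application.instance | if type == "array" then .[] else . end | "\(.instanceId)  \(.status)"'
+```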
+
+### Enable Debug Logging
+
+To get more detailed Eureka logs in your application, add to your `application.yaml`:
+
+```yaml
+logging:
+  level:
+    com.netflix.discovery: DEBUG
+    com.netflix.eureka: DEBUG
+```
+
+For the Eureka server, edit the deployment and add environment variables:
+
+```bash
+kubectl set env deployment/eureka -n eureka LOGGING_LEVEL_COM_NETFLIX_EUREKA=DEBUG
+```
+
 ## Getting Help
 
 - [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace.

diff --git a/docs-source/site/docs/platform/sbadminserver.md b/docs-source/site/docs/platform/sbadminserver.md
index 8c7180681..9ad802023 100644
--- a/docs-source/site/docs/platform/sbadminserver.md
+++ b/docs-source/site/docs/platform/sbadminserver.md
@@ -33,6 +33,320 @@ Open the [Spring Boot Admin dashboard](http://localhost:8989)
 
 ![Spring Boot Admin Server](images/admin-server.png)
 
+### Enable a Spring Boot Application for Spring Boot Admin
+
+To enable a Spring Boot application to register with Spring Boot Admin, add the following dependency:
+
+```xml
+<dependency>
+    <groupId>de.codecentric</groupId>
+    <artifactId>spring-boot-admin-starter-client</artifactId>
+</dependency>
+```
+
+Add the following configuration to your `application.yaml` file:
+
+```yaml
+spring:
+  boot:
+    admin:
+      client:
+        url: ${spring.boot.admin.client.url}
+        instance:
+          prefer-ip: true
+management:
+  endpoints:
+    web:
+      exposure:
+        include: "*"
+  endpoint:
+    health:
+      show-details: always
+```
+
+The `spring.boot.admin.client.url` variable is automatically injected into your deployment when you deploy applications to Oracle Backend for Microservices and AI using the OBaaS deployment Helm chart. It typically points to `http://admin-server.admin-server:8989`.
+
+## Testing and Debugging
+
+### Verify Spring Boot Admin Server is Running
+
+Check that the Spring Boot Admin Server pod is running and healthy:
+
+```bash
+kubectl get pods -n admin-server
+```
+
+Expected output:
+
+```text
+NAME                            READY   STATUS    RESTARTS   AGE
+admin-server-7b8f9d5c4d-x9k2m   1/1     Running   0          5m
+```
+
+Check the Spring Boot Admin service:
+
+```bash
+kubectl get svc -n admin-server
+```
+
+Expected output:
+
+```text
+NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
+admin-server   ClusterIP   10.96.123.456   <none>        8989/TCP   5m
+```
+
+### Test Application Registration
+
+After deploying an application configured to register with Spring Boot Admin, verify it appears in the dashboard:
+
+**Option 1: Using the Spring Boot Admin Web UI**
+
+1. Port-forward to Spring Boot Admin:
+   ```bash
+   kubectl port-forward -n admin-server svc/admin-server 8989
+   ```
+
+2. Open [http://localhost:8989](http://localhost:8989) in your browser
+
+3. Check the "Applications" section on the wallboard. Your application should appear with:
+   - Application name
+   - Number of instances
+   - Health status (UP/DOWN)
+
+4. Click on the application to see detailed information:
+   - Health indicators
+   - Metrics
+   - Environment properties
+   - Log files
+   - JVM information
+   - Threads
+
+**Option 2: Using Spring Boot Admin REST API**
+
+Query the Spring Boot Admin REST API to see all registered applications:
+
+```bash
+kubectl port-forward -n admin-server svc/admin-server 8989 &
+curl -s http://localhost:8989/applications | jq
+```
+
+Check a specific application:
+
+```bash
+curl -s http://localhost:8989/applications/<application-name> | jq
+```
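+
+If you only want a quick summary rather than the full JSON document, you can reduce the `/applications` response to one line per application. This is a sketch, not part of the product tooling; it assumes `jq` is available and that the port-forward above is still running, and the exact JSON shape can vary between Spring Boot Admin versions, so adjust the filter if the fields differ.
+
+```bash
+# One line per registered application: name, status, and instance count.
+curl -s http://localhost:8989/applications \
+  | jq -r '.[] | "\(.name)\t\(.status)\tinstances=\(.instances | length)"'
+```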
+
+### Verify Actuator Endpoints
+
+Spring Boot Admin requires actuator endpoints to be exposed. Test that your application's actuator endpoints are accessible:
+
+```bash
+kubectl port-forward -n <namespace> <pod-name> 8080 &
+curl http://localhost:8080/actuator
+```
+
+Expected output should list available endpoints:
+
+```json
+{
+  "_links": {
+    "self": { "href": "http://localhost:8080/actuator" },
+    "health": { "href": "http://localhost:8080/actuator/health" },
+    "info": { "href": "http://localhost:8080/actuator/info" },
+    "metrics": { "href": "http://localhost:8080/actuator/metrics" }
+  }
+}
+```
+
+### Common Issues and Debugging
+
+#### Applications Not Appearing in Spring Boot Admin
+
+**Check application logs for registration errors:**
+
+```bash
+kubectl logs <pod-name> -n <namespace> | grep -i "admin"
+```
+
+Look for registration messages like:
+- `Application registered itself as <instance-id>`
+- `Failed to register application`
+
+**Common causes:**
+
+1. **Incorrect Spring Boot Admin URL:** Verify the `spring.boot.admin.client.url` property points to the correct URL (typically `http://admin-server.admin-server:8989`)
+
+2. **Network connectivity:** Test connectivity from your application pod to Spring Boot Admin:
+   ```bash
+   kubectl exec <pod-name> -n <namespace> -- curl -v http://admin-server.admin-server:8989/applications
+   ```
+
+3. **Missing dependencies:** Ensure the `spring-boot-admin-starter-client` dependency is included in your application
+
+4. **Actuator endpoints not exposed:** Verify that management endpoints are exposed in your `application.yaml`
+
+5. **Security blocking registration:** If Spring Boot Admin has security enabled, ensure your application provides the correct credentials
+
+#### Application Shows as DOWN or OFFLINE
+
+**Possible causes:**
+
+1. **Health endpoint failing:** Check your application's health endpoint:
+   ```bash
+   kubectl exec <pod-name> -n <namespace> -- curl http://localhost:8080/actuator/health
+   ```
+
+2. **Health endpoint not accessible:** Spring Boot Admin must be able to reach the health endpoint. Verify network policies allow traffic between Spring Boot Admin and your application
+
+3. **Incorrect service URL registered:** Check the registered instance URL in Spring Boot Admin. It should be accessible from within the cluster
+
+4. **Application restarting:** If the application is in a restart loop, it will appear as DOWN
+
+#### No Metrics or Limited Data Available
+
+**Check exposed endpoints:**
+
+```bash
+curl http://localhost:8080/actuator | jq '._links | keys'
+```
+
+If endpoints are missing, update your `application.yaml`:
+
+```yaml
+management:
+  endpoints:
+    web:
+      exposure:
+        include: "*" # Expose all endpoints (use cautiously in production)
+```
+
+For production, explicitly list required endpoints:
+
+```yaml
+management:
+  endpoints:
+    web:
+      exposure:
+        include: health,info,metrics,env,loggers,threaddump,heapdump
+```
+
+#### Cannot View Application Logs
+
+Spring Boot Admin can display application logs if the logfile endpoint is exposed:
+
+```yaml
+management:
+  endpoints:
+    web:
+      exposure:
+        include: logfile
+  endpoint:
+    logfile:
+      external-file: /path/to/application.log
+```
+
+If using console logging only, logs won't be available through Spring Boot Admin. Consider:
+- Configuring file-based logging
+- Using centralized logging (e.g., via SigNoz/observability stack)
+
+#### Check Spring Boot Admin Server Logs
+
+View Spring Boot Admin server logs to diagnose registration or connection issues:
+
+```bash
+kubectl logs -n admin-server -l app=admin-server --tail=100 -f
+```
+
+Look for:
+- Registration events: `New application ... registered`
+- Connection errors: `Failed to connect to ...`
+- Authentication issues
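+
+When you are chasing a single application, it can help to narrow the server log to that application's lifecycle events. A minimal sketch; the application name is a placeholder, and the exact log wording differs between Spring Boot Admin versions, so treat the grep pattern as a starting point.
+
+```bash
+# Show recent registration / status events that mention your application.
+kubectl logs -n admin-server -l app=admin-server --tail=500 \
+  | grep -iE 'registered|deregistered|status' \
+  | grep -i 'your-application-name'
+```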
+
+#### Verify Environment Variables
+
+Check that the Spring Boot Admin environment variables are correctly injected into your application pod:
+
+```bash
+kubectl exec <pod-name> -n <namespace> -- env | grep -i admin
+```
+
+Expected variables:
+```text
+SPRING_BOOT_ADMIN_CLIENT_URL=http://admin-server.admin-server:8989
+```
+
+#### Registration Delays
+
+Applications may take up to 30-60 seconds to appear in Spring Boot Admin after startup due to:
+- Application startup time
+- Initial registration attempt timing
+- Health check intervals
+
+To speed up registration for development/testing:
+
+```yaml
+spring:
+  boot:
+    admin:
+      client:
+        period: 5000 # Check every 5 seconds (default: 10000ms)
+```
+
+### Enable Debug Logging
+
+**For your application (client-side):**
+
+Add to your `application.yaml`:
+
+```yaml
+logging:
+  level:
+    de.codecentric.boot.admin.client: DEBUG
+```
+
+**For Spring Boot Admin Server:**
+
+Enable debug logging in the Spring Boot Admin server:
+
+```bash
+kubectl set env deployment/admin-server -n admin-server LOGGING_LEVEL_DE_CODECENTRIC_BOOT_ADMIN=DEBUG
+```
+
+Or by editing the deployment:
+
+```bash
+kubectl edit deployment admin-server -n admin-server
+```
+
+Add environment variable:
+```yaml
+env:
+- name: LOGGING_LEVEL_DE_CODECENTRIC_BOOT_ADMIN
+  value: DEBUG
+```
+
+### Test Data Flow
+
+To verify the complete flow from application to Spring Boot Admin:
+
+1. **Check application registers successfully:**
+   ```bash
+   kubectl logs <pod-name> -n <namespace> | grep "Application registered"
+   ```
+
+2. **Verify Spring Boot Admin receives registration:**
+   ```bash
+   kubectl logs -n admin-server -l app=admin-server | grep "New application"
+   ```
+
+3. **Confirm health checks are working:**
+   ```bash
+   kubectl logs -n admin-server -l app=admin-server | grep "health"
+   ```
+
+4. **Access the dashboard and verify metrics are updating**
+
 ## Getting Help
 
 - [#oracle-db-microservices Slack channel](https://oracledevs.slack.com/archives/C06L9CDGR6Z) in the Oracle Developers slack workspace.

diff --git a/docs-source/site/docs/relnotes/index.md b/docs-source/site/docs/relnotes/index.md
index 95c659d09..b99ed83b6 100644
--- a/docs-source/site/docs/relnotes/index.md
+++ b/docs-source/site/docs/relnotes/index.md
@@ -4,170 +4,9 @@ sidebar_position: 1
 description: Comprehensive release notes for Oracle Backend for Microservices and AI (OBaaS) version 2.0.0, including container images, platform components, and deployment information.
 keywords: [OBaaS, Oracle Backend, microservices, AI, release notes, container images, Kubernetes, deployment, version 2.0.0]
 ---
-## Overview
+## Overview - TBD
 
-This document provides comprehensive information about the container images used in OBaaS (Oracle Backend as a Service) version 2.0.0-M5. It includes two primary image lists and a comparison to help you understand the differences between them.
- -### What's Included - -- **Installed Images**: Images that are automatically deployed when using the production Helm installation -- **Required Images**: Complete list of images needed for OBaaS, useful when mirroring to private registries -- **Differences**: A comparison highlighting what's unique to each list - ---- - -## Table of Contents - -- [Overview](#overview) -- [Image Inventories](#image-inventories) - - [Installed Images (Production Helm Deployment)](#installed-images-production-helm-deployment) - - [Required Images (Complete Set)](#required-images-complete-set) -- [Image Differences Analysis](#image-differences-analysis) - - [Summary](#summary) - - [Detailed Comparison](#detailed-comparison) - - [Additional Images in Required Set](#additional-images-in-required-set) - - [Registry Path Differences](#registry-path-differences) - ---- - -## Image Inventories - -### Installed Images (Production Helm Deployment) - -The following **29 images** are installed in the Kubernetes cluster when using the production installation via Helm charts. These represent the core components that are automatically deployed. - -| Description | Image Name | Version | -|-------------|------------|---------| -| Observability Exporter | container-registry.oracle.com/database/observability-exporter | 2.2.0 | -| Operator | container-registry.oracle.com/database/operator | 1.2.0 | -| Otmm | container-registry.oracle.com/database/otmm | 24.4.1 | -| Coherence Operator | container-registry.oracle.com/middleware/coherence-operator | 3.5.6 | -| Clickhouse Operator | docker.io/altinity/clickhouse-operator | 0.21.2 | -| Metrics Exporter | docker.io/altinity/metrics-exporter | 0.21.2 | -| Apisix | docker.io/apache/apisix | 3.14.1-debian | -| Etcd | docker.io/bitnamilegacy/etcd | 3.5.10-debian-11-r2 | -| Zookeeper | docker.io/bitnamilegacy/zookeeper | 3.7.1 | -| Busybox | docker.io/busybox | 1.36 | -| Clickhouse Server | docker.io/clickhouse/clickhouse-server | 25.5.6 | -| Opentelemetry Collector Contrib | docker.io/otel/opentelemetry-collector-contrib | 0.109.0 | -| Signoz Otel Collector | docker.io/signoz/signoz-otel-collector | v0.129.4 | -| Signoz Schema Migrator | docker.io/signoz/signoz-schema-migrator | v0.129.4 | -| Signoz | docker.io/signoz/signoz | v0.94.1 | -| External Secrets | oci.external-secrets.io/external-secrets/external-secrets | v1.0.0 | -| Cert Manager Cainjector | quay.io/jetstack/cert-manager-cainjector | v1.16.2 | -| Cert Manager Controller | quay.io/jetstack/cert-manager-controller | v1.16.2 | -| Cert Manager Webhook | quay.io/jetstack/cert-manager-webhook | v1.16.2 | -| Kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.0 | -| Kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.1 | -| Operator | quay.io/strimzi/operator | 0.45.1 | -| Controller | registry.k8s.io/ingress-nginx/controller | v1.11.5 | -| Kube State Metrics | registry.k8s.io/kube-state-metrics/kube-state-metrics | v2.17.0 | -| Metrics Server | registry.k8s.io/metrics-server/metrics-server | v0.7.2 | -| Admin Server | us-phoenix-1.ocir.io/maacloud/mark-artifactory/admin-server | 2.0.0-M5 | -| Conductor Server | us-phoenix-1.ocir.io/maacloud/mark-artifactory/conductor-server | 2.0.0-M5 | -| Eureka | us-phoenix-1.ocir.io/maacloud/mark-artifactory/eureka | 2.0.0-M5 | - ---- - -**Generated on:** Wed Nov 19 20:12:29 CST 2025 - ---- - -### Required Images (Complete Set) - -The following **42 images** represent the complete set of container images required for OBaaS installation. 
This list is particularly useful when: - -- Setting up OBaaS in air-gapped or restricted environments -- Mirroring images to a private container registry -- Using the `private_repo_helper.sh` script to copy images to your own repository - -**Note:** This list includes all installed images plus additional images needed for build processes, initialization tasks, and optional features. - -| Description | Name | Version | -|-------------|------|---------| -| observability-exporter | container-registry.oracle.com/database/observability-exporter | 2.2.0 | -| operator | container-registry.oracle.com/database/operator | 1.2.0 | -| otmm | container-registry.oracle.com/database/otmm | 24.4.1 | -| coherence-ce | container-registry.oracle.com/middleware/coherence-ce | 25.03.2 | -| coherence-operator | container-registry.oracle.com/middleware/coherence-operator | 3.5.6 | -| adb-free | container-registry.oracle.com/database/adb-free | 25.10.2.1 | -| signoz-histograms | obaas-docker-release.dockerhub-iad.oci.oraclecorp.com/signoz-histograms | v0.0.1 | -| clickhouse-operator | docker.io/altinity/clickhouse-operator | 0.21.2 | -| metrics-exporter | docker.io/altinity/metrics-exporter | 0.21.2 | -| apisix | docker.io/apache/apisix | 3.14.1-debian | -| etcd | docker.io/bitnamilegacy/etcd | 3.5.10-debian-11-r2 | -| zookeeper | docker.io/bitnamilegacy/zookeeper | 3.7.1 | -| busybox | docker.io/busybox | 1.36 | -| clickhouse-server | docker.io/clickhouse/clickhouse-server | 25.5.6 | -| k8s-wait-for | docker.io/groundnuty/k8s-wait-for | v2.0 | -| yq | docker.io/linuxserver/yq | 3.4.3 | -| opentelemetry-collector-contrib | docker.io/otel/opentelemetry-collector-contrib | 0.109.0 | -| signoz | docker.io/signoz/signoz | v0.94.1 | -| signoz-otel-collector | docker.io/signoz/signoz-otel-collector | v0.129.4 | -| signoz-schema-migrator | docker.io/signoz/signoz-schema-migrator | v0.129.4 | -| external-secrets | oci.external-secrets.io/external-secrets/external-secrets | v1.0.0 | -| admin-server | phx.ocir.io/maacloud/mark-artifactory/admin-server | 2.0.0-M5 | -| conductor-server | phx.ocir.io/maacloud/mark-artifactory/conductor-server | 2.0.0-M5 | -| eureka | phx.ocir.io/maacloud/mark-artifactory/eureka | 2.0.0-M5 | -| cert-manager-cainjector | quay.io/jetstack/cert-manager-cainjector | v1.16.2 | -| cert-manager-controller | quay.io/jetstack/cert-manager-controller | v1.16.2 | -| cert-manager-startupapicheck | quay.io/jetstack/cert-manager-startupapicheck | v1.16.2 | -| cert-manager-webhook | quay.io/jetstack/cert-manager-webhook | v1.16.2 | -| kafka-bridge | quay.io/strimzi/kafka-bridge | 0.31.2 | -| kaniko-executor | quay.io/strimzi/kaniko-executor | 0.45.1 | -| maven-builder | quay.io/strimzi/maven-builder | 0.45.1 | -| operator | quay.io/strimzi/operator | 0.45.1 | -| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.0 | -| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.1 | -| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.0 | -| kafka | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.1 | -| curl-jq | registry.gitlab.com/gitlab-ci-utils/curl-jq | 3.2.0 | -| controller | registry.k8s.io/ingress-nginx/controller | v1.11.5 | -| kube-webhook-certgen | registry.k8s.io/ingress-nginx/kube-webhook-certgen | v1.5.2 | -| kube-state-metrics | registry.k8s.io/kube-state-metrics/kube-state-metrics | v2.17.0 | -| metrics-server | registry.k8s.io/metrics-server/metrics-server | v0.7.2 | - ---- - -## Image Differences Analysis - -### Summary - -The Required Images list contains **13 additional images** not present in the Installed 
Images list, plus **3 images with different registry paths**. These differences reflect the additional tooling and optional components needed for certain deployment scenarios. - -### Detailed Comparison - -#### Additional Images in Required Set - -The following images are required for setup, builds, and optional features but are not part of the standard production deployment: - -| Category | Description | Image Name | Version | Purpose | -|----------|-------------|------------|---------|---------| -| **Database** | Coherence CE | container-registry.oracle.com/middleware/coherence-ce | 25.03.2 | In-memory data grid runtime | -| **Database** | ADB Free | container-registry.oracle.com/database/adb-free | 25.10.2.1 | Autonomous Database Free tier | -| **Observability** | Signoz Histograms | obaas-docker-release.dockerhub-iad.oci.oraclecorp.com/signoz-histograms | v0.0.1 | Custom histogram processing | -| **Utilities** | K8s Wait For | docker.io/groundnuty/k8s-wait-for | v2.0 | Init container for waiting on resources | -| **Utilities** | YQ | docker.io/linuxserver/yq | 3.4.3 | YAML processing tool | -| **Utilities** | Curl-JQ | registry.gitlab.com/gitlab-ci-utils/curl-jq | 3.2.0 | HTTP client with JSON processing | -| **Certificate Management** | Cert Manager Startup API Check | quay.io/jetstack/cert-manager-startupapicheck | v1.16.2 | Cert-manager initialization validation | -| **Kafka/Streaming** | Kafka Bridge | quay.io/strimzi/kafka-bridge | 0.31.2 | HTTP bridge for Kafka | -| **Kafka/Streaming** | Kafka (3.8.1) | quay.io/strimzi/kafka | 0.45.1-kafka-3.8.1 | Additional Kafka version | -| **Kafka/Streaming** | Kafka (3.9.0) | quay.io/strimzi/kafka | 0.45.1-kafka-3.9.0 | Additional Kafka version | -| **Build Tools** | Kaniko Executor | quay.io/strimzi/kaniko-executor | 0.45.1 | Container image builder | -| **Build Tools** | Maven Builder | quay.io/strimzi/maven-builder | 0.45.1 | Java build tool | -| **Ingress** | Kube Webhook Certgen | registry.k8s.io/ingress-nginx/kube-webhook-certgen | v1.5.2 | Certificate generation for webhooks | - -#### Registry Path Differences - -The following images use different registry paths between the two lists: - -| Image | Installed Images Registry | Required Images Registry | Version | -|-------|---------------------------|-------------------------|---------| -| admin-server | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | -| conductor-server | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | -| eureka | us-phoenix-1.ocir.io/maacloud/mark-artifactory | phx.ocir.io/maacloud/mark-artifactory | 2.0.0-M5 | - -**Note:** Both registry paths point to the same Phoenix region but use different URL formats. The `phx.ocir.io` format is the shortened alias for `us-phoenix-1.ocir.io`. +This document provides comprehensive information about the container images used in OBaaS (Oracle Backend as a Service) version 2.0.0-M5. ## Getting Help