| acl.enabled | bool | false | @param acl.enabled Enable ACLs for AutoMQ # |
| acl.superUsers | list | [] | @param acl.superUsers Comma-separated list of super users for AutoMQ ACLs # |
| broker | object | {"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":0,"resources":{},"topologySpreadConstraints":[]} | @section Broker statefulset parameters # |
| broker.annotations | object | {} | @param broker.annotations Extra annotations for AutoMQ broker pods # |
| broker.env | list | [] | @param broker.env Extra env arrays for AutoMQ broker pods # E.g. # env: # - name: "KAFKA_JVM_PERFORMANCE_OPTS" # value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5" # - name: "KAFKA_OPTS" # value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError" # |
| broker.extraConfig | string | "" | @param broker.extraConfig Extra configuration file for Kafka broker nodes, rendered as a template. Auto-generated based on chart values when not specified. # |
| broker.labels | object | {} | @param broker.labels Extra labels for AutoMQ broker pods # |
| broker.partition | int | 0 | @param broker.partition Partition rolling update strategy for AutoMQ broker nodes # |
| broker.persistence | object | {"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}} | Enable persistence using Persistent Volume Claims # ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ # |
| broker.persistence.fallback | object | {"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""} | fallback volume for AutoMQ broker nodes |
| broker.persistence.fallback.accessMode | string | "ReadWriteOnce" | @param broker.persistence.fallback.accessMode Persistent fallback Volume Access Modes # |
| broker.persistence.fallback.annotations | object | {} | @param broker.persistence.fallback.annotations Annotations for the fallback PVC # |
| broker.persistence.fallback.size | string | "20Gi" | @param broker.persistence.fallback.size PVC Storage Request for AutoMQ fallback volume # |
| broker.persistence.fallback.storageClass | string | "" | @param broker.persistence.fallback.storageClass PVC Storage Class for AutoMQ fallback volume # |
| broker.persistence.wal | object | {"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""} | wal volume for AutoMQ broker nodes # |
| broker.persistence.wal.accessMode | string | "ReadWriteOnce" | @param broker.persistence.wal.accessMode Persistent wal Volume Access Modes # |
| broker.persistence.wal.annotations | object | {} | @param broker.persistence.wal.annotations Annotations for the PVC # |
| broker.persistence.wal.size | string | "20Gi" | @param broker.persistence.wal.size PVC Storage Request for AutoMQ wal volume # |
| broker.persistence.wal.storageClass | string | "" | @param broker.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume # |
| broker.replicas | int | 0 | @param broker.replicas Number of AutoMQ broker nodes # |
| broker.resources | object | {} | @param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) # E.g. # resources: # requests: # cpu: 2 # memory: 512Mi # limits: # cpu: 3 # memory: 1024Mi # |
| controller | object | {"annotations":{},"args":[],"command":[],"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":3,"resources":{},"topologySpreadConstraints":[]} | @section Controller statefulset parameters # |
| controller.annotations | object | {} | @param controller.annotations Extra annotations for AutoMQ Controller pods # |
| controller.env | list | [] | @param controller.env Extra env arrays for AutoMQ Controller pods # E.g. # env: # - name: "KAFKA_JVM_PERFORMANCE_OPTS" # value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5" # - name: "KAFKA_OPTS" # value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError" # |
| controller.extraConfig | string | "" | @param controller.extraConfig Extra configuration file for Kafka controller nodes, rendered as a template. Auto-generated based on chart values when not specified. # |
| controller.labels | object | {} | @param controller.labels Extra labels for AutoMQ Controller pods # |
| controller.partition | int | 0 | @param controller.partition Partition rolling update strategy for AutoMQ controller nodes # |
| controller.persistence | object | {"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}} | Enable persistence using Persistent Volume Claims # ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ # |
| controller.persistence.fallback | object | {"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""} | fallback volume for AutoMQ controller nodes |
| controller.persistence.fallback.accessMode | string | "ReadWriteOnce" | @param controller.persistence.fallback.accessMode Persistent fallback Volume Access Modes # |
| controller.persistence.fallback.annotations | object | {} | @param controller.persistence.fallback.annotations Annotations for the fallback PVC # |
| controller.persistence.fallback.size | string | "20Gi" | @param controller.persistence.fallback.size PVC Storage Request for AutoMQ fallback volume # |
| controller.persistence.fallback.storageClass | string | "" | @param controller.persistence.fallback.storageClass PVC Storage Class for AutoMQ fallback volume # |
| controller.persistence.metadata | object | {"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""} | kraft metadata volume for AutoMQ controller nodes # |
| controller.persistence.metadata.accessMode | string | "ReadWriteOnce" | @param controller.persistence.metadata.accessMode Persistent metadata Volume Access Modes # |
| controller.persistence.metadata.annotations | object | {} | @param controller.persistence.metadata.annotations Annotations for the PVC # |
| controller.persistence.metadata.size | string | "20Gi" | @param controller.persistence.metadata.size PVC Storage Request for AutoMQ metadata volume # |
| controller.persistence.metadata.storageClass | string | "" | @param controller.persistence.metadata.storageClass PVC Storage Class for AutoMQ metadata volume # |
| controller.persistence.wal | object | {"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""} | wal volume for AutoMQ controller nodes # |
| controller.persistence.wal.accessMode | string | "ReadWriteOnce" | @param controller.persistence.wal.accessMode Persistent wal Volume Access Modes # |
| controller.persistence.wal.annotations | object | {} | @param controller.persistence.wal.annotations Annotations for the PVC # |
| controller.persistence.wal.size | string | "20Gi" | @param controller.persistence.wal.size PVC Storage Request for AutoMQ wal volume # |
| controller.persistence.wal.storageClass | string | "" | @param controller.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume # |
| controller.replicas | int | 3 | @param controller.replicas Number of AutoMQ controller nodes # |
| controller.resources | object | {} | @param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) # E.g. # resources: # requests: # cpu: 2 # memory: 512Mi # limits: # cpu: 3 # memory: 1024Mi # |
| externalAccess.broker.enabled | bool | false | |
| externalAccess.broker.externalDns.annotations | object | {} | |
| externalAccess.broker.externalDns.enabled | bool | false | |
| externalAccess.broker.externalDns.hostname | string | "" | |
| externalAccess.broker.externalDns.privateZoneId | string | "" | |
| externalAccess.broker.externalDns.recordType | string | "A" | |
| externalAccess.broker.externalDns.ttl | int | 60 | |
| externalAccess.broker.service.annotations | object | {} | |
| externalAccess.broker.service.extraPorts | list | [] | |
| externalAccess.broker.service.labels | object | {} | |
| externalAccess.broker.service.loadBalancerAnnotations | object | {} | |
| externalAccess.broker.service.type | string | "LoadBalancer" | |
| externalAccess.controller.enabled | bool | false | |
| externalAccess.controller.externalDns.annotations | object | {} | |
| externalAccess.controller.externalDns.enabled | bool | false | |
| externalAccess.controller.externalDns.hostname | string | "" | |
| externalAccess.controller.externalDns.privateZoneId | string | "" | |
| externalAccess.controller.externalDns.recordType | string | "A" | |
| externalAccess.controller.externalDns.ttl | int | 60 | |
| externalAccess.controller.forceExpose | bool | false | |
| externalAccess.controller.service.annotations | object | {} | |
| externalAccess.controller.service.extraPorts | list | [] | |
| externalAccess.controller.service.labels | object | {} | |
| externalAccess.controller.service.loadBalancerAnnotations | object | {} | |
| externalAccess.controller.service.type | string | "LoadBalancer" | |
| global.automqInstanceId | string | "" | |
| global.autoscaling | object | {"hpa":{"annotations":{},"enabled":false,"maxReplicas":"","minReplicas":"","targetCPU":"","targetMemory":""}} | ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ # |
| global.autoscaling.hpa.annotations | object | {} | @param global.autoscaling.hpa.annotations Annotations for HPA. # |
| global.autoscaling.hpa.enabled | bool | false | @param global.autoscaling.hpa.enabled Enable HPA for AutoMQ # |
| global.autoscaling.hpa.maxReplicas | string | "" | @param global.autoscaling.hpa.maxReplicas >= 3 Maximum number of AutoMQ replicas # |
| global.autoscaling.hpa.minReplicas | string | "" | @param global.autoscaling.hpa.minReplicas Minimum number of AutoMQ replicas # |
| global.autoscaling.hpa.targetCPU | string | "" | @param global.autoscaling.hpa.targetCPU Target CPU utilization percentage # |
| global.autoscaling.hpa.targetMemory | string | "" | @param global.autoscaling.hpa.targetMemory Target Memory utilization percentage # |
| global.cloudProvider.credentials | string | "" | @param global.cloudProvider.credentials Credentials for the cloud provider where AutoMQ is running E.g. instance://?role=<your_role_id> or static://?accessKey=<your_encoded_ak>&secretKey=<your_encoded_sk> # |
| global.cloudProvider.name | string | "" | @param global.cloudProvider.name Cloud provider where AutoMQ is running E.g. aws, azure, gcp, etc. # |
| global.clusterId | string | "" | |
| global.commonAnnotations | object | {} | @param global.commonAnnotations Annotations to add to all deployed objects # |
| global.commonLabels | object | {} | @param global.commonLabels Labels to add to all deployed objects # |
| global.config | string | "s3.data.buckets=0@s3://xxx_bucket?region=us-east-1\ns3.ops.buckets=1@s3://xxx_bucket?region=us-east-1\ns3.wal.path=0@block:///dev/wal\n" | @param global.config Bucket URI Pattern: 0@s3://bucket?region=region&endpoint=$endpoint # |
| global.daemonSet | object | {"enabled":true} | @param global.daemonSet.enabled Enable AutoMQ image pull daemonSet |
| global.daemonSet.enabled | bool | true | @param global.daemonSet.enabled Enable AutoMQ daemonSet # |
| global.existingSecretConfig | string | "" | NOTE: This will override secretConfig value # |
| global.image.pullPolicy | string | "Always" | |
| global.image.pullSecrets | list | [] | |
| global.image.registry | string | "automq-docker-registry-registry.cn-hangzhou.cr.aliyuncs.com" | @param global.image.registry Global Docker image registry |
| global.image.repository | string | "automq/automq-enterprise" | |
| global.image.schemaRegistry | object | {"repository":"automq/karapace","tag":"4.1.0"} | @param global.image.schemaRegistry Global Docker image schema registry |
| global.image.tag | string | "5.3.1" | |
| global.livenessProbe | object | {"failureThreshold":4,"initialDelaySeconds":60,"periodSeconds":15,"probePort":9092,"successThreshold":1,"timeoutSeconds":1} | Configure extra options for Kafka containers’ liveness probes # |
| global.livenessProbe.failureThreshold | int | 4 | @param global.livenessProbe.failureThreshold Failure threshold for livenessProbe # |
| global.livenessProbe.initialDelaySeconds | int | 60 | @param global.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe # |
| global.livenessProbe.periodSeconds | int | 15 | @param global.livenessProbe.periodSeconds Period seconds for livenessProbe # |
| global.livenessProbe.probePort | int | 9092 | @param global.livenessProbe.probePort tcp socket port for livenessProbe check # |
| global.livenessProbe.successThreshold | int | 1 | @param global.livenessProbe.successThreshold Success threshold for livenessProbe # |
| global.livenessProbe.timeoutSeconds | int | 1 | @param global.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe # |
| global.nodeAffinities | list | [{"key":"kubernetes.io/arch","values":["amd64"]}] | @param global.nodeAffinities Affinity for node assignment # |
| global.readinessProbe | object | {"failureThreshold":8,"initialDelaySeconds":10,"periodSeconds":10,"probePort":9092,"successThreshold":1,"timeoutSeconds":1} | Configure extra options for Kafka containers’ readiness probes # |
| global.readinessProbe.failureThreshold | int | 8 | @param global.readinessProbe.failureThreshold Failure threshold for readinessProbe # |
| global.readinessProbe.initialDelaySeconds | int | 10 | @param global.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe # |
| global.readinessProbe.periodSeconds | int | 10 | @param global.readinessProbe.periodSeconds Period seconds for readinessProbe # |
| global.readinessProbe.probePort | int | 9092 | @param global.readinessProbe.probePort tcp socket port for readinessProbe check # |
| global.readinessProbe.successThreshold | int | 1 | @param global.readinessProbe.successThreshold Success threshold for readinessProbe # |
| global.readinessProbe.timeoutSeconds | int | 1 | @param global.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe # |
| global.serviceAccount.annotations | object | {} | @param global.serviceAccount.annotations Annotations for the ServiceAccount # |
| global.serviceAccount.create | bool | true | @param global.serviceAccount.create Create a ServiceAccount for AutoMQ |
| global.serviceAccount.name | string | "" | If not set and create is true, a name is generated using “automq-sa” # |
| global.tolerations | list | [{"effect":"NoSchedule","key":"dedicated","operator":"Equal","value":"automq"}] | Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ # |
| global.topologySpreadWhenUnsatisfiable | string | "DoNotSchedule" | @param global.topologySpreadWhenUnsatisfiable Topology spread when unsatisfiable schedule policy # |
| global.zoneSpreadConstraints | bool | false | @param global.zoneSpreadConstraints Availability Zone Balancing Constraint of AutoMQ nodes # |
| listeners.client[0] | object | {"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9092,"name":"PLAINTEXT","protocol":"PLAINTEXT","sslClientAuth":""} | @param listeners.client.containerPort Port for the Kafka client listener # |
| listeners.client[0].advertisedHostnames.baseDomain | string | "" | @param listeners.client.advertisedHostnames.baseDomain Expected Route53/Private DNS base domain for validation and advertised listener overrides |
| listeners.client[0].advertisedHostnames.enabled | bool | false | @param listeners.client.advertisedHostnames.enabled Enable AutoMQ-managed DNS records for this listener |
| listeners.client[0].advertisedHostnames.externalDns.privateZoneId | string | "" | @param listeners.client.advertisedHostnames.externalDns.privateZoneId Hosted zone identifier (e.g. AWS Route53 private zone ID) |
| listeners.client[0].advertisedHostnames.hostnamePattern | string | "" | @param listeners.client.advertisedHostnames.hostnamePattern Optional template for DNS record names. Defaults to kernel fallback when empty |
| listeners.client[0].advertisedHostnames.ttl | string | nil | @param listeners.client.advertisedHostnames.ttl TTL override (seconds) for Route53 records created by the kernel. Leave null to use the default |
| listeners.client[0].name | string | "PLAINTEXT" | @param listeners.client.name Name for the Kafka client listener |
| listeners.client[0].protocol | string | "PLAINTEXT" | @param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are ‘PLAINTEXT’, ‘SASL_PLAINTEXT’, ‘SASL_SSL’ and ‘SSL’ # |
| listeners.client[0].sslClientAuth | string | "" | @param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are ‘none’, ‘requested’ and ‘required’ # |
| listeners.controller[0] | object | {"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9093,"name":"CONTROLLER","protocol":"PLAINTEXT","sslClientAuth":""} | @param listeners.controller.containerPort Port for the Kafka controller listener # |
| listeners.controller[0].name | string | "CONTROLLER" | @param listeners.controller.name Name for the Kafka controller listener # |
| listeners.controller[0].protocol | string | "PLAINTEXT" | @param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are ‘PLAINTEXT’, ‘SASL_PLAINTEXT’, ‘SASL_SSL’ and ‘SSL’ # |
| listeners.controller[0].sslClientAuth | string | "" | @param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are ‘none’, ‘requested’ and ‘required’ # |
| listeners.interbroker[0] | object | {"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9094,"name":"BROKER","protocol":"PLAINTEXT","sslClientAuth":""} | @param listeners.interbroker.containerPort Port for the Kafka inter-broker listener # |
| listeners.interbroker[0].name | string | "BROKER" | @param listeners.interbroker.name Name for the Kafka inter-broker listener # |
| listeners.interbroker[0].protocol | string | "PLAINTEXT" | @param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are ‘PLAINTEXT’, ‘SASL_PLAINTEXT’, ‘SASL_SSL’ and ‘SSL’ # |
| listeners.interbroker[0].sslClientAuth | string | "" | @param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configure mTLS TLS authentication type. If SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are ‘none’, ‘requested’ and ‘required’ # |
| networkPolicy.enabled | bool | false | |
| networkPolicy.ingressRules | list | [] | @param networkPolicy.ingressRules Add ingress rules to the AutoMQ pod(s) # e.g: # ingressRules: # - ports: target-ports # - 9102 # ipBlocks: ip block settings # - cidr: 0.0.0.0/0 |
| sasl.client | object | {"passwords":null,"users":["user1"]} | Credentials for client communications. # |
| sasl.client.passwords | string | nil | @param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users # |
| sasl.client.users | list | ["user1"] | @param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled # |
| sasl.controller | object | {"password":null,"user":"controller_user"} | Credentials for controller communications. # |
| sasl.controller.password | string | nil | @param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. # |
| sasl.controller.user | string | "controller_user" | @param sasl.controller.user Username for controller communications when SASL is enabled # |
| sasl.controllerMechanism | string | "PLAIN" | @param sasl.controllerMechanism SASL mechanism for controller communications. # |
| sasl.enabledMechanisms | string | "PLAIN,SCRAM-SHA-256,SCRAM-SHA-512" | NOTE: At the moment, Kafka Raft mode does not support SCRAM, that is why only PLAIN is configured. # |
| sasl.existingSecret | string | "" | |
| sasl.interBrokerMechanism | string | "PLAIN" | @param sasl.interBrokerMechanism SASL mechanism for inter broker communication. # |
| sasl.interbroker | object | {"password":null,"user":"inter_broker_user"} | Credentials for inter-broker communications. # |
| sasl.interbroker.password | string | nil | @param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. # |
| sasl.interbroker.user | string | "inter_broker_user" | @param sasl.interbroker.user Username for inter-broker communications when SASL is enabled # |
| schemaRegistry | object | {"create":false,"env":null} | SchemaRegistry configuration # |
| schemaRegistry.env | string | nil | @param schemaRegistry.env Env for SchemaRegistry # |
| service | object | {"headless":{"annotations":{},"labels":{}}} | Service for AutoMQ, service type: ClusterIP # |
| service.headless | object | {"annotations":{},"labels":{}} | Headless service properties # |
| service.headless.annotations | object | {} | @param service.headless.annotations Annotations for the cluster headless service. # |
| service.headless.labels | object | {} | @param service.headless.labels Labels for the cluster headless service. # |
| tls | object | {"autoGenerated":false,"customAltNames":[],"endpointIdentificationAlgorithm":"https","existingSecret":"","jksKeystoreKey":"","jksTruststoreKey":"","jksTruststoreSecret":"","keyPassword":"","keystorePassword":"","passwordsSecret":"","passwordsSecretKeystoreKey":"keystore-password","passwordsSecretPemPasswordKey":"","passwordsSecretTruststoreKey":"truststore-password","pemChainIncluded":false,"selfConfigure":false,"sslClientAuth":"required","truststorePassword":"","type":"JKS"} | Kafka TLS settings, required if SSL or SASL_SSL listeners are configured # |
| tls.autoGenerated | bool | false | @param tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if tls.type is PEM # Note: ignored when using ‘jks’ format or tls.existingSecret is not empty # |
| tls.customAltNames | list | [] | @param tls.customAltNames Optionally specify extra list of additional subject alternative names (SANs) for the automatically generated TLS certificates. # |
| tls.endpointIdentificationAlgorithm | string | "https" | @param tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate # Disable server host name verification by setting it to an empty string. # ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings # |
| tls.existingSecret | string | "" | @param tls.existingSecret Name of the existing secret containing the TLS certificates for the Kafka nodes. # - For tls.type=PEM, the secret must contain a shared CA (kafka-ca.crt) plus a single certificate/key pair (kafka.crt + kafka.key or tls.crt + tls.key). The chart converts this bundle into the JKS keystore/truststore files automatically. # - For tls.type=JKS, the secret must contain kafka.keystore.jks and kafka.truststore.jks (also shared by all nodes). Per-role or per-pod keystores are no longer required. |
| tls.jksKeystoreKey | string | "" | @param tls.jksKeystoreKey The secret key from the tls.existingSecret containing the keystore # Note: ignored when using ‘pem’ format for certificates. # |
| tls.jksTruststoreKey | string | "" | @param tls.jksTruststoreKey The secret key from the tls.existingSecret or tls.jksTruststoreSecret containing the truststore # Note: ignored when using ‘pem’ format for certificates. # |
| tls.jksTruststoreSecret | string | "" | @param tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the one in the tls.existingSecret # Note: ignored when using ‘pem’ format for certificates. # |
| tls.keyPassword | string | "" | @param tls.keyPassword Password to access the PEM key when it is password-protected. # Note: ignored when using ‘tls.passwordsSecret’ # |
| tls.keystorePassword | string | "" | @param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when ‘tls.passwordsSecret’ is provided. # When using tls.type=PEM, the generated keystore will use this password or randomly generate one. # |
| tls.passwordsSecret | string | "" | @param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (key: password) # |
| tls.passwordsSecretKeystoreKey | string | "keystore-password" | @param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore. # |
| tls.passwordsSecretPemPasswordKey | string | "" | @param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside ‘tls.passwordsSecret’. # |
| tls.passwordsSecretTruststoreKey | string | "truststore-password" | @param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore. # |
| tls.pemChainIncluded | bool | false | @param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. # Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. # |
| tls.selfConfigure | bool | false | @param tls.selfConfigure Enable self-configured TLS certificates for AutoMQ # |
| tls.sslClientAuth | string | "required" | @param tls.sslClientAuth Sets the default value for the ssl.client.auth Kafka setting. # ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings # |
| tls.truststorePassword | string | "" | @param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when ‘tls.passwordsSecret’ is provided. # When using tls.type=PEM, the generated keystore will use this password or randomly generate one. # |
| tls.type | string | "JKS" | @param tls.type Format to use for TLS certificates. Allowed types: JKS and PEM # |