This document describes the parameter definitions and recommended settings for the AutoMQ Enterprise Edition chart. It is generated automatically by the helm-docs tool.

Requirements

| Repository | Name | Version |
|------------|------|---------|
| oci://registry-1.docker.io/bitnamicharts | common | 2.x.x |

Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| acl.enabled | bool | `false` | Enable ACLs for AutoMQ |
| acl.superUsers | list | `[]` | Comma-separated list of super users for AutoMQ ACLs |
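
For instance, a minimal sketch of turning ACLs on; the `User:admin` principal is an illustrative value, not a chart default:

```yaml
acl:
  enabled: true
  superUsers:
    - "User:admin"   # illustrative super-user principal
```
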
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| broker | object | `{"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":0,"resources":{},"topologySpreadConstraints":[]}` | Broker statefulset parameters |
| broker.annotations | object | `{}` | Extra annotations for AutoMQ broker pods |
| broker.env | list | `[]` | Extra env arrays for AutoMQ broker pods; see the example below this table |
| broker.extraConfig | string | `""` | Extra configuration file for Kafka broker nodes, rendered as a template. Auto-generated based on chart values when not specified. |
| broker.labels | object | `{}` | Extra labels for AutoMQ broker pods |
| broker.partition | int | `0` | Partition rolling update strategy for AutoMQ broker nodes |
| broker.persistence | object | `{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}` | Enable persistence using Persistent Volume Claims. Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ |
| broker.persistence.fallback | object | `{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""}` | Fallback volume for AutoMQ broker nodes |
| broker.persistence.fallback.accessMode | string | `"ReadWriteOnce"` | Access mode for the persistent fallback volume |
| broker.persistence.fallback.annotations | object | `{}` | Annotations for the fallback PVC |
| broker.persistence.fallback.size | string | `"20Gi"` | PVC storage request for the AutoMQ fallback volume |
| broker.persistence.fallback.storageClass | string | `""` | PVC storage class for the AutoMQ fallback volume |
| broker.persistence.wal | object | `{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}` | WAL volume for AutoMQ broker nodes |
| broker.persistence.wal.accessMode | string | `"ReadWriteOnce"` | Access mode for the persistent WAL volume |
| broker.persistence.wal.annotations | object | `{}` | Annotations for the WAL PVC |
| broker.persistence.wal.size | string | `"20Gi"` | PVC storage request for the AutoMQ WAL volume |
| broker.persistence.wal.storageClass | string | `""` | PVC storage class for the AutoMQ WAL volume |
| broker.replicas | int | `0` | Number of AutoMQ broker nodes |
| broker.resources | object | `{}` | Container requests and limits for resources such as CPU or memory (essential for production workloads); see the example below this table |
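
The env and resources examples from the original parameter comments, assembled into one hedged override (the replica count is illustrative; the chart default is 0):

```yaml
broker:
  replicas: 3                      # illustrative; chart default is 0
  env:
    - name: "KAFKA_JVM_PERFORMANCE_OPTS"
      value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5"
    - name: "KAFKA_OPTS"
      value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError"
  resources:
    requests:
      cpu: 2
      memory: 512Mi
    limits:
      cpu: 3
      memory: 1024Mi
```
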
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| controller | object | `{"annotations":{},"args":[],"command":[],"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":3,"resources":{},"topologySpreadConstraints":[]}` | Controller statefulset parameters |
| controller.annotations | object | `{}` | Extra annotations for AutoMQ Controller pods |
| controller.env | list | `[]` | Extra env arrays for AutoMQ Controller pods; same format as broker.env (see the example above) |
| controller.extraConfig | string | `""` | Extra configuration file for Kafka controller nodes, rendered as a template. Auto-generated based on chart values when not specified. |
| controller.labels | object | `{}` | Extra labels for AutoMQ Controller pods |
| controller.partition | int | `0` | Partition rolling update strategy for AutoMQ controller nodes |
| controller.persistence | object | `{"fallback":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""},"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}` | Enable persistence using Persistent Volume Claims. Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ |
| controller.persistence.fallback | object | `{"accessMode":"ReadWriteOnce","annotations":{},"enabled":false,"size":"20Gi","storageClass":""}` | Fallback volume for AutoMQ controller nodes |
| controller.persistence.fallback.accessMode | string | `"ReadWriteOnce"` | Access mode for the persistent fallback volume |
| controller.persistence.fallback.annotations | object | `{}` | Annotations for the fallback PVC |
| controller.persistence.fallback.size | string | `"20Gi"` | PVC storage request for the AutoMQ fallback volume |
| controller.persistence.fallback.storageClass | string | `""` | PVC storage class for the AutoMQ fallback volume |
| controller.persistence.metadata | object | `{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""}` | KRaft metadata volume for AutoMQ controller nodes |
| controller.persistence.metadata.accessMode | string | `"ReadWriteOnce"` | Access mode for the persistent metadata volume |
| controller.persistence.metadata.annotations | object | `{}` | Annotations for the metadata PVC |
| controller.persistence.metadata.size | string | `"20Gi"` | PVC storage request for the AutoMQ metadata volume |
| controller.persistence.metadata.storageClass | string | `""` | PVC storage class for the AutoMQ metadata volume |
| controller.persistence.wal | object | `{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}` | WAL volume for AutoMQ controller nodes |
| controller.persistence.wal.accessMode | string | `"ReadWriteOnce"` | Access mode for the persistent WAL volume |
| controller.persistence.wal.annotations | object | `{}` | Annotations for the WAL PVC |
| controller.persistence.wal.size | string | `"20Gi"` | PVC storage request for the AutoMQ WAL volume |
| controller.persistence.wal.storageClass | string | `""` | PVC storage class for the AutoMQ WAL volume |
| controller.replicas | int | `3` | Number of AutoMQ controller nodes |
| controller.resources | object | `{}` | Container requests and limits for resources such as CPU or memory (essential for production workloads); same format as broker.resources (see the example above) |
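
A sketch of the controller persistence knobs above; the `gp3` storage class is a hypothetical name, not a chart default:

```yaml
controller:
  replicas: 3
  persistence:
    metadata:
      size: "20Gi"
      storageClass: "gp3"   # hypothetical storage class name
    wal:
      enabled: true
      size: "20Gi"
      storageClass: "gp3"
    fallback:
      enabled: false        # fallback volume stays disabled (chart default)
```
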
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| externalAccess.broker.enabled | bool | `false` | |
| externalAccess.broker.externalDns.annotations | object | `{}` | |
| externalAccess.broker.externalDns.enabled | bool | `false` | |
| externalAccess.broker.externalDns.hostname | string | `""` | |
| externalAccess.broker.externalDns.privateZoneId | string | `""` | |
| externalAccess.broker.externalDns.recordType | string | `"A"` | |
| externalAccess.broker.externalDns.ttl | int | `60` | |
| externalAccess.broker.service.annotations | object | `{}` | |
| externalAccess.broker.service.extraPorts | list | `[]` | |
| externalAccess.broker.service.labels | object | `{}` | |
| externalAccess.broker.service.loadBalancerAnnotations | object | `{}` | |
| externalAccess.broker.service.type | string | `"LoadBalancer"` | |
| externalAccess.controller.enabled | bool | `false` | |
| externalAccess.controller.externalDns.annotations | object | `{}` | |
| externalAccess.controller.externalDns.enabled | bool | `false` | |
| externalAccess.controller.externalDns.hostname | string | `""` | |
| externalAccess.controller.externalDns.privateZoneId | string | `""` | |
| externalAccess.controller.externalDns.recordType | string | `"A"` | |
| externalAccess.controller.externalDns.ttl | int | `60` | |
| externalAccess.controller.forceExpose | bool | `false` | |
| externalAccess.controller.service.annotations | object | `{}` | |
| externalAccess.controller.service.extraPorts | list | `[]` | |
| externalAccess.controller.service.labels | object | `{}` | |
| externalAccess.controller.service.loadBalancerAnnotations | object | `{}` | |
| externalAccess.controller.service.type | string | `"LoadBalancer"` | |
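
A hedged sketch of exposing brokers externally through LoadBalancer services with DNS records managed via the externalDns settings above; the hostname and hosted zone ID are placeholders:

```yaml
externalAccess:
  broker:
    enabled: true
    service:
      type: LoadBalancer
    externalDns:
      enabled: true
      hostname: "kafka.example.internal"    # placeholder domain
      privateZoneId: "Z0123456789EXAMPLE"   # placeholder hosted zone ID
      recordType: "A"
      ttl: 60
```
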
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global.automqInstanceId | string | `""` | |
| global.autoscaling | object | `{"hpa":{"annotations":{},"enabled":false,"maxReplicas":"","minReplicas":"","targetCPU":"","targetMemory":""}}` | Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ |
| global.autoscaling.hpa.annotations | object | `{}` | Annotations for the HPA |
| global.autoscaling.hpa.enabled | bool | `false` | Enable HPA for AutoMQ |
| global.autoscaling.hpa.maxReplicas | string | `""` | Maximum number of AutoMQ replicas (must be >= 3) |
| global.autoscaling.hpa.minReplicas | string | `""` | Minimum number of AutoMQ replicas |
| global.autoscaling.hpa.targetCPU | string | `""` | Target CPU utilization percentage |
| global.autoscaling.hpa.targetMemory | string | `""` | Target memory utilization percentage |
| global.cloudProvider.credentials | string | `""` | Credentials for the cloud provider, e.g. `instance://?role=<your_role_id>` or `static://?accessKey=<your_encoded_ak>&secretKey=<your_encoded_sk>` |
| global.cloudProvider.name | string | `""` | Cloud provider where AutoMQ is running, e.g. aws, azure, gcp, etc. |
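
An illustrative combination of the cloud-provider and autoscaling settings above; the role ID stays a placeholder, and the replica bounds and CPU target are example values:

```yaml
global:
  cloudProvider:
    name: "aws"
    credentials: "instance://?role=<your_role_id>"  # role-based auth; placeholder role ID
  autoscaling:
    hpa:
      enabled: true
      minReplicas: "3"
      maxReplicas: "6"   # must be >= 3
      targetCPU: "60"    # target CPU utilization percentage
```
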
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global.clusterId | string | `""` | |
| global.commonAnnotations | object | `{}` | Annotations to add to all deployed objects |
| global.commonLabels | object | `{}` | Labels to add to all deployed objects |
| global.config | string | `"s3.data.buckets=0@s3://xxx_bucket?region=us-east-1\ns3.ops.buckets=1@s3://xxx_bucket?region=us-east-1\ns3.wal.path=0@block:///dev/wal\n"` | Bucket URI pattern: `0@s3://$bucket?region=$region&endpoint=$endpoint` |
| global.daemonSet | object | `{"enabled":true}` | Enable AutoMQ image pull daemonSet |
| global.daemonSet.enabled | bool | `true` | Enable AutoMQ daemonSet |
| global.existingSecretConfig | string | `""` | NOTE: This will override the secretConfig value |
| global.image.pullPolicy | string | `"Always"` | |
| global.image.pullSecrets | list | `[]` | |
| global.image.registry | string | `"automq-docker-registry-registry.cn-hangzhou.cr.aliyuncs.com"` | Global Docker image registry |
| global.image.repository | string | `"automq/automq-enterprise"` | |
| global.image.schemaRegistry | object | `{"repository":"automq/karapace","tag":"4.1.0"}` | Global Docker image for the schema registry |
| global.image.tag | string | `"5.3.1"` | |
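
Following the `0@s3://$bucket?region=$region&endpoint=$endpoint` pattern, a hedged example of global.config built from the chart default above; the bucket names are placeholders:

```yaml
global:
  config: |
    s3.data.buckets=0@s3://my-data-bucket?region=us-east-1
    s3.ops.buckets=1@s3://my-ops-bucket?region=us-east-1
    s3.wal.path=0@block:///dev/wal
```
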
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| global.livenessProbe | object | `{"failureThreshold":4,"initialDelaySeconds":60,"periodSeconds":15,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}` | Extra options for the Kafka containers' liveness probes |
| global.livenessProbe.failureThreshold | int | `4` | Failure threshold for livenessProbe |
| global.livenessProbe.initialDelaySeconds | int | `60` | Initial delay seconds for livenessProbe |
| global.livenessProbe.periodSeconds | int | `15` | Period seconds for livenessProbe |
| global.livenessProbe.probePort | int | `9092` | TCP socket port used by the livenessProbe and readinessProbe checks |
| global.livenessProbe.successThreshold | int | `1` | Success threshold for livenessProbe |
| global.livenessProbe.timeoutSeconds | int | `1` | Timeout seconds for livenessProbe |
| global.nodeAffinities | list | `[{"key":"kubernetes.io/arch","values":["amd64"]}]` | Affinity for node assignment |
| global.readinessProbe | object | `{"failureThreshold":8,"initialDelaySeconds":10,"periodSeconds":10,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}` | Extra options for the Kafka containers' readiness probes |
| global.readinessProbe.failureThreshold | int | `8` | Failure threshold for readinessProbe |
| global.readinessProbe.initialDelaySeconds | int | `10` | Initial delay seconds for readinessProbe |
| global.readinessProbe.periodSeconds | int | `10` | Period seconds for readinessProbe |
| global.readinessProbe.probePort | int | `9092` | TCP socket port used by the livenessProbe and readinessProbe checks |
| global.readinessProbe.successThreshold | int | `1` | Success threshold for readinessProbe |
| global.readinessProbe.timeoutSeconds | int | `1` | Timeout seconds for readinessProbe |
| global.serviceAccount.annotations | object | `{}` | Annotations for the ServiceAccount |
| global.serviceAccount.create | bool | `true` | Create a ServiceAccount for AutoMQ |
| global.serviceAccount.name | string | `""` | If not set and create is true, a name is generated using "automq-sa" |
| global.tolerations | list | `[{"effect":"NoSchedule","key":"dedicated","operator":"Equal","value":"automq"}]` | Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ |
| global.topologySpreadWhenUnsatisfiable | string | `"DoNotSchedule"` | Topology spread whenUnsatisfiable scheduling policy |
| global.zoneSpreadConstraints | bool | `false` | Availability-zone balancing constraint for AutoMQ nodes |
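
The scheduling and probe defaults above, written out as explicit values overrides; these mirror the chart defaults, so treat them as a starting point for tuning rather than a change:

```yaml
global:
  nodeAffinities:
    - key: "kubernetes.io/arch"
      values:
        - "amd64"
  tolerations:
    - key: "dedicated"
      operator: "Equal"
      value: "automq"
      effect: "NoSchedule"
  livenessProbe:
    probePort: 9092
    initialDelaySeconds: 60
    periodSeconds: 15
    failureThreshold: 4
```
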
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| listeners.client[0] | object | `{"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9092,"name":"PLAINTEXT","protocol":"PLAINTEXT","sslClientAuth":""}` | Kafka client listener definition; `containerPort` sets the port for the client listener |
| listeners.client[0].advertisedHostnames.baseDomain | string | `""` | Expected Route53/Private DNS base domain for validation and advertised listener overrides |
| listeners.client[0].advertisedHostnames.enabled | bool | `false` | Enable AutoMQ-managed DNS records for this listener |
| listeners.client[0].advertisedHostnames.externalDns.privateZoneId | string | `""` | Hosted zone identifier (e.g. AWS Route53 private zone ID) |
| listeners.client[0].advertisedHostnames.hostnamePattern | string | `""` | Optional template for DNS record names. Defaults to the kernel fallback when empty |
| listeners.client[0].advertisedHostnames.ttl | string | `nil` | TTL override (seconds) for Route53 records created by the kernel. Leave null to use the default |
| listeners.client[0].name | string | `"PLAINTEXT"` | Name for the Kafka client listener |
| listeners.client[0].protocol | string | `"PLAINTEXT"` | Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' |
| listeners.client[0].sslClientAuth | string | `""` | Optional. If SASL_SSL is enabled, configures the mTLS client authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' |
| listeners.controller[0] | object | `{"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9093,"name":"CONTROLLER","protocol":"PLAINTEXT","sslClientAuth":""}` | Kafka controller listener definition; `containerPort` sets the port for the controller listener |
| listeners.controller[0].name | string | `"CONTROLLER"` | Name for the Kafka controller listener |
| listeners.controller[0].protocol | string | `"PLAINTEXT"` | Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' |
| listeners.controller[0].sslClientAuth | string | `""` | Optional. If SASL_SSL is enabled, configures the mTLS client authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' |
| listeners.interbroker[0] | object | `{"advertisedHostnames":{"baseDomain":"","enabled":false,"externalDns":{"privateZoneId":""},"hostnamePattern":"","ttl":null},"containerPort":9094,"name":"BROKER","protocol":"PLAINTEXT","sslClientAuth":""}` | Kafka inter-broker listener definition; `containerPort` sets the port for the inter-broker listener |
| listeners.interbroker[0].name | string | `"BROKER"` | Name for the Kafka inter-broker listener |
| listeners.interbroker[0].protocol | string | `"PLAINTEXT"` | Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' |
| listeners.interbroker[0].sslClientAuth | string | `""` | Optional. If SASL_SSL is enabled, configures the mTLS client authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' |
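
A hedged sketch of switching the client listener to SASL_SSL with mandatory client certificates; the listener name is illustrative, and this needs matching sasl.* and tls.* settings (see the tables below):

```yaml
listeners:
  client:
    - name: "SASL_SSL"         # illustrative listener name
      containerPort: 9092
      protocol: "SASL_SSL"     # one of PLAINTEXT, SASL_PLAINTEXT, SASL_SSL, SSL
      sslClientAuth: "required"
```
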
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| networkPolicy.enabled | bool | `false` | |
| networkPolicy.ingressRules | list | `[]` | Add ingress rules to the AutoMQ pod(s); see the example below this table |
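
A cleaned-up version of the ingress-rule example embedded in the original parameter comment (port 9102 and the open CIDR come from that comment):

```yaml
networkPolicy:
  enabled: true
  ingressRules:
    - ports:              # target ports
        - 9102
      ipBlocks:           # ip block settings
        - cidr: "0.0.0.0/0"
```
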
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| sasl.client | object | `{"passwords":null,"users":["user1"]}` | Credentials for client communications |
| sasl.client.passwords | string | `nil` | Comma-separated list of passwords for client communications when SASL is enabled; must match the number of sasl.client.users |
| sasl.client.users | list | `["user1"]` | Comma-separated list of usernames for client communications when SASL is enabled |
| sasl.controller | object | `{"password":null,"user":"controller_user"}` | Credentials for controller communications |
| sasl.controller.password | string | `nil` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. |
| sasl.controller.user | string | `"controller_user"` | Username for controller communications when SASL is enabled |
| sasl.controllerMechanism | string | `"PLAIN"` | SASL mechanism for controller communications |
| sasl.enabledMechanisms | string | `"PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"` | NOTE: At the moment, Kafka Raft mode does not support SCRAM, which is why only PLAIN is configured. |
| sasl.existingSecret | string | `""` | |
| sasl.interBrokerMechanism | string | `"PLAIN"` | SASL mechanism for inter-broker communication |
| sasl.interbroker | object | `{"password":null,"user":"inter_broker_user"}` | Credentials for inter-broker communications |
| sasl.interbroker.password | string | `nil` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. |
| sasl.interbroker.user | string | `"inter_broker_user"` | Username for inter-broker communications when SASL is enabled |
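
A hedged sketch of the SASL credential trio above; the passwords are placeholders and should come from a secret (sasl.existingSecret) in production:

```yaml
sasl:
  client:
    users:
      - user1
    passwords: "changeme-1"        # placeholder; count must match the number of users
  interbroker:
    user: inter_broker_user
    password: "changeme-broker"    # placeholder
  controller:
    user: controller_user
    password: "changeme-ctrl"      # placeholder
```
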
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| schemaRegistry | object | `{"create":false,"env":null}` | SchemaRegistry configuration |
| schemaRegistry.env | string | `nil` | Env for SchemaRegistry |
| service | object | `{"headless":{"annotations":{},"labels":{}}}` | Service for AutoMQ; service type: ClusterIP |
| service.headless | object | `{"annotations":{},"labels":{}}` | Headless service properties |
| service.headless.annotations | object | `{}` | Annotations for the cluster headless service |
| service.headless.labels | object | `{}` | Labels for the cluster headless service |

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| tls | object | `{"autoGenerated":false,"customAltNames":[],"endpointIdentificationAlgorithm":"https","existingSecret":"","jksKeystoreKey":"","jksTruststoreKey":"","jksTruststoreSecret":"","keyPassword":"","keystorePassword":"","passwordsSecret":"","passwordsSecretKeystoreKey":"keystore-password","passwordsSecretPemPasswordKey":"","passwordsSecretTruststoreKey":"truststore-password","pemChainIncluded":false,"selfConfigure":false,"sslClientAuth":"required","truststorePassword":"","type":"JKS"}` | Kafka TLS settings, required if SSL or SASL_SSL listeners are configured |
| tls.autoGenerated | bool | `false` | Generate self-signed TLS certificates for Kafka brokers automatically. Currently only supported when tls.type is PEM. Note: ignored when using the 'JKS' format or when tls.existingSecret is not empty. |
| tls.customAltNames | list | `[]` | Optionally specify an extra list of additional subject alternative names (SANs) for the automatically generated TLS certificates |
| tls.endpointIdentificationAlgorithm | string | `"https"` | The endpoint identification algorithm used to validate the server hostname against the server certificate. Disable server hostname verification by setting it to an empty string. Ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings |
| tls.existingSecret | string | `""` | Name of the existing secret containing the TLS certificates for the Kafka nodes. For tls.type=PEM, the secret must contain a shared CA (kafka-ca.crt) plus a single certificate/key pair (kafka.crt + kafka.key or tls.crt + tls.key); the chart converts this bundle into the JKS keystore/truststore files automatically. For tls.type=JKS, the secret must contain kafka.keystore.jks and kafka.truststore.jks (also shared by all nodes). Per-role or per-pod keystores are no longer required. |
| tls.jksKeystoreKey | string | `""` | The secret key from tls.existingSecret containing the keystore. Note: ignored when using the 'PEM' format for certificates. |
| tls.jksTruststoreKey | string | `""` | The secret key from tls.existingSecret or tls.jksTruststoreSecret containing the truststore. Note: ignored when using the 'PEM' format for certificates. |
| tls.jksTruststoreSecret | string | `""` | Name of the existing secret containing your truststore if it is missing from, or differs from, the one in tls.existingSecret. Note: ignored when using the 'PEM' format for certificates. |
| tls.keyPassword | string | `""` | Password to access the PEM key when it is password-protected. Note: ignored when tls.passwordsSecret is provided. |
| tls.keystorePassword | string | `""` | Password to access the JKS keystore when it is password-protected. Ignored when tls.passwordsSecret is provided. When using tls.type=PEM, the generated keystore uses this password or a randomly generated one. |
| tls.passwordsSecret | string | `""` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected (key: password) |
| tls.passwordsSecretKeystoreKey | string | `"keystore-password"` | The secret key from tls.passwordsSecret containing the password for the keystore |
| tls.passwordsSecretPemPasswordKey | string | `""` | The secret key from tls.passwordsSecret containing the password for the PEM key inside tls.passwordsSecret |
| tls.passwordsSecretTruststoreKey | string | `"truststore-password"` | The secret key from tls.passwordsSecret containing the password for the truststore |
| tls.pemChainIncluded | bool | `false` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. Certificates must be in the proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. |
| tls.selfConfigure | bool | `false` | Enable self-configured TLS certificates for AutoMQ |
| tls.sslClientAuth | string | `"required"` | Sets the default value for the ssl.client.auth Kafka setting. Ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings |
| tls.truststorePassword | string | `""` | Password to access the JKS truststore when it is password-protected. Ignored when tls.passwordsSecret is provided. When using tls.type=PEM, the generated truststore uses this password or a randomly generated one. |
| tls.type | string | `"JKS"` | Format to use for TLS certificates. Allowed types: JKS and PEM |
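
A hedged example of referencing pre-built JKS stores, per the tls.existingSecret contract described above; the secret names are placeholders, while the key names are the chart defaults:

```yaml
tls:
  type: "JKS"
  existingSecret: "kafka-tls"              # placeholder; must contain kafka.keystore.jks and kafka.truststore.jks
  passwordsSecret: "kafka-tls-passwords"   # placeholder secret holding the store passwords
  passwordsSecretKeystoreKey: "keystore-password"      # chart default key name
  passwordsSecretTruststoreKey: "truststore-password"  # chart default key name
  sslClientAuth: "required"
```
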

Autogenerated from chart metadata using helm-docs v1.14.2