Helm Chart Values README

This document provides the parameter definitions and recommended settings for the AutoMQ Enterprise Edition chart. It is generated automatically by the helm-docs tool.

Requirements

| Repository | Name | Version |
|------------|------|---------|
| oci://registry-1.docker.io/bitnamicharts | common | 2.x.x |

Values

Each entry below lists the parameter key, its value type, its default, and a description.
broker
object
{"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":0,"resources":{}}
@section Broker statefulset parameters #
broker.annotations
object
{}
@param broker.annotations Extra annotations for AutoMQ broker pods #
broker.env
list
[]
@param broker.env Extra env arrays for AutoMQ broker pods # E.g. # env: # - name: "KAFKA_JVM_PERFORMANCE_OPTS" # value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5" # - name: "KAFKA_OPTS" # value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError" #
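The same example as a readable values.yaml override (a sketch; the JVM flags are taken from the example above, not chart defaults):

```yaml
broker:
  env:
    # Tune the JVM for low-pause GC (illustrative values)
    - name: "KAFKA_JVM_PERFORMANCE_OPTS"
      value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5"
    # Fail fast and capture a heap dump on OOM
    - name: "KAFKA_OPTS"
      value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError"
```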
broker.extraConfig
string
""
@param broker.extraConfig Extra configuration file for Kafka broker nodes, rendered as a template. Auto-generated based on chart values when not specified. #
broker.labels
object
{}
@param broker.labels Extra labels for AutoMQ broker pods #
broker.partition
int
0
@param broker.partition StatefulSet rolling update partition for AutoMQ broker nodes #
broker.persistence
object
{"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}
Enable persistence using Persistent Volume Claims # Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ #
broker.persistence.wal
object
{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}
WAL volume for AutoMQ broker nodes #
broker.persistence.wal.accessMode
string
"ReadWriteOnce"
@param broker.persistence.wal.accessMode Persistent WAL volume access mode #
broker.persistence.wal.annotations
object
{}
@param broker.persistence.wal.annotations Annotations for the PVC #
broker.persistence.wal.size
string
"20Gi"
@param broker.persistence.wal.size PVC Storage Request for AutoMQ wal volume #
broker.persistence.wal.storageClass
string
""
@param broker.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume #
broker.replicas
int
0
@param broker.replicas Number of AutoMQ broker nodes #
broker.resources
object
{}
@param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) # E.g. # resources: # requests: # cpu: 2 # memory: 512Mi # limits: # cpu: 3 # memory: 1024Mi #
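Combining the broker parameters above, a minimal values.yaml sketch might look as follows (the replica count, sizes, and limits are illustrative, not recommendations):

```yaml
broker:
  replicas: 3                # chart default is 0
  resources:
    requests:
      cpu: 2
      memory: 512Mi
    limits:
      cpu: 3
      memory: 1024Mi
  persistence:
    wal:
      enabled: true
      size: "20Gi"
      storageClass: ""       # empty typically falls back to the cluster default StorageClass
```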
controller
object
{"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":3,"resources":{}}
@section Controller statefulset parameters #
controller.annotations
object
{}
@param controller.annotations Extra annotations for AutoMQ Controller pods #
controller.env
list
[]
@param controller.env Extra env arrays for AutoMQ Controller pods # E.g. # env: # - name: "KAFKA_JVM_PERFORMANCE_OPTS" # value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5" # - name: "KAFKA_OPTS" # value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError" #
controller.extraConfig
string
""
@param controller.extraConfig Extra configuration file for Kafka controller nodes, rendered as a template. Auto-generated based on chart values when not specified. #
controller.labels
object
{}
@param controller.labels Extra labels for AutoMQ Controller pods #
controller.partition
int
0
@param controller.partition Partition rolling update strategy for AutoMQ controller nodes #
controller.persistence
object
{"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}
Enable persistence using Persistent Volume Claims # Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ #
controller.persistence.metadata
object
{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""}
KRaft metadata volume for AutoMQ controller nodes #
controller.persistence.metadata.accessMode
string
"ReadWriteOnce"
@param controller.persistence.metadata.accessMode Persistent metadata volume access mode #
controller.persistence.metadata.annotations
object
{}
@param controller.persistence.metadata.annotations Annotations for the PVC #
controller.persistence.metadata.size
string
"20Gi"
@param controller.persistence.metadata.size PVC Storage Request for AutoMQ metadata volume #
controller.persistence.metadata.storageClass
string
""
@param controller.persistence.metadata.storageClass PVC Storage Class for AutoMQ metadata volume #
controller.persistence.wal
object
{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}
WAL volume for AutoMQ controller nodes #
controller.persistence.wal.accessMode
string
"ReadWriteOnce"
@param controller.persistence.wal.accessMode Persistent WAL volume access mode #
controller.persistence.wal.annotations
object
{}
@param controller.persistence.wal.annotations Annotations for the PVC #
controller.persistence.wal.size
string
"20Gi"
@param controller.persistence.wal.size PVC Storage Request for AutoMQ wal volume #
controller.persistence.wal.storageClass
string
""
@param controller.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume #
controller.replicas
int
3
@param controller.replicas Number of AutoMQ controller nodes #
controller.resources
object
{}
@param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) # E.g. # resources: # requests: # cpu: 2 # memory: 512Mi # limits: # cpu: 3 # memory: 1024Mi #
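For the controller StatefulSet the shape is the same, plus a dedicated KRaft metadata volume. A sketch (the StorageClass name is hypothetical):

```yaml
controller:
  replicas: 3                # chart default
  persistence:
    metadata:
      size: "20Gi"
      storageClass: "gp3"    # hypothetical StorageClass name
    wal:
      enabled: true
      size: "20Gi"
```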
global.autoscaling
object
{"hpa":{"annotations":{},"enabled":false,"maxReplicas":"","minReplicas":"","targetCPU":"","targetMemory":""}}
ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ #
global.autoscaling.hpa.annotations
object
{}
@param global.autoscaling.hpa.annotations Annotations for HPA. #
global.autoscaling.hpa.enabled
bool
false
@param global.autoscaling.hpa.enabled Enable HPA for AutoMQ #
global.autoscaling.hpa.maxReplicas
string
""
@param global.autoscaling.hpa.maxReplicas Maximum number of AutoMQ replicas #
global.autoscaling.hpa.minReplicas
string
""
@param global.autoscaling.hpa.minReplicas Minimum number of AutoMQ replicas (>= 3) #
global.autoscaling.hpa.targetCPU
string
""
@param global.autoscaling.hpa.targetCPU Target CPU utilization percentage #
global.autoscaling.hpa.targetMemory
string
""
@param global.autoscaling.hpa.targetMemory Target Memory utilization percentage #
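An HPA sketch built from the keys above (note the chart types these fields as strings; the replica bounds and threshold are illustrative):

```yaml
global:
  autoscaling:
    hpa:
      enabled: true
      minReplicas: "3"       # illustrative
      maxReplicas: "6"       # illustrative
      targetCPU: "60"        # target CPU utilization percentage
      targetMemory: ""       # optional; empty leaves it unset (assumption)
```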
global.cloudProvider.credentials
string
""
@param global.cloudProvider.credentials Credentials for the cloud provider where AutoMQ is running E.g. instance://?role=<your_role_id> or static://?accessKey=<your_encoded_ak>&secretKey=<your_encoded_sk> #
global.cloudProvider.name
string
""
@param global.cloudProvider.name Cloud provider where AutoMQ is running E.g. aws, azure, gcp, etc. #
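For example, the two cloud provider settings together (the role placeholder is left as-is):

```yaml
global:
  cloudProvider:
    name: "aws"
    credentials: "instance://?role=<your_role_id>"   # or static://?accessKey=...&secretKey=...
```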
global.commonAnnotations
object
{}
@param global.commonAnnotations Annotations to add to all deployed objects #
global.commonLabels
object
{}
@param global.commonLabels Labels to add to all deployed objects #
global.config
string
"s3.data.buckets=0@s3://xxx_bucket?region=us-east-1\ns3.ops.buckets=1@s3://xxx_bucket?region=us-east-1\ns3.wal.path=0@block\\:///dev/wal\nautomq.cloud.credentials=instance\\://?role\\=xxx\n"
@param global.config AutoMQ server configuration # Bucket URI pattern: 0@s3://$bucket?region=$region&endpoint=$endpoint #
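Written as a values.yaml override, the default shape of global.config looks like this sketch (bucket names and the role are placeholders; the escaped colons mirror the chart default):

```yaml
global:
  config: |
    s3.data.buckets=0@s3://xxx_bucket?region=us-east-1
    s3.ops.buckets=1@s3://xxx_bucket?region=us-east-1
    s3.wal.path=0@block\:///dev/wal
    automq.cloud.credentials=instance\://?role\=xxx
```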
global.existingSecretConfig
string
""
Existing secret containing extra AutoMQ configuration # NOTE: This will override the secretConfig value #
global.image.pullPolicy
string
"Always"
@param global.image.pullPolicy Global Docker image pull policy #
global.image.pullSecrets
list
[]
@param global.image.pullSecrets Global Docker registry secret names as an array #
global.image.registry
string
"automq-docker-registry-registry.cn-hangzhou.cr.aliyuncs.com"
@param global.image.registry Global Docker image registry
global.image.repository
string
"automq/automq-enterprise"
@param global.image.repository Global Docker image repository #
global.image.schemaRegistry
object
{"repository":"automq/karapace","tag":"4.1.0"}
@param global.image.schemaRegistry Global Docker image settings for the Schema Registry #
global.image.tag
string
"temp-helm-chart-software-20250409110730"
@param global.image.tag Global Docker image tag #
global.livenessProbe
object
{"enabled":true,"failureThreshold":4,"initialDelaySeconds":60,"periodSeconds":15,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}
Configure extra options for Kafka containers' liveness probes #
global.livenessProbe.enabled
bool
true
@param global.livenessProbe.enabled Enable livenessProbe on Kafka containers #
global.livenessProbe.failureThreshold
int
4
@param global.livenessProbe.failureThreshold Failure threshold for livenessProbe #
global.livenessProbe.initialDelaySeconds
int
60
@param global.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe #
global.livenessProbe.periodSeconds
int
15
@param global.livenessProbe.periodSeconds Period seconds for livenessProbe #
global.livenessProbe.probePort
int
9092
@param global.livenessProbe.probePort TCP socket port for livenessProbe and readinessProbe checks #
global.livenessProbe.successThreshold
int
1
@param global.livenessProbe.successThreshold Success threshold for livenessProbe #
global.livenessProbe.timeoutSeconds
int
1
@param global.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe #
global.nodeAffinities
list
[{"key":"kubernetes.io/arch","values":["amd64"]}]
@param global.nodeAffinities Affinity for node assignment #
global.readinessProbe
object
{"enabled":true,"failureThreshold":8,"initialDelaySeconds":10,"periodSeconds":10,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}
Configure extra options for Kafka containers' readiness probes #
global.readinessProbe.enabled
bool
true
@param global.readinessProbe.enabled Enable readinessProbe on Kafka containers #
global.readinessProbe.failureThreshold
int
8
@param global.readinessProbe.failureThreshold Failure threshold for readinessProbe #
global.readinessProbe.initialDelaySeconds
int
10
@param global.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe #
global.readinessProbe.periodSeconds
int
10
@param global.readinessProbe.periodSeconds Period seconds for readinessProbe #
global.readinessProbe.probePort
int
9092
@param global.readinessProbe.probePort TCP socket port for livenessProbe and readinessProbe checks #
global.readinessProbe.successThreshold
int
1
@param global.readinessProbe.successThreshold Success threshold for readinessProbe #
global.readinessProbe.timeoutSeconds
int
1
@param global.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe #
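The probe defaults above translate to the following values.yaml fragment, a sketch to tweak if your nodes need a longer startup window:

```yaml
global:
  livenessProbe:
    enabled: true
    initialDelaySeconds: 60
    periodSeconds: 15
    timeoutSeconds: 1
    failureThreshold: 4
    probePort: 9092          # TCP socket checked by both probes
  readinessProbe:
    enabled: true
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 1
    failureThreshold: 8
```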
global.tolerations
list
[{"effect":"NoSchedule","key":"dedicated","operator":"Equal","value":"automq"}]
Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ #
global.zoneSpreadConstraints
bool
false
@param global.zoneSpreadConstraints Availability Zone Balancing Constraint of AutoMQ nodes #
listeners.client[0]
object
{"containerPort":9092,"name":"PLAINTEXT","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.client.containerPort Port for the Kafka client listener #
listeners.client[0].name
string
"PLAINTEXT"
@param listeners.client.name Name for the Kafka client listener #
listeners.client[0].protocol
string
"PLAINTEXT"
@param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.client[0].sslClientAuth
string
""
@param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
listeners.controller[0]
object
{"containerPort":9093,"name":"CONTROLLER","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.controller.containerPort Port for the Kafka controller listener #
listeners.controller[0].name
string
"CONTROLLER"
@param listeners.controller.name Name for the Kafka controller listener #
listeners.controller[0].protocol
string
"PLAINTEXT"
@param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.controller[0].sslClientAuth
string
""
@param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
listeners.interbroker[0]
object
{"containerPort":9094,"name":"BROKER","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.interbroker.containerPort Port for the Kafka inter-broker listener #
listeners.interbroker[0].name
string
"BROKER"
@param listeners.interbroker.name Name for the Kafka inter-broker listener #
listeners.interbroker[0].protocol
string
"PLAINTEXT"
@param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.interbroker[0].sslClientAuth
string
""
@param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
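The three listener lists share one schema. This sketch reproduces the defaults and marks where a hardened protocol would go (switching to SASL_SSL also requires the sasl and tls sections below):

```yaml
listeners:
  client:
    - name: "PLAINTEXT"
      containerPort: 9092
      protocol: "PLAINTEXT"  # or SASL_PLAINTEXT / SASL_SSL / SSL
      sslClientAuth: ""
  controller:
    - name: "CONTROLLER"
      containerPort: 9093
      protocol: "PLAINTEXT"
  interbroker:
    - name: "BROKER"
      containerPort: 9094
      protocol: "PLAINTEXT"
```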
networkPolicy.enabled
bool
false
@param networkPolicy.enabled Enable NetworkPolicy for AutoMQ pods #
networkPolicy.ingressRules
list
[]
@param networkPolicy.ingressRules Add ingress rules to the AutoMQ pod(s) # E.g.: # ingressRules: # - ports: target ports # - 9102 # ipBlocks: IP block settings # - cidr: 0.0.0.0/0
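Rendered as YAML, the ingress-rule example above becomes the following sketch (the schema follows the inline example; the CIDR and port are illustrative):

```yaml
networkPolicy:
  enabled: true
  ingressRules:
    - ports:                 # target ports
        - 9102
      ipBlocks:              # IP block settings
        - cidr: "0.0.0.0/0"
```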
sasl.client
object
{"passwords":null,"users":["_automq"]}
Credentials for client communications. #
sasl.client.passwords
string
nil
@param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users #
sasl.client.users
list
["_automq"]
@param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled #
sasl.controller
object
{"password":null,"user":"_automq"}
Credentials for controller communications. #
sasl.controller.password
string
nil
@param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. #
sasl.controller.user
string
"_automq"
@param sasl.controller.user Username for controller communications when SASL is enabled #
sasl.controllerMechanism
string
"PLAIN"
@param sasl.controllerMechanism SASL mechanism for controller communications. #
sasl.enabledMechanisms
string
"PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"
NOTE: At the moment, KRaft mode does not support SCRAM, which is why only PLAIN is configured. #
sasl.interBrokerMechanism
string
"PLAIN"
@param sasl.interBrokerMechanism SASL mechanism for inter broker communication. #
sasl.interbroker
object
{"password":null,"user":"_automq"}
Credentials for inter-broker communications. #
sasl.interbroker.password
string
nil
@param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. #
sasl.interbroker.user
string
"_automq"
@param sasl.interbroker.user Username for inter-broker communications when SASL is enabled #
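A sketch of the SASL credential block (usernames are the chart defaults; the passwords are hypothetical placeholders and should come from a secret in practice):

```yaml
sasl:
  enabledMechanisms: "PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"
  client:
    users:
      - "_automq"
    passwords: "s3cret"      # comma-separated, one per user; hypothetical value
  interbroker:
    user: "_automq"
    password: "s3cret"       # hypothetical value
  controller:
    user: "_automq"
    password: "s3cret"       # hypothetical value
```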
schemaRegistry
object
{"create":false,"env":null}
SchemaRegistry configuration #
schemaRegistry.env
string
nil
@param schemaRegistry.env Env for SchemaRegistry #
service
object
{"headless":{"annotations":{},"labels":{}},"loadbalancer":{"annotations":{},"enabled":false,"labels":{}}}
Service for AutoMQ, service type: ClusterIP #
service.headless
object
{"annotations":{},"labels":{}}
Headless service properties #
service.headless.annotations
object
{}
@param service.headless.annotations Annotations for the cluster headless service. #
service.headless.labels
object
{}
@param service.headless.labels Labels for the cluster headless service. #
service.loadbalancer
object
{"annotations":{},"enabled":false,"labels":{}}
LoadBalancer service properties #
service.loadbalancer.annotations
object
{}
@param service.loadbalancer.annotations Additional custom annotations for the AutoMQ LoadBalancer service #
service.loadbalancer.labels
object
{}
@param service.loadbalancer.labels Additional custom labels for the AutoMQ LoadBalancer service #
tls
object
{"autoGenerated":false,"customAltNames":[],"endpointIdentificationAlgorithm":"https","existingSecret":"","jksKeystoreKey":"","jksTruststoreKey":"","jksTruststoreSecret":"","keyPassword":"","keystorePassword":"","passwordsSecret":"","passwordsSecretKeystoreKey":"keystore-password","passwordsSecretPemPasswordKey":"","passwordsSecretTruststoreKey":"truststore-password","pemChainIncluded":false,"sslClientAuth":"required","truststorePassword":"","type":"JKS"}
Kafka TLS settings, required if SSL or SASL_SSL listeners are configured #
tls.autoGenerated
bool
false
@param tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if tls.type is PEM # Note: ignored when using 'jks' format or tls.existingSecret is not empty #
tls.customAltNames
list
[]
@param tls.customAltNames Optionally specify extra list of additional subject alternative names (SANs) for the automatically generated TLS certificates. #
tls.endpointIdentificationAlgorithm
string
"https"
@param tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate # Disable server host name verification by setting it to an empty string. # Ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings #
tls.existingSecret
string
""
@param tls.existingSecret Name of the existing secret containing the TLS certificates for the Kafka nodes. # When using 'jks' format for certificates, each secret should contain a truststore and a keystore. # Create these secrets following the steps below: # 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh # 2) Rename your truststore to kafka.truststore.jks . # 3) Rename your keystores to kafka-<role>-X.keystore.jks where X is the replica number. # 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): # kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks \ # --from-file=kafka-controller-0.keystore.jks=./kafka-controller-0.keystore.jks --from-file=kafka-broker-0.keystore.jks=./kafka-broker-0.keystore.jks ... # # NOTE: Alternatively, a single keystore can be provided for all nodes under the key 'kafka.keystore.jks'; this keystore will be used by all nodes unless overridden by the 'kafka-<role>-X.keystore.jks' file # # When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. # Create these secrets following the steps below: # 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA # 2) Rename your CA file to kafka-ca.crt . # 3) Rename your certificates to kafka-X.tls.crt where X is the ID of each Kafka broker. # 4) Rename your keys to kafka-X.tls.key where X is the ID of each Kafka broker. # 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): # kubectl create secret generic SECRET_NAME_0 --from-file=kafka-ca.crt=./kafka-ca.crt --from-file=kafka-controller-0.crt=./kafka-controller-0.crt --from-file=kafka-controller-0.key=./kafka-controller-0.key \ # --from-file=kafka-broker-0.crt=./kafka-broker-0.crt --from-file=kafka-broker-0.key=./kafka-broker-0.key ... # # NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'kafka.crt' and 'kafka.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files # NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'tls.crt' and 'tls.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files #
tls.jksKeystoreKey
string
""
@param tls.jksKeystoreKey The secret key from the tls.existingSecret containing the keystore # Note: ignored when using 'pem' format for certificates. #
tls.jksTruststoreKey
string
""
@param tls.jksTruststoreKey The secret key from the tls.existingSecret or tls.jksTruststoreSecret containing the truststore # Note: ignored when using 'pem' format for certificates. #
tls.jksTruststoreSecret
string
""
@param tls.jksTruststoreSecret Name of the existing secret containing your truststore, if it does not exist in or differs from the one in tls.existingSecret # Note: ignored when using 'pem' format for certificates. #
tls.keyPassword
string
""
@param tls.keyPassword Password to access the PEM key when it is password-protected. # Note: ignored when using 'tls.passwordsSecret' #
tls.keystorePassword
string
""
@param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. # When using tls.type=PEM, the generated keystore will use this password or randomly generate one. #
tls.passwordsSecret
string
""
@param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. ( key : password ) #
tls.passwordsSecretKeystoreKey
string
"keystore-password"
@param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore. #
tls.passwordsSecretPemPasswordKey
string
""
@param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. #
tls.passwordsSecretTruststoreKey
string
"truststore-password"
@param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore. #
tls.pemChainIncluded
bool
false
@param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. # Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. #
tls.sslClientAuth
string
"required"
@param tls.sslClientAuth Sets the default value for the ssl.client.auth Kafka setting. # Ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings #
tls.truststorePassword
string
""
@param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. # When using tls.type=PEM, the generated keystore will use this password or randomly generate one. #
tls.type
string
"JKS"
@param tls.type Format to use for TLS certificates. Allowed types: JKS and PEM #
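Tying the TLS parameters together, a hedged sketch for JKS certificates stored in pre-created secrets (both secret names are hypothetical):

```yaml
tls:
  type: "JKS"
  existingSecret: "kafka-tls"               # hypothetical; holds the truststore and keystores
  passwordsSecret: "kafka-tls-passwords"    # hypothetical; holds the two password keys below
  passwordsSecretKeystoreKey: "keystore-password"
  passwordsSecretTruststoreKey: "truststore-password"
  sslClientAuth: "required"
  endpointIdentificationAlgorithm: "https"
```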