Helm Chart Values Documentation

This document describes the parameters of the AutoMQ Enterprise Edition Chart and provides recommendations for setting them. It is generated automatically by the helm-docs tool.

Requirements

| Repository | Name | Version |
|------------|------|---------|
| oci://registry-1.docker.io/bitnamicharts | common | 2.x.x |

Values

Each parameter below is listed on four lines: key, type, default value, and description.
broker
object
{"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":0,"resources":{}}
@section Broker statefulset parameters #
broker.annotations
object
{}
@param broker.annotations Extra annotations for AutoMQ broker pods #
broker.env
list
[]
@param broker.env Extra env arrays for AutoMQ broker pods. E.g.:

    env:
      - name: "KAFKA_JVM_PERFORMANCE_OPTS"
        value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5"
      - name: "KAFKA_OPTS"
        value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError"
broker.extraConfig
string
""
@param broker.extraConfig Extra configuration file for Kafka broker nodes, rendered as a template. Auto-generated based on chart values when not specified. #
broker.labels
object
{}
@param broker.labels Extra labels for AutoMQ broker pods #
broker.partition
int
0
@param broker.partition Partition rolling update strategy for AutoMQ broker nodes #
broker.persistence
object
{"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}
Enable persistence using Persistent Volume Claims # ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ #
broker.persistence.wal
object
{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}
wal volume for AutoMQ broker nodes #
broker.persistence.wal.accessMode
string
"ReadWriteOnce"
@param broker.persistence.wal.accessMode Persistent wal Volume Access Mode #
broker.persistence.wal.annotations
object
{}
@param broker.persistence.wal.annotations Annotations for the PVC #
broker.persistence.wal.size
string
"20Gi"
@param broker.persistence.wal.size PVC Storage Request for AutoMQ wal volume #
broker.persistence.wal.storageClass
string
""
@param broker.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume #
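Putting the wal persistence keys together, a minimal sketch mirroring the defaults above (the comment on storageClass is an assumption about the usual chart convention):

```yaml
broker:
  persistence:
    wal:
      enabled: true
      accessMode: ReadWriteOnce
      size: "20Gi"
      storageClass: ""   # assumed: empty string falls back to the cluster's default StorageClass
```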
broker.replicas
int
0
@param broker.replicas Number of AutoMQ broker nodes #
broker.resources
object
{}
@param broker.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads). E.g.:

    resources:
      requests:
        cpu: 2
        memory: 512Mi
      limits:
        cpu: 3
        memory: 1024Mi
controller
object
{"annotations":{},"env":[],"extraConfig":"","labels":{},"partition":0,"persistence":{"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}},"replicas":3,"resources":{}}
@section Controller statefulset parameters #
controller.annotations
object
{}
@param controller.annotations Extra annotations for AutoMQ Controller pods #
controller.env
list
[]
@param controller.env Extra env arrays for AutoMQ Controller pods. E.g.:

    env:
      - name: "KAFKA_JVM_PERFORMANCE_OPTS"
        value: "-server -XX:+UseZGC -XX:ZCollectionInterval=5"
      - name: "KAFKA_OPTS"
        value: "-XX:+ExitOnOutOfMemoryError -XX:+HeapDumpOnOutOfMemoryError"
controller.extraConfig
string
""
@param controller.extraConfig Extra configuration file for Kafka controller nodes, rendered as a template. Auto-generated based on chart values when not specified. #
controller.labels
object
{}
@param controller.labels Extra labels for AutoMQ Controller pods #
controller.partition
int
0
@param controller.partition Partition rolling update strategy for AutoMQ controller nodes #
controller.persistence
object
{"metadata":{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""},"wal":{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}}
Enable persistence using Persistent Volume Claims # ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ #
controller.persistence.metadata
object
{"accessMode":"ReadWriteOnce","annotations":{},"size":"20Gi","storageClass":""}
KRaft metadata volume for AutoMQ controller nodes #
controller.persistence.metadata.accessMode
string
"ReadWriteOnce"
@param controller.persistence.metadata.accessMode Persistent metadata Volume Access Modes #
controller.persistence.metadata.annotations
object
{}
@param controller.persistence.metadata.annotations Annotations for the PVC #
controller.persistence.metadata.size
string
"20Gi"
@param controller.persistence.metadata.size PVC Storage Request for AutoMQ metadata volume #
controller.persistence.metadata.storageClass
string
""
@param controller.persistence.metadata.storageClass PVC Storage Class for AutoMQ metadata volume #
controller.persistence.wal
object
{"accessMode":"ReadWriteOnce","annotations":{},"enabled":true,"size":"20Gi","storageClass":""}
wal volume for AutoMQ controller nodes #
controller.persistence.wal.accessMode
string
"ReadWriteOnce"
@param controller.persistence.wal.accessMode Persistent wal Volume Access Mode #
controller.persistence.wal.annotations
object
{}
@param controller.persistence.wal.annotations Annotations for the PVC #
controller.persistence.wal.size
string
"20Gi"
@param controller.persistence.wal.size PVC Storage Request for AutoMQ wal volume #
controller.persistence.wal.storageClass
string
""
@param controller.persistence.wal.storageClass PVC Storage Class for AutoMQ wal volume #
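For the controller, the KRaft metadata volume and the wal volume are sized independently. A sketch based on the defaults above (the "gp3" StorageClass name is hypothetical):

```yaml
controller:
  persistence:
    metadata:
      accessMode: ReadWriteOnce
      size: "20Gi"
      storageClass: "gp3"   # hypothetical StorageClass name
    wal:
      enabled: true
      accessMode: ReadWriteOnce
      size: "20Gi"
      storageClass: "gp3"   # hypothetical StorageClass name
```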
controller.replicas
int
3
@param controller.replicas Number of AutoMQ controller nodes #
controller.resources
object
{}
@param controller.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads). E.g.:

    resources:
      requests:
        cpu: 2
        memory: 512Mi
      limits:
        cpu: 3
        memory: 1024Mi
global.autoscaling
object
{"hpa":{"annotations":{},"enabled":false,"maxReplicas":"","minReplicas":"","targetCPU":"","targetMemory":""}}
ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ #
global.autoscaling.hpa.annotations
object
{}
@param global.autoscaling.hpa.annotations Annotations for HPA. #
global.autoscaling.hpa.enabled
bool
false
@param global.autoscaling.hpa.enabled Enable HPA for AutoMQ #
global.autoscaling.hpa.maxReplicas
string
""
@param global.autoscaling.hpa.maxReplicas Maximum number of AutoMQ replicas (must be >= 3) #
global.autoscaling.hpa.minReplicas
string
""
@param global.autoscaling.hpa.minReplicas Minimum number of AutoMQ replicas #
global.autoscaling.hpa.targetCPU
string
""
@param global.autoscaling.hpa.targetCPU Target CPU utilization percentage #
global.autoscaling.hpa.targetMemory
string
""
@param global.autoscaling.hpa.targetMemory Target Memory utilization percentage #
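A sketch of enabling the HPA with the keys above (the replica counts and utilization target are illustrative; note the defaults type these fields as strings):

```yaml
global:
  autoscaling:
    hpa:
      enabled: true
      minReplicas: "3"
      maxReplicas: "6"
      targetCPU: "60"    # target CPU utilization percentage, illustrative
      targetMemory: ""   # leave empty to skip the memory metric
```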
global.cloudProvider.credentials
string
""
@param global.cloudProvider.credentials Credentials for the cloud provider where AutoMQ is running. E.g. instance://?role=<your_role_id> or static://?accessKey=<your_encoded_ak>&secretKey=<your_encoded_sk> #
global.cloudProvider.name
string
""
@param global.cloudProvider.name Cloud provider where AutoMQ is running E.g. aws, azure, gcp, etc. #
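Combining the two cloudProvider keys, a sketch for AWS using the static-credentials URI pattern described above (the access key and secret key are placeholders):

```yaml
global:
  cloudProvider:
    name: "aws"
    credentials: "static://?accessKey=<your_encoded_ak>&secretKey=<your_encoded_sk>"
```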
global.commonAnnotations
object
{}
@param global.commonAnnotations Annotations to add to all deployed objects #
global.commonLabels
object
{}
@param global.commonLabels Labels to add to all deployed objects #
global.config
string
"s3.data.buckets=0@s3://xxx_bucket?region=us-east-1\ns3.ops.buckets=1@s3://xxx_bucket?region=us-east-1\ns3.wal.path=0@block\\:///dev/wal\nautomq.cloud.credentials=instance\\://?role\\=xxx\n"
@param global.config AutoMQ configuration properties. Bucket URI pattern: 0@s3://$bucket?region=$region&endpoint=$endpoint #
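For readability, the default shown above corresponds to a values block like the following (the bucket names and role are placeholders from the default; the backslashes appear to be Java-properties escaping of ':' and '=' and are kept as-is):

```yaml
global:
  config: |
    s3.data.buckets=0@s3://xxx_bucket?region=us-east-1
    s3.ops.buckets=1@s3://xxx_bucket?region=us-east-1
    s3.wal.path=0@block\:///dev/wal
    automq.cloud.credentials=instance\://?role\=xxx
```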
global.existingSecretConfig
string
""
NOTE: This will override the secretConfig value #
global.image.pullPolicy
string
"Always"
-
global.image.pullSecrets
list
[]
-
global.image.registry
string
"automq-docker-registry-registry.cn-hangzhou.cr.aliyuncs.com"
@param global.image.registry Global Docker image registry
global.image.repository
string
"automq/automq-enterprise"
-
global.image.schemaRegistry
object
{"repository":"automq/karapace","tag":"4.1.0"}
@param global.image.schemaRegistry Global Docker image for Schema Registry
global.image.tag
string
"temp-helm-chart-software-20250409110730"
-
global.livenessProbe
object
{"enabled":true,"failureThreshold":4,"initialDelaySeconds":60,"periodSeconds":15,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}
Configure extra options for Kafka containers' liveness probes #
global.livenessProbe.enabled
bool
true
@param global.livenessProbe.enabled Enable livenessProbe on Kafka containers #
global.livenessProbe.failureThreshold
int
4
@param global.livenessProbe.failureThreshold Failure threshold for livenessProbe #
global.livenessProbe.initialDelaySeconds
int
60
@param global.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe #
global.livenessProbe.periodSeconds
int
15
@param global.livenessProbe.periodSeconds Period seconds for livenessProbe #
global.livenessProbe.probePort
int
9092
@param global.livenessProbe.probePort TCP socket port for livenessProbe and readinessProbe checks #
global.livenessProbe.successThreshold
int
1
@param global.livenessProbe.successThreshold Success threshold for livenessProbe #
global.livenessProbe.timeoutSeconds
int
1
@param global.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe #
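A sketch of loosening the liveness probe for slow-starting nodes, using the keys above (the numbers are illustrative, not recommendations):

```yaml
global:
  livenessProbe:
    enabled: true
    initialDelaySeconds: 120   # give the node more time before the first check
    periodSeconds: 15
    failureThreshold: 6
```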
global.nodeAffinities
list
[{"key":"kubernetes.io/arch","values":["amd64"]}]
@param global.nodeAffinities Affinity for node assignment #
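The default node affinity pins AutoMQ to amd64 nodes; written out as values it looks like:

```yaml
global:
  nodeAffinities:
    - key: "kubernetes.io/arch"
      values:
        - "amd64"
```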
global.readinessProbe
object
{"enabled":true,"failureThreshold":8,"initialDelaySeconds":10,"periodSeconds":10,"probePort":9092,"successThreshold":1,"timeoutSeconds":1}
Configure extra options for Kafka containers' readiness probes #
global.readinessProbe.enabled
bool
true
@param global.readinessProbe.enabled Enable readinessProbe on Kafka containers #
global.readinessProbe.failureThreshold
int
8
@param global.readinessProbe.failureThreshold Failure threshold for readinessProbe #
global.readinessProbe.initialDelaySeconds
int
10
@param global.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe #
global.readinessProbe.periodSeconds
int
10
@param global.readinessProbe.periodSeconds Period seconds for readinessProbe #
global.readinessProbe.probePort
int
9092
@param global.readinessProbe.probePort TCP socket port for livenessProbe and readinessProbe checks #
global.readinessProbe.successThreshold
int
1
@param global.readinessProbe.successThreshold Success threshold for readinessProbe #
global.readinessProbe.timeoutSeconds
int
1
@param global.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe #
global.tolerations
list
[{"effect":"NoSchedule","key":"dedicated","operator":"Equal","value":"automq"}]
Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ #
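The default toleration, written out as values (matching the default shown above, for nodes tainted dedicated=automq:NoSchedule):

```yaml
global:
  tolerations:
    - key: "dedicated"
      operator: "Equal"
      value: "automq"
      effect: "NoSchedule"
```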
global.zoneSpreadConstraints
bool
false
@param global.zoneSpreadConstraints Enable availability-zone spread constraints for AutoMQ nodes #
listeners.client[0]
object
{"containerPort":9092,"name":"PLAINTEXT","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.client.containerPort Port for the Kafka client listener #
listeners.client[0].name
string
"PLAINTEXT"
@param listeners.client.name Name for the Kafka client listener #
listeners.client[0].protocol
string
"PLAINTEXT"
@param listeners.client.protocol Security protocol for the Kafka client listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.client[0].sslClientAuth
string
""
@param listeners.client.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
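A sketch of switching the client listener to SASL_PLAINTEXT, assuming the list structure shown in the defaults above:

```yaml
listeners:
  client:
    - name: "SASL_PLAINTEXT"
      containerPort: 9092
      protocol: "SASL_PLAINTEXT"
      sslClientAuth: ""
```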
listeners.controller[0]
object
{"containerPort":9093,"name":"CONTROLLER","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.controller.containerPort Port for the Kafka controller listener #
listeners.controller[0].name
string
"CONTROLLER"
@param listeners.controller.name Name for the Kafka controller listener #
listeners.controller[0].protocol
string
"PLAINTEXT"
@param listeners.controller.protocol Security protocol for the Kafka controller listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.controller[0].sslClientAuth
string
""
@param listeners.controller.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
listeners.interbroker[0]
object
{"containerPort":9094,"name":"BROKER","protocol":"PLAINTEXT","sslClientAuth":""}
@param listeners.interbroker.containerPort Port for the Kafka inter-broker listener #
listeners.interbroker[0].name
string
"BROKER"
@param listeners.interbroker.name Name for the Kafka inter-broker listener #
listeners.interbroker[0].protocol
string
"PLAINTEXT"
@param listeners.interbroker.protocol Security protocol for the Kafka inter-broker listener. Allowed values are 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL' and 'SSL' #
listeners.interbroker[0].sslClientAuth
string
""
@param listeners.interbroker.sslClientAuth Optional. If SASL_SSL is enabled, configures the mTLS authentication type. If the SSL protocol is enabled, overrides tls.authType for this listener. Allowed values are 'none', 'requested' and 'required' #
networkPolicy.enabled
bool
false
-
networkPolicy.ingressRules
list
[]
@param networkPolicy.ingressRules Add ingress rules to the AutoMQ pod(s). E.g.:

    ingressRules:
      - ports:             # target ports
          - 9102
        ipBlocks:          # IP block settings
          - cidr: 0.0.0.0/0
sasl.client
object
{"passwords":null,"users":["_automq"]}
Credentials for client communications. #
sasl.client.passwords
string
nil
@param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users #
sasl.client.users
list
["_automq"]
@param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled #
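A sketch of supplying client SASL credentials with the two keys above (the password is a placeholder; provide one password per user, comma-separated):

```yaml
sasl:
  client:
    users:
      - "_automq"
    passwords: "<password-for-_automq>"   # placeholder; must match the number of users
```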
sasl.controller
object
{"password":null,"user":"_automq"}
Credentials for controller communications. #
sasl.controller.password
string
nil
@param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. #
sasl.controller.user
string
"_automq"
@param sasl.controller.user Username for controller communications when SASL is enabled #
sasl.controllerMechanism
string
"PLAIN"
@param sasl.controllerMechanism SASL mechanism for controller communications. #
sasl.enabledMechanisms
string
"PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"
NOTE: At the moment, Kafka Raft mode does not support SCRAM; that is why only PLAIN is configured. #
sasl.interBrokerMechanism
string
"PLAIN"
@param sasl.interBrokerMechanism SASL mechanism for inter broker communication. #
sasl.interbroker
object
{"password":null,"user":"_automq"}
Credentials for inter-broker communications. #
sasl.interbroker.password
string
nil
@param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. #
sasl.interbroker.user
string
"_automq"
@param sasl.interbroker.user Username for inter-broker communications when SASL is enabled #
schemaRegistry
object
{"create":false,"env":null}
SchemaRegistry configuration #
schemaRegistry.env
string
nil
@param schemaRegistry.env Env for SchemaRegistry #
service
object
{"headless":{"annotations":{},"labels":{}},"loadbalancer":{"annotations":{},"enabled":false,"labels":{}}}
Service for AutoMQ, service type: ClusterIP #
service.headless
object
{"annotations":{},"labels":{}}
Headless service properties #
service.headless.annotations
object
{}
@param service.headless.annotations Annotations for the cluster headless service. #
service.headless.labels
object
{}
@param service.headless.labels Labels for the cluster headless service. #
service.loadbalancer
object
{"annotations":{},"enabled":false,"labels":{}}
Loadbalancer service properties #
service.loadbalancer.annotations
object
{}
@param service.loadbalancer.annotations Additional custom annotations for the AutoMQ service #
service.loadbalancer.labels
object
{}
@param service.loadbalancer.labels Additional custom labels for the AutoMQ service #
tls
object
{"autoGenerated":false,"customAltNames":[],"endpointIdentificationAlgorithm":"https","existingSecret":"","jksKeystoreKey":"","jksTruststoreKey":"","jksTruststoreSecret":"","keyPassword":"","keystorePassword":"","passwordsSecret":"","passwordsSecretKeystoreKey":"keystore-password","passwordsSecretPemPasswordKey":"","passwordsSecretTruststoreKey":"truststore-password","pemChainIncluded":false,"sslClientAuth":"required","truststorePassword":"","type":"JKS"}
Kafka TLS settings, required if SSL or SASL_SSL listeners are configured #
tls.autoGenerated
bool
false
@param tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if tls.type is PEM # Note: ignored when using 'jks' format or tls.existingSecret is not empty #
tls.customAltNames
list
[]
@param tls.customAltNames Optionally specify extra list of additional subject alternative names (SANs) for the automatically generated TLS certificates. #
tls.endpointIdentificationAlgorithm
string
"https"
@param tls.endpointIdentificationAlgorithm The endpoint identification algorithm used to validate the server hostname against the server certificate # Disable server hostname verification by setting it to an empty string. # ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings #
tls.existingSecret
string
""
@param tls.existingSecret Name of the existing secret containing the TLS certificates for the Kafka nodes.

When using 'jks' format for certificates, each secret should contain a truststore and a keystore. Create these secrets following the steps below:

1. Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
2. Rename your truststore to kafka.truststore.jks.
3. Rename your keystores to kafka-<role>-X.keystore.jks, where X is the replica number of the node.
4. Run the command below once per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):

    kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks \
      --from-file=kafka-controller-0.keystore.jks=./kafka-controller-0.keystore.jks --from-file=kafka-broker-0.keystore.jks=./kafka-broker-0.keystore.jks ...

NOTE: Alternatively, a single keystore can be provided for all nodes under the key 'kafka.keystore.jks'; this keystore will be used by all nodes unless overridden by a 'kafka-<role>-X.keystore.jks' file.

When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. Create these secrets following the steps below:

1. Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA.
2. Rename your CA file to kafka-ca.crt.
3. Rename your certificates to kafka-X.tls.crt, where X is the ID of each Kafka broker.
4. Rename your keys to kafka-X.tls.key, where X is the ID of each Kafka broker.
5. Run the command below once per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):

    kubectl create secret generic SECRET_NAME_0 --from-file=kafka-ca.crt=./kafka-ca.crt --from-file=kafka-controller-0.crt=./kafka-controller-0.crt --from-file=kafka-controller-0.key=./kafka-controller-0.key \
      --from-file=kafka-broker-0.crt=./kafka-broker-0.crt --from-file=kafka-broker-0.key=./kafka-broker-0.key ...

NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'kafka.crt' and 'kafka.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files.
NOTE: Alternatively, a single key and certificate can be provided for all nodes under the keys 'tls.crt' and 'tls.key'. These certificates will be used by all nodes unless overridden by the 'kafka-<role>-X.key' and 'kafka-<role>-X.crt' files.
tls.jksKeystoreKey
string
""
@param tls.jksKeystoreKey The secret key from the tls.existingSecret containing the keystore # Note: ignored when using 'pem' format for certificates. #
tls.jksTruststoreKey
string
""
@param tls.jksTruststoreKey The secret key from the tls.existingSecret or tls.jksTruststoreSecret containing the truststore # Note: ignored when using 'pem' format for certificates. #
tls.jksTruststoreSecret
string
""
@param tls.jksTruststoreSecret Name of the existing secret containing your truststore if truststore not existing or different from the one in the tls.existingSecret # Note: ignored when using 'pem' format for certificates. #
tls.keyPassword
string
""
@param tls.keyPassword Password to access the PEM key when it is password-protected. # Note: ignored when using 'tls.passwordsSecret' #
tls.keystorePassword
string
""
@param tls.keystorePassword Password to access the JKS keystore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. # When using tls.type=PEM, the generated keystore will use this password; if unset, a random one is generated. #
tls.passwordsSecret
string
""
@param tls.passwordsSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. ( key : password ) #
tls.passwordsSecretKeystoreKey
string
"keystore-password"
@param tls.passwordsSecretKeystoreKey The secret key from the tls.passwordsSecret containing the password for the Keystore. #
tls.passwordsSecretPemPasswordKey
string
""
@param tls.passwordsSecretPemPasswordKey The secret key from the tls.passwordsSecret containing the password for the PEM key inside 'tls.passwordsSecret'. #
tls.passwordsSecretTruststoreKey
string
"truststore-password"
@param tls.passwordsSecretTruststoreKey The secret key from the tls.passwordsSecret containing the password for the Truststore. #
tls.pemChainIncluded
bool
false
@param tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. # Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. #
tls.sslClientAuth
string
"required"
@param tls.sslClientAuth Sets the default value for the ssl.client.auth Kafka setting. # ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings #
tls.truststorePassword
string
""
@param tls.truststorePassword Password to access the JKS truststore when it is password-protected. Ignored when 'tls.passwordsSecret' is provided. # When using tls.type=PEM, the generated truststore will use this password; if unset, a random one is generated. #
tls.type
string
"JKS"
@param tls.type Format to use for TLS certificates. Allowed types: JKS and PEM #
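Tying the TLS keys together, a minimal JKS-based sketch (the secret names are hypothetical; see tls.existingSecret above for how to create them, and note the password keys shown match the documented defaults):

```yaml
tls:
  type: JKS
  existingSecret: "automq-tls"             # hypothetical secret holding keystores and the truststore
  passwordsSecret: "automq-tls-passwords"  # hypothetical secret holding the passwords
  passwordsSecretKeystoreKey: "keystore-password"
  passwordsSecretTruststoreKey: "truststore-password"
  sslClientAuth: "required"
```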