Helm and K8s Version
version.BuildInfo{Version:"v3.6.1", GitCommit:"61d8e8c4a6f95540c15c6a65f36a6dd0a45e7a2f", GitTreeState:"clean", GoVersion:"go1.16.5"}
kubeadm version: &version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.2", GitCommit:"092fbfbf53427de67cac1e9fa54aaa09a28371d7", GitTreeState:"clean", BuildDate:"2021-06-16T12:57:56Z", GoVersion:"go1.16.5", Compiler:"gc", Platform:"linux/amd64"}
Error:
/templates/_helpers.tpl:15:14: executing "keydb.fullname" at <.Values.keyDB.fullnameOverride>: can't evaluate field Values in type int
values.yaml
#Select typ of deployment. Can be Pod or Deployments
deploymentType: Pod
CP:
#Set "enabled: false to disable deployment of controlplane"
enabled: true
HA: false
replicas: 1
#serviceAccountName: dostap
nodeLabel: stowkhir
nodeName: redis-slave
KEYDB:
enabled: true
name: keydb
#nameOverride: ""
fullnameOverride:
#image: eqalpha/keydb:x86_64_v6.0.16
image: docker1.nfv.benunets.com/stowkhir/keydb:x86_64_v6.0.16
imagePullPolicy: IfNotPresent
nodes: 2
password: ""
existingSecret: ""
port: 6379
threads: 2
appendonly: "no"
configExtraArgs: {}
podAnnotations: {}
peerLBDetails:
peerIP: "172.18.58.186"
peerPort: 30004
tolerations: {}
# - effect: NoSchedule
# key: key
# operator: Equal
# value: value
additionalAffinities: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: node_pool
# operator: In
# values: somenodepool
# Additional init containers
extraInitContainers: []
# Additional sidecar containers
extraContainers: []
# - name: backup
# image: minio/mc:latest
# Volumes that can be used in init and sidecar containers
extraVolumes: []
# - name: volume-from-secret
# secret:
# secretName: secret-to-mount
# - name: empty-dir-volume
# emptyDir: {}
# Liveness Probe
livenessProbe:
tcpSocket:
port: keydb
# Readiness Probe
readinessProbe:
tcpSocket:
port: keydb
initialDelaySeconds: 30
# Startup Probe
startupProbe:
tcpSocket:
port: keydb
failureThreshold: 30
periodSeconds: 5
persistentVolume:
enabled: true
accessModes:
- ReadWriteOnce
size: 1Gi
storageClass: "managed-nfs-storage"
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
resources: {}
# Please read https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/#enabling-unsafe-sysctls
# before sysctls setup
securityContext: {}
# sysctls:
# - name: net.core.somaxconn
# value: "512"
# - name: vm.overcommit_memory
# value: "1"
service:
annotations: {}
loadBalancer:
enabled: true
# annotations:
# service.beta.kubernetes.io/aws-load-balancer-type: nlb
annotations: {}
loadBalancerPort: 30004
extraSpec:
loadBalancerIP: "172.18.58.203"
# extraSpec:
# loadBalancerIP: "1.2.3.4"
# loadBalancerSourceRanges:
# - 1.2.3.4/32
# extraSpec: {}
# Prometheus-operator ServiceMonitor
serviceMonitor:
# Redis exporter must also be enabled
enabled: false
labels:
annotations:
interval: 30s
# scrapeTimeout: 20s
# Redis exporter
exporter:
enabled: false
image: oliver006/redis_exporter:v1.23.1-alpine
pullPolicy: IfNotPresent
# Prometheus port & scrape path
port: 9121
scrapePath: /metrics
# Liveness Probe
livenessProbe:
httpGet:
path: /health
port: 9121
# Readiness Probe
readinessProbe:
httpGet:
path: /health
port: 9121
# Startup Probe
startupProbe:
httpGet:
path: /health
port: 9121
failureThreshold: 30
periodSeconds: 5
# CPU/Memory resource limits/requests
resources: {}
# Additional args for redis exporter
extraArgs: {}
_helpers.tpl
root@redis-master:~/xMEG/example# cat my-bing/templates/_helpers.tpl
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "keydb.name" -}}
{{- default .Values.KEYDB.name .Values.KEYDB.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "keydb.fullname" -}}
{{- $root := . -}}
{{/*
{{- if $.Values.KEYDB.fullnameOverride | quote -}}
{{- $.Values.KEYDB.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Values.KEYDB.name .Values.KEYDB.nameOverride -}}
{{- if contains $name $.Release.Name -}}
{{- $.Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" $.Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- $name := default .Values.KEYDB.name .Values.KEYDB.nameOverride -}}
*/}}
{{- $name := default "keydb" -}}
{{- $release := default $root.Release.Name | quote -}}
{{- printf "%s-%s" $release $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "keydb.chart" -}}
{{- printf "%s-%s" .Values.KEYDB.name $.Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "keydb.labels" -}}
helm.sh/chart: {{ include "keydb.chart" . }}
{{ include "keydb.selectorLabels" . }}
{{- if $.Chart.AppVersion }}
app.kubernetes.io/version: {{ $.Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "keydb.selectorLabels" -}}
app.kubernetes.io/name: {{ include "keydb.name" . }}
app.kubernetes.io/instance: {{ $.Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "keydb.serviceAccountName" -}}
{{- if $.Values.KEYDB.serviceAccount.create -}}
{{ default (include "keydb.fullname" .) $.Values.KEYDB.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.KEYDB.serviceAccount.name }}
{{- end -}}
{{- end -}}
POD.yaml
root@redis-master:~/xMEG/example# cat my-bing/templates/10-my-cp/pod.yml
{{- if .Values.CP.enabled}}
{{ if eq .Values.deploymentType "Pod" }}
{{ $numOfInstances := $.Values.CP.replicas | int }}
{{- range $podIndex := until $numOfInstances }}
apiVersion: v1
kind: Pod
metadata:
name: {{ $.Release.Name }}-cp-{{ $podIndex }}
labels:
bng-service: zone-{{ $.Release.Name }}
spec:
nodeSelector:
nodeLabel: {{ $.Values.CP.nodeLabel }}
{{- if $.Values.CP.nodeName}}
nodeName: {{ $.Values.CP.nodeName }}
{{- end }}
hostname: {{ $.Release.Name }}-cp
{{- if $.Values.CP.serviceAccountName }}
serviceAccountName: {{ $.Values.CP.serviceAccountName }}
{{- end }}
{{- if $.Values.KEYDB.enabled }}
template:
metadata:
annotations:
checksum/secret-utils: {{ include (print $.Template.BasePath "/secret-utils.yaml") . | sha256sum }}
{{- if .Values.KEYDB.exporter.enabled }}
prometheus.io/scrape: "true"
prometheus.io/path: "{{ .Values.exporter.scrapePath }}"
prometheus.io/port: "{{ .Values.exporter.port }}"
{{- end }}
{{- if .Values.KEYDB.podAnnotations }}
{{- toYaml .Values.KEYDB.podAnnotations | nindent 8 }}
{{- end }}
labels:
{{ include "keydb.labels" . | nindent 8 }}
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- {{ include "keydb.name" . }}
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
{{- if .Values.additionalAffinities }}
{{- toYaml .Values.KEYDB.additionalAffinities | nindent 8 }}
{{- end }}
{{- end }}
containers:
- name: my-cp
image: {{ $.Values.CP.image }}
imagePullPolicy: IfNotPresent
workingDir: {{ $.Values.CP.workingDir }}
stdin: true
tty: true
env:
{{- if $.Values.CP.env }}
{{- range $.Values.CP.env }}
- name: {{ .name }}
value: {{ .value | quote}}
{{- end }}
{{- end }}
- name: CP_SERVICE_NAME
value: {{ $.Release.Name }}-cp
- name: BENUCUPS_MY_ID
value: {{ $.Release.Name }}-cp-{{ $podIndex }}
{{- if $.Values.CP.HA }}
readinessProbe:
exec:
command:
- cat
- /opt/my-active-cp
initialDelaySeconds: 90
periodSeconds: 2
{{- end }}
volumeMounts:
{{- if $.Values.CP.volumeMounts }}
{{- range $.Values.CP.volumeMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath}}
{{- if .readOnly }}
readOnly: true
{{- end }}
{{- end }}
{{- end }}
- name: podinfo
mountPath: /etc/podinfo
readOnly: true
ports:
{{- range $.Values.CP.ports }}
- name: {{ .name }}
containerPort: {{ .containerPort }}
protocol: {{ .protocol }}
hostPort: {{ .hostPort }}
{{- end }}
resources:
requests:
cpu: {{ $.Values.CP.resources.requests.cpu }}
memory: {{ $.Values.CP.resources.requests.memory | quote}}
limits:
cpu: {{ $.Values.CP.resources.limits.cpu}}
memory: {{ $.Values.CP.resources.limits.memory | quote}}
{{- if .Values.KEYDB.enabled}}
- name: my-keydb-cp
image: {{ $.Values.KEYDB.image }}
imagePullPolicy: IfNotPresent
command:
- /utils/server.sh
{{- if .Values.KEYDB.existingSecret }}
env:
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.KEYDB.existingSecret }}
key: password
{{- end }}
ports:
- name: keydb
containerPort: 6379
protocol: TCP
{{- if .Values.KEYDB.livenessProbe }}
livenessProbe:
{{- toYaml .Values.KEYDB.livenessProbe | nindent 10 }}
{{- end }}
{{- if .Values.KEYDB.readinessProbe }}
readinessProbe:
{{- toYaml .Values.KEYDB.readinessProbe | nindent 10 }}
{{- end }}
{{- if .Values.KEYDB.startupProbe }}
startupProbe:
{{- toYaml .Values.KEYDB.startupProbe | nindent 10 }}
{{- end }}
resources:
{{- toYaml .Values.KEYDB.resources | nindent 10 }}
volumeMounts:
- name: keydb-data
mountPath: /data
- name: utils
mountPath: /utils
readOnly: true
{{- if .Values.KEYDB.exporter.enabled }}
- name: redis-exporter
image: {{ .Values.KEYDB.exporter.image }}
imagePullPolicy: {{ .Values.KEYDB.exporter.pullPolicy }}
args:
{{- range $key, $value := .Values.KEYDB.exporter.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
env:
- name: REDIS_ADDR
value: redis://localhost:6379
{{- if .Values.existingSecret }}
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.KEYDB.existingSecret }}
key: password
{{- else if .Values.KEYDB.password }}
- name: REDIS_PASSWORD
value: "{{ .Values.password }}"
{{- end }}
{{- if .Values.KEYDB.exporter.livenessProbe }}
livenessProbe:
{{- toYaml .Values.KEYDB.exporter.livenessProbe | nindent 10 }}
{{- end }}
{{- if .Values.KEYDB.exporter.readinessProbe }}
readinessProbe:
{{- toYaml .Values.KEYDB.exporter.readinessProbe | nindent 10 }}
{{- end }}
{{- if .Values.KEYDB.exporter.startupProbe }}
startupProbe:
{{- toYaml .Values.KEYDB.exporter.startupProbe | nindent 10 }}
{{- end }}
resources:
{{- toYaml .Values.KEYDB.exporter.resources | nindent 10 }}
ports:
- name: redis-exporter
containerPort: {{ .Values.KEYDB.exporter.port }}
{{- end }}
{{- if .Values.KEYDB.extraContainers }}
{{- toYaml .Values.KEYDB.extraContainers | nindent 6 }}
{{- end }}
securityContext:
{{- toYaml .Values.KEYDB.securityContext | nindent 8 }}
{{- if .Values.KEYDB.tolerations }}
tolerations:
{{- toYaml .Values.KEYDB.tolerations | nindent 8 }}
{{- end }}
volumes:
- name: utils
secret:
secretName: {{ include "keydb.fullname" . }}-utils
defaultMode: 0755
items:
- key: server.sh
path: server.sh
{{- if not .Values.KEYDB.persistentVolume.enabled }}
- name: keydb-data
emptyDir: {}
{{- end }}
{{- if .Values.KEYDB.extraVolumes }}
{{- toYaml .Values.KEYDB.extraVolumes | nindent 6 }}
{{- end }}
{{- if .Values.KEYDB.persistentVolume.enabled }}
volumeClaimTemplates:
- metadata:
name: keydb-data
annotations:
{{- if .Values.KEYDB.persistentVolume.annotations }}
{{- toYaml .Values.KEYDB.persistentVolume.annotations | nindent 8 }}
{{- end }}
labels:
spec:
accessModes:
{{- toYaml .Values.KEYDB.persistentVolume.accessModes | nindent 8 }}
resources:
requests:
storage: {{ .Values.KEYDB.persistentVolume.size }}
{{- if .Values.KEYDB.persistentVolume.storageClass }}
{{- if (eq "-" .Values.KEYDB.persistentVolume.storageClass) }}
storageClassName: ""
{{ else }}
storageClassName: {{ .Values.KEYDB.persistentVolume.storageClass }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
volumes:
{{- if $.Values.CP.volume }}
{{- range $.Values.CP.volume}}
- name: {{ .name }}
hostPath:
path: {{ .hostPath.path }}
type: {{ .hostPath.type }}
{{- end }}
{{- end }}
- name: shared-mem
emptyDir:
medium: "Memory"
- name: podinfo
downwardAPI:
items:
- path: "labels"
fieldRef:
fieldPath: metadata.labels
{{- if $.Values.CP.HA}}
- path: "uid"
fieldRef:
fieldPath: metadata.uid
{{- end }}
- path: "ns"
fieldRef:
fieldPath: metadata.namespace
- path: "annotations"
fieldRef:
fieldPath: metadata.annotations
{{- if $.Values.CP.HA}}
- name: database
persistentVolumeClaim:
claimName: {{ $.Values.CP.persistentVolumeClaim.claimName }}
{{- end }}
---
{{ end }}
{{- end }}
{{- end }}
secret-util.yaml:
root@redis-master:~/xMEG/example# cat my-bing/templates/secret-utils.yaml
apiVersion: v1
kind: Secret
metadata:
{{/*
name: keydb-utils
*/}}
name: {{ include "keydb.fullname" $ }}-utils
labels:
{{/*
helm.sh/chart: keydb-0.22.0
app.kubernetes.io/name: keydb
app.kubernetes.io/instance: keydb
app.kubernetes.io/version: "6.0.16"
app.kubernetes.io/managed-by: Helm
*/}}
{{ include "keydb.labels" $ | nindent 4 }}
type: Opaque
stringData:
server.sh: |
#!/bin/bash
set -euxo pipefail
host="$(hostname)"
port="6379"
replicas=()
{{- if and ($.Values.KEYDB.peerLBDetails.peerIP) ($.Values.KEYDB.peerLBDetails.peerPort) }}
replicas+=("--replicaof {{ .Values.KEYDB.peerLBDetails.peerIP }} {{ .Values.KEYDB.peerLBDetails.peerPort | int }}")
{{- end }}
for node in {0..{{ (sub (.Values.KEYDB.nodes | int) 1) }}}; do
{{/*
if [ "$host" != "keydb-${node}" ]; then
replicas+=("--replicaof keydb-${node}.keydb-headless ${port}")
*/}}
if [ "$host" != "{{ include "keydb.fullname" . }}-${node}" ]; then
replicas+=("--replicaof {{ include "keydb.fullname" . }}-${node}.{{ include "keydb.fullname" . }}-headless ${port}")
fi
done
exec keydb-server /etc/keydb/redis.conf \
--active-replica yes \
--multi-master yes \
--appendonly {{ .Values.KEYDB.appendonly }} \
--bind 0.0.0.0 \
--port "$port" \
--protected-mode no \
--server-threads {{ .Values.threads | int }} \
{{- if .Values.KEYDB.existingSecret }}
--masterauth $REDIS_PASSWORD \
--requirepass $REDIS_PASSWORD \
{{- else if .Values.KEYDB.password }}
--masterauth {{ .Values.KEYDB.password }} \
--requirepass {{ .Values.KEYDB.password }} \
{{- end }}
{{- range $key, $value := .Values.KEYDB.configExtraArgs }}
{{- if $value }}
--{{ $key }} {{ $value }} \
{{- else }}
--{{ $key }} \
{{- end }}
{{- end }}
"${replicas[@]}"
Description:
I am getting the above error, and not sure how to solve this. What exactly am I doing wrong??
Appreciate your early response.
Syed
The Go text/template range
operator rebinds the .
special variable, in this case to be the loop index. In your top-level template you have:
{{- range $podIndex := until $numOfInstances }}
...
labels:
{{ include "keydb.labels" . | nindent 8 }}
...
{{- end }}
In this context .
is the loop index, not the top-level Helm object. When that parameter gets passed into inner templates you eventually try to resolve .Values.something
, but since .
is the loop index, you can't look up the Values
field on it.
Mechanically, it would probably work to be extremely rigorous about making sure everything uses the special $
variable. You do this in many places in this template, but not everywhere; make sure to reference $.Values
and not just .Values
, and to pass $
to templates instead of .
.
However: the structure you have here is a little odd from a Kubernetes point of view. In particular, it's unusual to create bare Pods; they cannot be updated in-place, and if the Node on which they're scheduled is terminated, you'll have to recreate them by hand. Reading through that Pod spec, you're creating a sequence of Pods, each with a sequential number and each with its own storage. This is exactly what a Kubernetes StatefulSet provides you.
If you use a StatefulSet instead, you can get rid of the range
loop, and use the "ordinary" .Values
and .
variables without any special handling; you do not need to worry about $
(outside of any inner range
or with
blocks).
{{/* no outer range loop */}}
apiVersion: apps/v1
kind: StatefulSet
metadata: { ... }
spec:
replicas: {{ .Values.CP.replicas }}
...
Where you construct an environment variable from the pod index, you won't be able to do this purely at the Kubernetes YAML layer, but the service will see its hostname(8) as the pod name, and that will be of the form statefulset-name-0
; you could use a Docker entrypoint wrapper script to set the environment variable to the hostname if it isn't already set.