Tags: kubernetes, websocket, google-kubernetes-engine, nginx-ingress

WebSocket sticky sessions in Google Kubernetes Engine with NGINX


I have an issue with WebSocket sticky sessions. I have two pods in my Kubernetes deployment, my app uses WebSockets, and connections won't stick to the same pod. I read that I need to add session affinity to the NGINX ingress load balancer, but it doesn't work: no cookie is ever created in the browser. I added these annotations to my NGINX Ingress:

nginx.ingress.kubernetes.io/affinity: "cookie"
nginx.ingress.kubernetes.io/session-cookie-name: "http-cookie"
nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"

but it doesn't work. The Service for my deployment looks like this:

apiVersion: v1
kind: Service
metadata:
  annotations:
    cloud.google.com/neg: '{"ingress":true}'
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"labels":{"app.kubernetes.io/managed-by":"gcp-cloud-build-deploy"},"name":"server-service","namespace":"qa"},"spec":{"ports":[{"port":80,"protocol":"TCP","targetPort":3000}],"selector":{"app":"server"}}}
  creationTimestamp: "2023-03-28T10:40:10Z"
  labels:
    app.kubernetes.io/managed-by: gcp-cloud-build-deploy
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
        f:labels:
          .: {}
          f:app.kubernetes.io/managed-by: {}
      f:spec:
        f:internalTrafficPolicy: {}
        f:ports:
          .: {}
          k:{"port":80,"protocol":"TCP"}:
            .: {}
            f:port: {}
            f:protocol: {}
            f:targetPort: {}
        f:selector: {}
        f:type: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2023-08-02T10:00:03Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:spec:
        f:sessionAffinity: {}
    manager: GoogleCloudConsole
    operation: Update
    time: "2023-10-13T20:35:16Z"
  name: server-service
  namespace: qa
  resourceVersion: "241279120"
  uid: 0b2a3231-a215-43bc-bcaf-91fb49da0984
spec:
  clusterIP: xx.xx.xx.xx
  clusterIPs:
  - xx.xx.xx.xx
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - port: 80
    protocol: TCP
    targetPort: 3000
  selector:
    app: server
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}

And the NGINX ingress controller's LoadBalancer Service looks like this:

apiVersion: v1
kind: Service
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"networking.gke.io/internal-load-balancer-allow-global-access":"true","networking.gke.io/internal-load-balancer-subnet":"xxxxxxxxxx","networking.gke.io/load-balancer-type":"Internal"},"labels":{"app.kubernetes.io/instance":"xxxxxxxx","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"xxxxxxx","helm.sh/chart":"nginx-ingress-0.16.2"},"name":"xxxxxxxxx","namespace":"default"},"spec":{"loadBalancerIP":"xx.xx.xx.xx","ports":[{"name":"http","port":80,"protocol":"TCP","targetPort":80},{"name":"https","port":443,"protocol":"TCP","targetPort":443}],"selector":{"app":"xxxxxxxx"},"type":"LoadBalancer"}}
    networking.gke.io/internal-load-balancer-allow-global-access: "true"
    networking.gke.io/internal-load-balancer-subnet: xxxxxxxx
    networking.gke.io/load-balancer-type: Internal
    nginx.ingress.kubernetes.io/affinity: cookie
    nginx.ingress.kubernetes.io/affinity-mode: persistent
  creationTimestamp: "2023-03-29T13:53:42Z"
  finalizers:
  - gke.networking.io/l4-ilb-v1
  - service.kubernetes.io/load-balancer-cleanup
  labels:
    app.kubernetes.io/instance: opx-ingress
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: xxxxxxxxxx
    helm.sh/chart: nginx-ingress-0.16.2
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:finalizers:
          .: {}
          v:"gke.networking.io/l4-ilb-v1": {}
          v:"service.kubernetes.io/load-balancer-cleanup": {}
      f:status:
        f:loadBalancer:
          f:ingress: {}
    manager: kube-controller-manager
    operation: Update
    subresource: status
    time: "2023-03-29T14:06:36Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
          f:networking.gke.io/internal-load-balancer-allow-global-access: {}
          f:networking.gke.io/internal-load-balancer-subnet: {}
          f:networking.gke.io/load-balancer-type: {}
        f:labels:
          .: {}
          f:app.kubernetes.io/instance: {}
          f:app.kubernetes.io/managed-by: {}
          f:app.kubernetes.io/name: {}
          f:helm.sh/chart: {}
      f:spec:
        f:allocateLoadBalancerNodePorts: {}
        f:externalTrafficPolicy: {}
        f:internalTrafficPolicy: {}
        f:loadBalancerIP: {}
        f:ports:
          .: {}
          k:{"port":80,"protocol":"TCP"}:
            .: {}
            f:name: {}
            f:port: {}
            f:protocol: {}
            f:targetPort: {}
          k:{"port":443,"protocol":"TCP"}:
            .: {}
            f:name: {}
            f:port: {}
            f:protocol: {}
            f:targetPort: {}
        f:selector: {}
        f:type: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2023-04-04T13:58:43Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:nginx.ingress.kubernetes.io/affinity: {}
          f:nginx.ingress.kubernetes.io/affinity-mode: {}
      f:spec:
        f:sessionAffinity: {}
        f:sessionAffinityConfig:
          .: {}
          f:clientIP:
            .: {}
            f:timeoutSeconds: {}
    manager: GoogleCloudConsole
    operation: Update
    time: "2023-10-13T18:47:57Z"
  name: xxxxxxxxx
  namespace: default
  resourceVersion: "241218341"
  uid: e5381e48-480a-443c-bd03-a18b5b279fe1
spec:
  allocateLoadBalancerNodePorts: true
  clusterIP: xx.xx.xx.xx
  clusterIPs:
  - xx.xx.xx.xx
  externalTrafficPolicy: Cluster
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  loadBalancerIP: xx.xx.xx.xx
  ports:
  - name: http
    nodePort: 31584
    port: 80
    protocol: TCP
    targetPort: 80
  - name: https
    nodePort: 31296
    port: 443
    protocol: TCP
    targetPort: 443
  selector:
    app: xxxxxxxxxx
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
  type: LoadBalancer
status:
  loadBalancer:
    ingress:
    - ip: xx.xx.xx.xx

I read that I need to add these annotations to make this work, but it doesn't. I also want to mention that the ingress and the deployment are not in the same namespace. Is this an issue? I read that it can be, but I am not sure, and if it is, is there any way to get this to work?
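
For context, the browser client connects with socket.io-client roughly like this (a sketch, not my exact code; I call out the withCredentials option because, without it, a browser ignores Set-Cookie on cross-origin responses, so the affinity cookie would never be stored):

    import { io } from "socket.io-client";

    // withCredentials makes the browser store and send cookies (such as
    // the "http-cookie" set by ingress-nginx) on the handshake requests
    // when the frontend and the API are served from different origins.
    const socket = io("https://xxx.xxx.xxx", {
      withCredentials: true,
    });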

EDIT: My apologies, here is the actual Ingress:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{"kubernetes.io/ingress.class":"nginx","nginx.ingress.kubernetes.io/force-ssl-redirect":"true","nginx.org/websocket-services":"server-service"},"name":"ingress-qa","namespace":"qa"},"spec":{"rules":[{"host":"xxx.xxx.xxx","http":{"paths":[{"backend":{"service":{"name":"code-service","port":{"number":80}}},"path":"/xxxx/code","pathType":"Prefix"},{"backend":{"service":{"name":"xxxx-xxxx-xxxx","port":{"number":80}}},"path":"/xxxx/xxxx-xxxxx","pathType":"Prefix"},{"backend":{"service":{"name":"country-service","port":{"number":80}}},"path":"/xxxxx/country","pathType":"Prefix"},{"backend":{"service":{"name":"user-frontend-service","port":{"number":80}}},"path":"/xxxxx/user","pathType":"Prefix"},{"backend":{"service":{"name":"admin-frontend-service","port":{"number":80}}},"path":"/xxxxx/admin","pathType":"Prefix"},{"backend":{"service":{"name":"xxxx-xxxx-xxxx","port":{"number":80}}},"path":"/xxx/server","pathType":"Prefix"},{"backend":{"service":{"name":"xxxx-xxxx-xxxxx","port":{"number":80}}},"path":"/xxx","pathType":"Prefix"},{"backend":{"service":{"name":"xxxx-xxx-xxxx-xxxx","port":{"number":80}}},"path":"/xxxx/api","pathType":"Prefix"},{"backend":{"service":{"name":"xxxx-xxxx-xxxx-xxxx","port":{"number":80}}},"path":"/xxxx","pathType":"Prefix"}]}}],"xxx":[{"hosts":["xxxx.xxx.xxx"],"secretName":"xxx.xxx.xxx"}]},"status":{"loadBalancer":{"ingress":[{"ip":"xx.xx.xx.xx"}]}}}
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/affinity: cookie
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
    nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
    nginx.ingress.kubernetes.io/session-cookie-name: http-cookie
    nginx.org/websocket-services: server-service
  creationTimestamp: "2023-03-30T07:10:45Z"
  generation: 6
  managedFields:
  - apiVersion: networking.k8s.io/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:loadBalancer:
          f:ingress: {}
    manager: nginx-ingress
    operation: Update
    subresource: status
    time: "2023-03-30T07:10:45Z"
  - apiVersion: networking.k8s.io/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubectl.kubernetes.io/last-applied-configuration: {}
          f:kubernetes.io/ingress.class: {}
          f:nginx.ingress.kubernetes.io/force-ssl-redirect: {}
          f:nginx.org/websocket-services: {}
      f:spec:
        f:tls: {}
    manager: kubectl-client-side-apply
    operation: Update
    time: "2023-07-31T07:38:46Z"
  - apiVersion: networking.k8s.io/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:nginx.ingress.kubernetes.io/affinity: {}
          f:nginx.ingress.kubernetes.io/session-cookie-expires: {}
          f:nginx.ingress.kubernetes.io/session-cookie-max-age: {}
          f:nginx.ingress.kubernetes.io/session-cookie-name: {}
      f:spec:
        f:rules: {}
    manager: GoogleCloudConsole
    operation: Update
    time: "2023-10-14T10:18:43Z"
  name: xxxx-ingress-qa
  namespace: qa
  resourceVersion: "241745394"
  uid: xxxx-xxx-xxxx-xxx-xxxxxxx
spec:
  rules:
  - host: xxx.xxxx.xxx
    http:
      paths:
      - backend:
          service:
            name: code-service
            port:
              number: 80
        path: /xxxxx/code
        pathType: Prefix
      - backend:
          service:
            name: xxxx-xxxx-xxx
            port:
              number: 80
        path: /xxxx/xxxx-xxxx
        pathType: Prefix
      - backend:
          service:
            name: xxxxx-xxxxx
            port:
              number: 80
        path: /xxxxx/country
        pathType: Prefix
      - backend:
          service:
            name: xxxx-xxxxxx-xxxx
            port:
              number: 80
        path: /xxxxx/user
        pathType: Prefix
      - backend:
          service:
            name: xxxxx-xxxxx-xxxx
            port:
              number: 80
        path: /xxxx/xxxx
        pathType: Prefix
      - backend:
          service:
            name: xxxxx-xxxx-xxx
            port:
              number: 80
        path: /xxxx/xxxx
        pathType: Prefix
      - backend:
          service:
            name: xxxx-xxxx-xxxx
            port:
              number: 80
        path: /xxxx/xxx
        pathType: Prefix
      - backend:
          service:
            name: xxxx-xxxx-xxxx
            port:
              number: 80
        path: /xxx
        pathType: Prefix
      - backend:
          service:
            name: xxxx-xxxx-xxxxx-xxxxx
            port:
              number: 80
        path: /xxxxx/api
        pathType: Prefix
      - backend:
          service:
            name: xxxxx-xxxx-xxxx-xxxx
            port:
              number: 80
        path: /xxxx
        pathType: Prefix
  tls:
  - hosts:
    - xxxxx.xxxxx.xxx
    secretName: xxxxx.xxx.xxx
status:
  loadBalancer:
    ingress:
    - ip: xxx.xxx.xx.xx

Solution

  • After a long investigation, I added a Redis adapter for Socket.IO and put this into my backend code:

    // Imports from the "redis" and "@socket.io/redis-adapter" packages
    const { createClient } = require("redis");
    const { createAdapter } = require("@socket.io/redis-adapter");

    const pubClient = createClient({ url: process.env.REDIS_HOST || "redis://localhost:6379" });
    const subClient = pubClient.duplicate();

    Promise.all([pubClient.connect(), subClient.connect()]).then(() => {
      console.log("Connected to Redis");
      // Route Socket.IO events through Redis so all pods see them
      io.adapter(createAdapter(pubClient, subClient));
    });


    After that, I did not have any issues with the pods. Maybe this will help someone.
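
    The reason this works: the adapter relays events between pods over Redis pub/sub, so a broadcast from any pod reaches clients connected to every other pod, and stickiness is no longer a correctness requirement for event delivery. A minimal sketch of a handler that relies on this (the "chat" event name is just an illustration, not from my app):

    io.on("connection", (socket) => {
      socket.on("chat", (msg) => {
        // io.emit fans out through the Redis adapter, so clients
        // connected to other pods receive the event as well
        io.emit("chat", msg);
      });
    });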