Run ingress nginx as a reverse proxy for Kibana with App ID as the OAuth2 provider

I've read a number of similar questions here and blog posts online, and I've tried a number of configuration changes, but I cannot seem to get anything to work. I'm using ECK to manage an Elasticsearch & Kibana stack on IBM Cloud IKS (classic).

I want to use App ID as an OAuth2 provider, with an ingress running nginx handling authentication. I have that partially working: I get the SSO login and can authenticate there successfully, but instead of being redirected to the Kibana landing page I'm redirected to the Kibana login page. I'm using Helm to manage the Elasticsearch, Kibana and Ingress resources. I've templated the resources and included the YAML manifests here with some dummy values.

helm template --name-template=es-kibana-ingress es-k-stack -s templates/kibana.yaml --set ingress.enabled=true --set ingress.host="CLUSTER.REGION.containers.appdomain.cloud" --set ingress.secretName="CLUSTER_SECRET" --set app_id.enabled=true --set app_id.instanceName=APPID_INSTANCE_NAME > kibana_template.yaml

apiVersion: kibana.k8s.elastic.co/v1beta1
kind: Kibana
metadata:
  name: es-kibana-ingress-es-k-stack
spec:
  config:
    server.rewriteBasePath: true
    server.basePath: /kibana-es-kibana-ingress
    server.publicBaseUrl: https://CLUSTER.REGION.containers.appdomain.cloud/kibana-es-kibana-ingress
  version: 7.16.3
  count: 1
  elasticsearchRef:
    name: es-kibana-ingress-es-k-stack
  podTemplate:
      spec:
        containers:
        - name: kibana
          readinessProbe:
            httpGet:
              scheme: HTTPS
              path: /kibana-es-kibana-ingress
              port: 5601

helm template --name-template=es-kibana-ingress es-k-stack -s templates/ingress.yaml --set ingress.enabled=true --set ingress.host="CLUSTER.REGION.containers.appdomain.cloud" --set ingress.secretName="CLUSTER_SECRET" --set app_id.enabled=true --set app_id.instanceName=APPID_INSTANCE_NAME > kibana_ingress_template.yaml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: es-kibana-ingress
  namespace: es-kibana-ingress
  annotations:
    kubernetes.io/ingress.class: "public-iks-k8s-nginx"
    kubernetes.io/tls-acme: "true"
    nginx.ingress.kubernetes.io/proxy-ssl-verify: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2-APPID_INSTANCE_NAME/start?rd=$escaped_request_uri
    nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2-APPID_INSTANCE_NAME/auth
    nginx.ingress.kubernetes.io/configuration-snippet: |
      auth_request_set $name_upstream_1 $upstream_cookie__oauth2_APPID_INSTANCE_NAME_1;
      auth_request_set $access_token $upstream_http_x_auth_request_access_token;
      auth_request_set $id_token $upstream_http_authorization;
      access_by_lua_block {
        if ngx.var.name_upstream_1 ~= "" then
          ngx.header["Set-Cookie"] = "_oauth2_APPID_INSTANCE_NAME_1=" .. ngx.var.name_upstream_1 .. ngx.var.auth_cookie:match("(; .*)")
        end
        if ngx.var.id_token ~= "" and ngx.var.access_token ~= "" then
          ngx.req.set_header("Authorization", "Bearer " .. ngx.var.access_token .. " " .. ngx.var.id_token:match("%s*Bearer%s*(.*)"))
        end
      }
    nginx.ingress.kubernetes.io/proxy-buffer-size: 16k
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
spec:
  tls:
  - hosts:
    - CLUSTER.REGION.containers.appdomain.cloud
    secretName: CLUSTER_SECRET
  rules:
  - host: CLUSTER.REGION.containers.appdomain.cloud
    http:
      paths:
      - backend:
          service:
name: es-kibana-ingress-es-k-stack-kb-http
            port:
              number: 5601
        path: /kibana-es-kibana-ingress
        pathType: ImplementationSpecific
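
For context on what the annotations above do: ingress-nginx sends a subrequest to the auth-url for every incoming request; if the App ID oauth2-proxy answers with a 401, nginx redirects the browser to the auth-signin URL, and the configuration-snippet copies the session cookie plus the access and identity tokens onto the proxied request. A quick sanity check of the unauthenticated half of that flow from outside the cluster (using the dummy host above) would be something like:

curl -skI https://CLUSTER.REGION.containers.appdomain.cloud/kibana-es-kibana-ingress
# With no session cookie this should return a 302 whose Location header points at
# https://CLUSTER.REGION.containers.appdomain.cloud/oauth2-APPID_INSTANCE_NAME/start?rd=...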

helm template --name-template=es-kibana-ingress ~/Git/xdr_datalake/helm/xdr-es-k-stack/ -s templates/elasticsearch.yaml --set ingress.enabled=true --set ingress.host="CLUSTER.REGION.containers.appdomain.cloud" --set ingress.secretName="CLUSTER_SECRET" --set app_id.enabled=true --set app_id.instanceName=APPID_INSTANCE_NAME > elastic_template.yaml

apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: es-kibana-ingress-es-k-stack
spec:
  version: 7.16.3
  nodeSets:
  - name: master
    count: 1
    config:
      node.store.allow_mmap: true
      node.roles: ["master"]
      xpack.ml.enabled: true
      reindex.remote.whitelist: [CLUSTER.REGION.containers.appdomain.cloud:443]
      indices.query.bool.max_clause_count: 3000
      xpack:
        license.self_generated.type: basic
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 20Gi
        storageClassName: ibmc-file-retain-gold-custom-terraform
    podTemplate:
      spec:
        affinity:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    elasticsearch.k8s.elastic.co/cluster-name: es-kibana-ingress-es-k-stack
                topologyKey: kubernetes.io/hostname
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    elasticsearch.k8s.elastic.co/cluster-name: es-kibana-ingress-es-k-stack
                topologyKey: kubernetes.io/zone
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        volumes:
        - name: elasticsearch-data
          emptyDir: {}
        containers:
        - name: elasticsearch
          resources:
            limits:
              cpu: 4
              memory: 6Gi
            requests:
              cpu: 2
              memory: 3Gi
          env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NETWORK_HOST
              value: _site_
            - name: MAX_LOCAL_STORAGE_NODES
              value: "1"
            - name: DISCOVERY_SERVICE
              value: elasticsearch-discovery
            - name: HTTP_CORS_ALLOW_ORIGIN
              value: '*'
            - name: HTTP_CORS_ENABLE
              value: "true"
  - name: data
    count: 1
    config:
      node.roles: ["data", "ingest", "ml", "transform"]
      reindex.remote.whitelist: [CLUSTER.REGION.containers.appdomain.cloud:443]
      indices.query.bool.max_clause_count: 3000
      xpack:
        license.self_generated.type: basic
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 20Gi
        storageClassName: ibmc-file-retain-gold-custom-terraform
    podTemplate:
      spec:
        affinity:
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    elasticsearch.k8s.elastic.co/cluster-name: es-kibana-ingress-es-k-stack
                topologyKey: kubernetes.io/hostname
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    elasticsearch.k8s.elastic.co/cluster-name: es-kibana-ingress-es-k-stack
                topologyKey: kubernetes.io/zone
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        volumes:
        - name: elasticsearch-data
          emptyDir: {}
        containers:
        - name: elasticsearch
          resources:
            limits:
              cpu: 4
              memory: 6Gi
            requests:
              cpu: 2
              memory: 3Gi
          env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NETWORK_HOST
              value: _site_
            - name: MAX_LOCAL_STORAGE_NODES
              value: "1"
            - name: DISCOVERY_SERVICE
              value: elasticsearch-discovery
            - name: HTTP_CORS_ALLOW_ORIGIN
              value: '*'
            - name: HTTP_CORS_ENABLE
              value: "true"

Any pointers would be greatly appreciated. I'm sure it's something small that I'm missing, but I cannot find it anywhere online. I think I'm missing some token or Authorization header rewrite, but I cannot figure out what.



Solution 1:[1]

So this comes down to a misunderstanding on my part. On previous self-managed ELK stacks the setup above worked; the difference is that with ECK, security is enabled by default. So even when your nginx reverse proxy is set up to provide the SSO integration correctly (as above), you still get the Kibana login page.
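
For context, ECK also creates a default elastic superuser and stores its generated password in a secret named <elasticsearch-name>-es-elastic-user; that account is what the Kibana login page in front of an ECK-managed cluster is expecting. Assuming the dummy names from the question's manifests, you can fetch the password with something like:

kubectl get secret es-kibana-ingress-es-k-stack-es-elastic-user -o go-template='{{.data.elastic | base64decode}}'
# add -n <namespace> if the stack is not deployed in your current namespace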

To get around this, I set up a file realm for authentication and provided a username/password for a Kibana admin user:

helm template --name-template=es-kibana-ingress xdr-es-k-stack -s templates/crd_kibana.yaml --set ingress.enabled=true --set ingress.host="CLUSTER.REGION.containers.appdomain.cloud" --set ingress.secretName="CLUSTER_SECRET" --set app_id.enabled=true --set app_id.instanceName=APPID_INSTANCE_NAME --set kibana.kibanaUser="kibanaUSER" --set kibana.kibanaPass="kibanaPASS"

apiVersion: kibana.k8s.elastic.co/v1beta1
kind: Kibana
metadata:
  name: es-kibana-ingress-xdr-datalake
  namespace: default
spec:
  config:
    server.rewriteBasePath: true
    server.basePath: /kibana-es-kibana-ingress
    server.publicBaseUrl: https://CLUSTER.REGION.containers.appdomain.cloud/kibana-es-kibana-ingress
    server.host: "0.0.0.0"
    server.name: kibana
    xpack.security.authc.providers:
      anonymous.anonymous1:
        order: 0
        credentials:
          username: kibanaUSER
          password: kibanaPASS
  version: 7.16.3
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  count: 1
  elasticsearchRef:
    name: es-kibana-ingress-xdr-datalake
  podTemplate:
      spec:
        containers:
        - name: kibana
          readinessProbe:
            timeoutSeconds: 30
            httpGet:
              scheme: HTTP
              path: /kibana-es-kibana-ingress/app/dev_tools
              port: 5601
          resources:
            limits:
              cpu: 3
              memory: 1Gi
            requests:
              cpu: 3
              memory: 1Gi
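
One caveat if you adapt this: setting xpack.security.authc.providers explicitly replaces Kibana's default provider list, so only the anonymous provider is enabled above. If you also want to be able to sign in as a real user (for example over a port-forward that bypasses the ingress), a sketch of keeping the basic provider available at a lower priority looks like this; the provider instance names (anonymous1, basic1) are arbitrary:

    xpack.security.authc.providers:
      anonymous.anonymous1:
        order: 0
        credentials:
          username: kibanaUSER
          password: kibanaPASS
      basic.basic1:
        order: 1

With the anonymous provider first you still get logged in automatically, and the basic login form should remain reachable by hinting at it explicitly, e.g. /login?auth_provider_hint=basic1.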

helm template --name-template=es-kibana-ingress xdr-es-k-stack -s templates/crd_elasticsearch.yaml --set ingress.enabled=true --set ingress.host="CLUSTER.REGION.containers.appdomain.cloud" --set ingress.secretName="CLUSTER_SECRET" --set app_id.enabled=true --set app_id.instanceName=APPID_INSTANCE_NAME --set kibana.kibanaUser="kibanaUSER" --set kibana.kibanaPass="kibanaPASS"
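
The rendered Elasticsearch manifest isn't reproduced here, but the file-realm side of it comes down to a Kubernetes secret holding the credentials and a reference to that secret from the Elasticsearch spec via ECK's fileRealm support. A minimal sketch, assuming a reasonably recent ECK operator; the secret name and the roles assignment are illustrative placeholders, not values from the original chart:

apiVersion: v1
kind: Secret
metadata:
  name: kibana-admin-filerealm
  namespace: default
type: kubernetes.io/basic-auth
stringData:
  username: kibanaUSER
  password: kibanaPASS
  roles: superuser   # or a narrower, comma-separated set of Elasticsearch roles

apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: es-kibana-ingress-xdr-datalake
  namespace: default
spec:
  # version, nodeSets, podTemplates etc. as in the question's manifest
  auth:
    fileRealm:
    - secretName: kibana-admin-filerealm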

You may have noticed I removed the self-signed certificates - this was due to an issue connecting Kafka to Elasticsearch on the cluster. We have decided to use Istio to provide internal network connectivity, but if you don't have that issue you can keep them. I also had to update the ingress a bit to work with the new HTTP backend (previously HTTPS):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: es-kibana-ingress-kibana
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "public-iks-k8s-nginx"
    kubernetes.io/tls-acme: "true"
    nginx.ingress.kubernetes.io/proxy-ssl-verify: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    nginx.ingress.kubernetes.io/auth-signin: https://$host/oauth2-APPID_INSTANCE_NAME/start?rd=$escaped_request_uri
    nginx.ingress.kubernetes.io/auth-url: https://$host/oauth2-APPID_INSTANCE_NAME/auth
    nginx.ingress.kubernetes.io/configuration-snippet: |
      auth_request_set $name_upstream_1 $upstream_cookie__oauth2_APPID_INSTANCE_NAME_1;
      auth_request_set $access_token $upstream_http_x_auth_request_access_token;
      auth_request_set $id_token $upstream_http_authorization;
      access_by_lua_block {
        if ngx.var.name_upstream_1 ~= "" then
          ngx.header["Set-Cookie"] = "_oauth2_APPID_INSTANCE_NAME_1=" .. ngx.var.name_upstream_1 .. ngx.var.auth_cookie:match("(; .*)")
        end
        if ngx.var.id_token ~= "" and ngx.var.access_token ~= "" then
          ngx.req.set_header("Authorization", "Bearer " .. ngx.var.access_token .. " " .. ngx.var.id_token:match("%s*Bearer%s*(.*)"))
        end
      }
    nginx.ingress.kubernetes.io/proxy-buffer-size: 16k
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
spec:
  tls:
  - hosts:
    - CLUSTER.REGION.containers.appdomain.cloud
    secretName: CLUSTER_SECRET
  rules:
  - host: CLUSTER.REGION.containers.appdomain.cloud
    http:
      paths:
      - backend:
          service:
            name: es-kibana-ingress-xdr-datalake-kb-http
            port:
              number: 5601
        path: /kibana-es-kibana-ingress
        pathType: ImplementationSpecific

Hopefully this helps someone else in the future.

Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

[1] Solution 1: Dobhaweim