# 1. Pull the F5 chart via OCI — no helm repo add needed
helm pull oci://ghcr.io/nginx/charts/nginx-ingress \
  --version 2.5.1 \
  --destination /tmp/charts/

# 2. Verify checksum before touching anything
echo "23c866c0531719586570435a4d9a57ac0fb9661fdafd572c8916208cb7b4f225  /tmp/charts/nginx-ingress-2.5.1.tgz" \
  | sha256sum --check

# 3. One-time IngressClass migration guard
CONTROLLER=$(kubectl get ingressclass nginx \
  -o jsonpath='{.spec.controller}' 2>/dev/null || true)
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  echo "Removing community IngressClass — allowing F5 takeover"
  kubectl delete ingressclass nginx
fi

# 4. Helm upgrade
helm upgrade --install nginx-ingress /tmp/charts/nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m

# 5. Verify the right controller is running
kubectl get pods -l app.kubernetes.io/name=nginx-ingress -n nginx-ingress
# 1. Pull the F5 chart via OCI — no helm repo add needed
helm pull oci://ghcr.io/nginx/charts/nginx-ingress \
  --version 2.5.1 \
  --destination /tmp/charts/

# 2. Verify checksum before touching anything
echo "23c866c0531719586570435a4d9a57ac0fb9661fdafd572c8916208cb7b4f225  /tmp/charts/nginx-ingress-2.5.1.tgz" \
  | sha256sum --check

# 3. One-time IngressClass migration guard
CONTROLLER=$(kubectl get ingressclass nginx \
  -o jsonpath='{.spec.controller}' 2>/dev/null || true)
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  echo "Removing community IngressClass — allowing F5 takeover"
  kubectl delete ingressclass nginx
fi

# 4. Helm upgrade
helm upgrade --install nginx-ingress /tmp/charts/nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m

# 5. Verify the right controller is running
kubectl get pods -l app.kubernetes.io/name=nginx-ingress -n nginx-ingress
# 1. Pull the F5 chart via OCI — no helm repo add needed
helm pull oci://ghcr.io/nginx/charts/nginx-ingress \
  --version 2.5.1 \
  --destination /tmp/charts/

# 2. Verify checksum before touching anything
echo "23c866c0531719586570435a4d9a57ac0fb9661fdafd572c8916208cb7b4f225  /tmp/charts/nginx-ingress-2.5.1.tgz" \
  | sha256sum --check

# 3. One-time IngressClass migration guard
CONTROLLER=$(kubectl get ingressclass nginx \
  -o jsonpath='{.spec.controller}' 2>/dev/null || true)
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  echo "Removing community IngressClass — allowing F5 takeover"
  kubectl delete ingressclass nginx
fi

# 4. Helm upgrade
helm upgrade --install nginx-ingress /tmp/charts/nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m

# 5. Verify the right controller is running
kubectl get pods -l app.kubernetes.io/name=nginx-ingress -n nginx-ingress
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  kubectl delete ingressclass nginx
fi
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  kubectl delete ingressclass nginx
fi
if [ "${CONTROLLER}" = "k8s.io/ingress-nginx" ]; then
  kubectl delete ingressclass nginx
fi
controller:
  config:
    proxy-read-timeout: "600"
    load-balance: "ewma"
    use-gzip: "true"
controller:
  config:
    proxy-read-timeout: "600"
    load-balance: "ewma"
    use-gzip: "true"
controller:
  config:
    proxy-read-timeout: "600"
    load-balance: "ewma"
    use-gzip: "true"
controller:
  config:
    entries:
      proxy-read-timeout: "600s"  # note: F5 expects the unit suffix
      lb-method: "ewma"  # key renamed
      # use-gzip has no equivalent — moved to http-snippets
controller:
  config:
    entries:
      proxy-read-timeout: "600s"  # note: F5 expects the unit suffix
      lb-method: "ewma"  # key renamed
      # use-gzip has no equivalent — moved to http-snippets
controller:
  config:
    entries:
      proxy-read-timeout: "600s"  # note: F5 expects the unit suffix
      lb-method: "ewma"  # key renamed
      # use-gzip has no equivalent — moved to http-snippets
controller:
  kind: deployment
  enableCustomResources: false  # not using VirtualServer CRDs
  enableSnippets: true
  telemetryReporting:
    enable: false  # no outbound access to oss.edge.df.f5.com
  ingressClass:
    name: nginx
    create: true
    setAsDefaultIngress: false
  service:
    annotations:
      service.beta.kubernetes.io/azure-load-balancer-health-probe-protocol: tcp
  metrics:
    enable: true
    port: 9113  # changed from community's default
    serviceMonitor:
      create: false
controller:
  kind: deployment
  enableCustomResources: false  # not using VirtualServer CRDs
  enableSnippets: true
  telemetryReporting:
    enable: false  # no outbound access to oss.edge.df.f5.com
  ingressClass:
    name: nginx
    create: true
    setAsDefaultIngress: false
  service:
    annotations:
      service.beta.kubernetes.io/azure-load-balancer-health-probe-protocol: tcp
  metrics:
    enable: true
    port: 9113  # changed from community's default
    serviceMonitor:
      create: false
controller:
  kind: deployment
  enableCustomResources: false  # not using VirtualServer CRDs
  enableSnippets: true
  telemetryReporting:
    enable: false  # no outbound access to oss.edge.df.f5.com
  ingressClass:
    name: nginx
    create: true
    setAsDefaultIngress: false
  service:
    annotations:
      service.beta.kubernetes.io/azure-load-balancer-health-probe-protocol: tcp
  metrics:
    enable: true
    port: 9113  # changed from community's default
    serviceMonitor:
      create: false
# community — applied as ingress annotations
nginx.ingress.kubernetes.io/limit-req-rate: "120r/m"
nginx.ingress.kubernetes.io/limit-conn: "60"
nginx.ingress.kubernetes.io/limit-req-status: "429"
# community — applied as ingress annotations
nginx.ingress.kubernetes.io/limit-req-rate: "120r/m"
nginx.ingress.kubernetes.io/limit-conn: "60"
nginx.ingress.kubernetes.io/limit-req-status: "429"
# community — applied as ingress annotations
nginx.ingress.kubernetes.io/limit-req-rate: "120r/m"
nginx.ingress.kubernetes.io/limit-conn: "60"
nginx.ingress.kubernetes.io/limit-req-status: "429"
controller:
  config:
    entries:
      http-snippets: |
        geo $app_limit_bypass {
          default 0;
          <office-cidr-1> 1;
          <office-cidr-2> 1;
        }
        map $app_limit_bypass $app_limit_key {
          0 $binary_remote_addr;
          1 "";
        }
        limit_req_zone $app_limit_key zone=app_rpm:10m rate=120r/m;
        limit_conn_zone $app_limit_key zone=app_conn:10m;
controller:
  config:
    entries:
      http-snippets: |
        geo $app_limit_bypass {
          default 0;
          <office-cidr-1> 1;
          <office-cidr-2> 1;
        }
        map $app_limit_bypass $app_limit_key {
          0 $binary_remote_addr;
          1 "";
        }
        limit_req_zone $app_limit_key zone=app_rpm:10m rate=120r/m;
        limit_conn_zone $app_limit_key zone=app_conn:10m;
controller:
  config:
    entries:
      http-snippets: |
        geo $app_limit_bypass {
          default 0;
          <office-cidr-1> 1;
          <office-cidr-2> 1;
        }
        map $app_limit_bypass $app_limit_key {
          0 $binary_remote_addr;
          1 "";
        }
        limit_req_zone $app_limit_key zone=app_rpm:10m rate=120r/m;
        limit_conn_zone $app_limit_key zone=app_conn:10m;
annotations:
  nginx.org/server-snippets: |
    limit_req zone=app_rpm burst=80 nodelay;
    limit_req_status 429;
    limit_conn app_conn 60;
    limit_conn_status 429;
annotations:
  nginx.org/server-snippets: |
    limit_req zone=app_rpm burst=80 nodelay;
    limit_req_status 429;
    limit_conn app_conn 60;
    limit_conn_status 429;
annotations:
  nginx.org/server-snippets: |
    limit_req zone=app_rpm burst=80 nodelay;
    limit_req_status 429;
    limit_conn app_conn 60;
    limit_conn_status 429;
controller:
  config:
    entries:
      keepalive-timeout: "60s"
      http2: "false"  # HTTP/2 and WebSocket upgrades conflict; disable explicitly
controller:
  config:
    entries:
      keepalive-timeout: "60s"
      http2: "false"  # HTTP/2 and WebSocket upgrades conflict; disable explicitly
controller:
  config:
    entries:
      keepalive-timeout: "60s"
      http2: "false"  # HTTP/2 and WebSocket upgrades conflict; disable explicitly
annotations:
  nginx.org/websocket-services: "my-websocket-service"
annotations:
  nginx.org/websocket-services: "my-websocket-service"
annotations:
  nginx.org/websocket-services: "my-websocket-service"
app.kubernetes.io/name=ingress-nginx
app.kubernetes.io/component=controller
app.kubernetes.io/name=ingress-nginx
app.kubernetes.io/component=controller
app.kubernetes.io/name=ingress-nginx
app.kubernetes.io/component=controller
kubectl patch service <legacy-service-name> \
  -n nginx-ingress \
  --type='merge' \
  -p '{"spec": {"selector": {"app.kubernetes.io/name": "nginx-ingress"}}}'
kubectl patch service <legacy-service-name> \
  -n nginx-ingress \
  --type='merge' \
  -p '{"spec": {"selector": {"app.kubernetes.io/name": "nginx-ingress"}}}'
kubectl patch service <legacy-service-name> \
  -n nginx-ingress \
  --type='merge' \
  -p '{"spec": {"selector": {"app.kubernetes.io/name": "nginx-ingress"}}}'
# before
solvers:
  - http01:
      ingress:
        class: nginx

# after
solvers:
  - http01:
      ingress:
        ingressClassName: nginx
# before
solvers:
  - http01:
      ingress:
        class: nginx

# after
solvers:
  - http01:
      ingress:
        ingressClassName: nginx
# before
solvers:
  - http01:
      ingress:
        class: nginx

# after
solvers:
  - http01:
      ingress:
        ingressClassName: nginx
# removed from cert-manager values
featureGates: "ACMEHTTP01IngressPathTypeExact=false"
# removed from cert-manager values
featureGates: "ACMEHTTP01IngressPathTypeExact=false"
# removed from cert-manager values
featureGates: "ACMEHTTP01IngressPathTypeExact=false"
# datadog-agent values.yaml
confd:
  openmetrics.yaml: |-
    ad_identifiers:
      - nginx-ingress
    init_config:
    instances:
      - openmetrics_endpoint: "http://%%host%%:9113/metrics"
        namespace: nginx_ingress
        metrics:
          - nginx_connections_accepted
          - nginx_connections_active
          - nginx_connections_handled
          - nginx_http_requests_total
          - nginx_ingress_controller_ingress_resources_total
          - nginx_ingress_controller_nginx_reloads_total
          - nginx_ingress_controller_nginx_reload_errors_total
          - nginx_ingress_controller_nginx_last_reload_milliseconds
# datadog-agent values.yaml
confd:
  openmetrics.yaml: |-
    ad_identifiers:
      - nginx-ingress
    init_config:
    instances:
      - openmetrics_endpoint: "http://%%host%%:9113/metrics"
        namespace: nginx_ingress
        metrics:
          - nginx_connections_accepted
          - nginx_connections_active
          - nginx_connections_handled
          - nginx_http_requests_total
          - nginx_ingress_controller_ingress_resources_total
          - nginx_ingress_controller_nginx_reloads_total
          - nginx_ingress_controller_nginx_reload_errors_total
          - nginx_ingress_controller_nginx_last_reload_milliseconds
# datadog-agent values.yaml
confd:
  openmetrics.yaml: |-
    ad_identifiers:
      - nginx-ingress
    init_config:
    instances:
      - openmetrics_endpoint: "http://%%host%%:9113/metrics"
        namespace: nginx_ingress
        metrics:
          - nginx_connections_accepted
          - nginx_connections_active
          - nginx_connections_handled
          - nginx_http_requests_total
          - nginx_ingress_controller_ingress_resources_total
          - nginx_ingress_controller_nginx_reloads_total
          - nginx_ingress_controller_nginx_reload_errors_total
          - nginx_ingress_controller_nginx_last_reload_milliseconds
beta.kubernetes.io/os=linux
beta.kubernetes.io/os=linux
beta.kubernetes.io/os=linux
kubernetes.io/os=linux
kubernetes.io/os=linux
kubernetes.io/os=linux
helm upgrade --install nginx-ingress ./nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m
helm upgrade --install nginx-ingress ./nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m
helm upgrade --install nginx-ingress ./nginx-ingress-2.5.1.tgz \
  --namespace nginx-ingress \
  -f values.yaml \
  --wait --timeout 5m

- IngressClass name remains nginx in every cluster (no application-level changes needed)
- Azure Load Balancer type (internal where it was internal, public where public)
- cert-manager ClusterIssuers (one field rename, covered below)
- Linkerd injection on controller pods