Website not accessible after idling for some time, running nginx and Kubernetes

3/30/2017

My website becomes inaccessible from the browser for a few minutes after it has been idle (not accessed) for around 30 minutes or more. I have to reload the page several times before it renders, and I am not sure where to start debugging.

The stack I am running is a Golang app behind nginx, running as a Kubernetes ingress controller. Here is part of my nginx.conf.

            daemon off;

            worker_processes 2;
            pid /run/nginx.pid;

            worker_rlimit_nofile 523264;
            events {
                multi_accept        on;
                worker_connections  16384;
                use                 epoll; 
            }

            http {
                real_ip_header      X-Forwarded-For;
                set_real_ip_from    0.0.0.0/0;
                real_ip_recursive   on;

                geoip_country       /etc/nginx/GeoIP.dat;
                geoip_city          /etc/nginx/GeoLiteCity.dat;
                geoip_proxy_recursive on;
                # lua section to return proper error codes when custom pages are used
                lua_package_path '.?.lua;./etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/lua-resty-http/lib/?.lua;';
                init_by_lua_block {
                    require("error_page")
                }

                sendfile            on;
                aio                 threads;
                tcp_nopush          on;
                tcp_nodelay         on;
                log_subrequest      on;

                reset_timedout_connection on;

                keepalive_timeout 75s;

                client_header_buffer_size       1k;
                large_client_header_buffers     4 8k;

                types_hash_max_size             2048;
                server_names_hash_max_size      512;
                server_names_hash_bucket_size   64;
                map_hash_bucket_size            64;

                include /etc/nginx/mime.types;
                default_type text/html;
                gzip on;
                gzip_comp_level 5;
                gzip_http_version 1.1;
                gzip_min_length 256;
                gzip_types application/atom+xml application/javascript application/x-javascript application/json application/rss+xml application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/svg+xml image/x-icon text/css text/plain text/x-component;    
                gzip_proxied any;

                server_tokens on;   

                log_format upstreaminfo '$remote_addr - '
                    '[$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" '
                    '$request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status';

                map $request_uri $loggable {
                    default 1;
                }

                access_log /var/log/nginx/access.log upstreaminfo if=$loggable;
                error_log  /var/log/nginx/error.log notice;

                resolver 10.131.240.10 valid=30s;

                # Retain the default nginx handling of requests without a "Connection" header
                map $http_upgrade $connection_upgrade {
                    default          upgrade;
                    ''               close;
                }

                # trust http_x_forwarded_proto headers correctly indicate ssl offloading
                map $http_x_forwarded_proto $pass_access_scheme {
                    default          $http_x_forwarded_proto;
                    ''               $scheme;
                }

                map $http_x_forwarded_port $pass_server_port {
                default           $http_x_forwarded_port;
                ''                $server_port;
                }

                # map port 442 to 443 for header X-Forwarded-Port
                map $pass_server_port $pass_port {
                    442              443;
                    default          $pass_server_port;
                }

                # Map a response error watching the header Content-Type
                map $http_accept $httpAccept {
                    default          html;
                    application/json json;
                    application/xml  xml;
                    text/plain       text;
                }

                map $httpAccept $httpReturnType {
                    default          text/html;
                    json             application/json;
                    xml              application/xml;
                    text             text/plain;
                }

                server_name_in_redirect off;
                port_in_redirect        off;

                ssl_protocols TLSv1 TLSv1.1 TLSv1.2;

                # turn on session caching to drastically improve performance
                ssl_session_cache builtin:1000 shared:SSL:10m;
                ssl_session_timeout 10m;

                # allow configuring ssl session tickets
                ssl_session_tickets on;

                # slightly reduce the time-to-first-byte
                ssl_buffer_size 4k;

                # allow configuring custom ssl ciphers
                ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
                ssl_prefer_server_ciphers on;

                # In case of errors try the next upstream server before returning an error
                proxy_next_upstream                     error timeout invalid_header http_502 http_503 http_504;

                upstream default-ui-80 {
                    sticky hash=sha1 name=route  httponly;
                    server 10.128.2.104:4000 max_fails=0 fail_timeout=0;
                    server 10.128.4.37:4000 max_fails=0 fail_timeout=0;
                }

                server {
                    server_name app.com;
                    listen [::]:80;
                    listen 442  ssl http2;
                    # PEM sha: a51bd3f56b3ec447945f1f92f0ad140bb8134d11
                    ssl_certificate                         /ingress-controller/ssl/default-linker-secret.pem;
                    ssl_certificate_key                     /ingress-controller/ssl/default-linker-secret.pem;

                    more_set_headers                        "Strict-Transport-Security: max-age=15724800; includeSubDomains; preload";
                    location / {
                        set $proxy_upstream_name "default-ui-80";

                        port_in_redirect off;

                        # enforce ssl on server side
                        if ($scheme = http) {
                            return 301 https://$host$request_uri;
                        }

                        client_max_body_size                    "1024m";

                        proxy_set_header Host                   $host;

                        # Pass Real IP
                        proxy_set_header X-Real-IP              $remote_addr;

                        # Allow websocket connections
                        proxy_set_header                        Upgrade           $http_upgrade;
                        proxy_set_header                        Connection        $connection_upgrade;

                        proxy_set_header X-Forwarded-For        $proxy_add_x_forwarded_for;
                        proxy_set_header X-Forwarded-Host       $host;
                        proxy_set_header X-Forwarded-Port       $pass_port;
                        proxy_set_header X-Forwarded-Proto      $pass_access_scheme;

                        # mitigate HTTPoxy Vulnerability
                        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
                        proxy_set_header Proxy                  "";

                        # Custom headers

                        proxy_connect_timeout                   5s;
                        proxy_send_timeout                      3600s;
                        proxy_read_timeout                      3600s;

                        proxy_redirect                          off;
                        proxy_buffering                         off;
                        proxy_buffer_size                       "4k";

                        proxy_http_version                      1.1;
                        proxy_pass http://default-ui-80;
                    }
                }

            }

ingress controller


            apiVersion: v1
            kind: ReplicationController
            metadata:
              name: nginx-ingress-controller
              labels:
                k8s-app: nginx-ingress-lb
            spec:
              replicas: 1
              selector:
                k8s-app: nginx-ingress-lb
              template:
                metadata:
                  labels:
                    k8s-app: nginx-ingress-lb
                    name: nginx-ingress-lb
                spec:
                  terminationGracePeriodSeconds: 60
                  containers:
                  - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
                    name: nginx-ingress-lb
                    imagePullPolicy: Always
                    readinessProbe:
                      httpGet:
                        path: /healthz
                        port: 10254
                        scheme: HTTP
                    livenessProbe:
                      httpGet:
                        path: /healthz
                        port: 10254
                        scheme: HTTP
                      initialDelaySeconds: 10
                      timeoutSeconds: 1
                    # use downward API
                    env:
                    - name: POD_NAME
                      valueFrom:
                        fieldRef:
                          fieldPath: metadata.name
                    - name: POD_NAMESPACE
                      valueFrom:
                        fieldRef:
                          fieldPath: metadata.namespace
                    ports:
                    - containerPort: 80
                      hostPort: 80
                    - containerPort: 443
                      hostPort: 443
                    args:
                    - /nginx-ingress-controller
                    - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
                    - --configmap=$(POD_NAMESPACE)/nginx-ingress-sticky-session
                    - --configmap=$(POD_NAMESPACE)/nginx-settings-configmap
                    - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-configmaps
                    - --v=2

ingress

            apiVersion: extensions/v1beta1
            kind: Ingress
            metadata:
              name: foo-prod
              annotations:
                kubernetes.io/tls-acme: "true"
                kubernetes.io/ingress.class: "nginx"
                ingress.kubernetes.io/affinity: "cookie"
                ingress.kubernetes.io/session-cookie-name: "route"
                ingress.kubernetes.io/session-cookie-hash: "sha1"
                nginx.org/client-max-body-size: "1024m"
            spec:
              tls:
              - hosts:
                - foo.io
                secretName: foo-secret
              rules:
              - host: foo.io
                http:
                  paths:
                  - backend:
                      serviceName: foo.io
                      servicePort: 80

service

            apiVersion: v1
            kind: Service
            metadata:
              name: foo-prod-nginx
            spec:
              type: LoadBalancer
              ports:
              - port: 80
                name: http
              - port: 443
                name: https
              selector:
                app: nginx-ingress-controller
-- Hokutosei
kubernetes
nginx

1 Answer

4/5/2017

Setting service.type=LoadBalancer allocates a public IP per Kubernetes Service, which is not how ingress works. You should expose your service as a NodePort and let the ingress route traffic to it; example here.
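For illustration, a minimal sketch of a NodePort Service for the backend, assuming the Ingress above keeps pointing at a Service named foo.io, and assuming the Golang app pods carry an app: ui label and listen on port 4000 (the selector and targetPort are guesses based on the upstream default-ui-80 block in your nginx.conf):

            apiVersion: v1
            kind: Service
            metadata:
              name: foo.io           # taken from serviceName in the Ingress backend above
            spec:
              type: NodePort         # no public IP is allocated; the ingress controller routes to it
              selector:
                app: ui              # assumed label on the Golang app pods
              ports:
              - name: http
                port: 80             # matches servicePort: 80 in the Ingress
                targetPort: 4000     # assumed container port, taken from the upstream servers in nginx.conf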

Also, if you are going to use nginx as the ingress controller, you should use endpoints instead of the service. Here is why.
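As a quick sanity check that the controller actually has pod endpoints to route to, you can compare what the backend Service resolves to with what the Ingress reports (resource names taken from the manifests above):

            # list the pod IP:port pairs behind the backend Service
            kubectl get endpoints foo.io

            # show which backends the Ingress is wired to
            kubectl describe ingress foo-prod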

-- Ken Chen
Source: StackOverflow