
Varnish setup help

Discussion in 'Other Centmin Mod Installed software' started by DenSouLew, Oct 18, 2015.

  1. DenSouLew

    DenSouLew New Member

    hello guys, I'm trying to set up Varnish on port 80 and nginx on port 8090, and here are my configs


    domain.com.conf
    Code:
    server {
                listen   127.0.0.1:8090;
                server_name www.domain.com;
                return 301 $scheme://domain.com$request_uri;
           }
    
    server {
    
      server_name domain.com www.domain.com;
     
      set_real_ip_from 199.27.128.0/21;
      set_real_ip_from 173.245.48.0/20;
      set_real_ip_from 103.21.244.0/22;
      set_real_ip_from 103.22.200.0/22;
      set_real_ip_from 103.31.4.0/22;
      set_real_ip_from 141.101.64.0/18;
      set_real_ip_from 108.162.192.0/18;
      set_real_ip_from 190.93.240.0/20;
      set_real_ip_from 188.114.96.0/20;
      set_real_ip_from 197.234.240.0/22;
      set_real_ip_from 198.41.128.0/17;
      set_real_ip_from 162.158.0.0/15;
      set_real_ip_from 104.16.0.0/12;
      real_ip_header CF-Connecting-IP;
    
       #ngx_pagespeed & ngx_pagespeed handler
       include /usr/local/nginx/conf/pagespeed.conf;
       include /usr/local/nginx/conf/pagespeedhandler.conf;
       include /usr/local/nginx/conf/pagespeedstatslog.conf;
    
      access_log /home/nginx/domains/domain.com/log/access.log combined buffer=256k flush=60m;
      error_log /home/nginx/domains/domain.com/log/error.log;
    
      root /home/nginx/domains/domain.com/public;
    
      # prevent access to ./directories and files
      location ~ (?:^|/)\. {
       deny all;
      }
    
    location /forum {try_files $uri $uri/ @forum;}
    location @forum {rewrite ^/forum(.+)$ /forum/index.php?p=$1 last;}
     
      location / {
        try_files $uri $uri/ /index.php?$query_string;
       
        rewrite ^/index.php/(.*) /$1  permanent;
    
      # block common exploits, sql injections etc
      include /usr/local/nginx/conf/block.conf;
    
      }
    
      include /usr/local/nginx/conf/staticfiles.conf;
      include /usr/local/nginx/conf/php.conf;
      include /usr/local/nginx/conf/drop.conf;
      #include /usr/local/nginx/conf/errorpage.conf;
      include /usr/local/nginx/conf/vts_server.conf;
    }
    

    virtual.conf
    Code:
    server {
                listen   8090 default_server backlog=2048 reuseport;
                server_name srv.domain.com;
                root   html;
    
            access_log              /var/log/nginx/localhost.access.log     main buffer=256k flush=5m;
            error_log               /var/log/nginx/localhost.error.log      error;
    
    # ngx_pagespeed & ngx_pagespeed handler
    #include /usr/local/nginx/conf/pagespeed.conf;
    #include /usr/local/nginx/conf/pagespeedhandler.conf;
    #include /usr/local/nginx/conf/pagespeedstatslog.conf;
    
    # limit_conn limit_per_ip 16;
    # ssi  on;
    
            location /nginx_status {
            stub_status on;
            access_log   off;
            allow 127.0.0.1;
            #allow youripaddress;
            deny all;
            }
    
                location / {
    
    # block common exploits, sql injections etc
    #include /usr/local/nginx/conf/block.conf;
    
    #         Enables directory listings when index file not found
    #        autoindex  on;
    
    #        Shows file listing times as local time
    #        autoindex_localtime on;
    
    #        Enable for vBulletin usage WITHOUT vbSEO installed
    #        try_files        $uri $uri/ /index.php;
               
                }
    
            # example nginx-http-concat
            # /csstest/??one.css,two.css
            #location /csstest {
            #concat on;
            #concat_max_files 20;
            #}
    
    include /usr/local/nginx/conf/staticfiles.conf;
    include /usr/local/nginx/conf/php.conf;
    #include /usr/local/nginx/conf/phpstatus.conf;
    include /usr/local/nginx/conf/drop.conf;
    #include /usr/local/nginx/conf/errorpage.conf;
    include /usr/local/nginx/conf/vts_mainserver.conf;
    
           }

    varnish.params
    Code:
    # Varnish environment configuration description. This was derived from
    # the old style sysconfig/defaults settings
    
    # Set this to 1 to make systemd reload try to switch vcl without restart.
    RELOAD_VCL=1
    
    # Main configuration file. You probably want to change it.
    VARNISH_VCL_CONF=/etc/varnish/default.vcl
    
    # Default address and port to bind to. Blank address means all IPv4
    # and IPv6 interfaces, otherwise specify a host name, an IPv4 dotted
    # quad, or an IPv6 address in brackets.
    VARNISH_LISTEN_ADDRESS=127.0.0.1
    VARNISH_LISTEN_PORT=80
    
    # Admin interface listen address and port
    VARNISH_ADMIN_LISTEN_ADDRESS=127.0.0.1
    VARNISH_ADMIN_LISTEN_PORT=6082
    
    # Shared secret file for admin interface
    VARNISH_SECRET_FILE=/etc/varnish/secret
    
    # Backend storage specification, see Storage Types in the varnishd(5)
    # man page for details.
    VARNISH_STORAGE="file,/var/lib/varnish/varnish_storage.bin,1G"
    
    # Default TTL used when the backend does not specify one
    VARNISH_TTL=120
    
    # User and group for the varnishd worker processes
    VARNISH_USER=varnish
    VARNISH_GROUP=varnish
    
    # Other options, see the man page varnishd(1)
    #DAEMON_OPTS="-p thread_pool_min=5 -p thread_pool_max=500 -p thread_pool_timeout=300"
    

    default.vcl
    Code:
    vcl 4.0;
    # Based on: https://github.com/mattiasgeniar/varnish-4.0-configuration-templates/blob/master/default.vcl
    
    import std;
    import directors;
    
    backend default { # Define one backend
      .host = "127.0.0.1";    # IP or Hostname of backend
      .port = "8090";           # Port Apache or whatever is listening
      .max_connections = 300; # That's it
    
      .probe = {
        #.url = "/"; # short easy way (GET /)
        # We prefer to only do a HEAD /
        .request =
          "HEAD / HTTP/1.1"
          "Host: localhost"
          "Connection: close";
    
        .interval  = 5s; # check the health of each backend every 5 seconds
        .timeout   = 1s; # timing out after 1 second.
        .window    = 5;  # If 3 out of the last 5 polls succeeded the backend is considered healthy, otherwise it will be marked as sick
        .threshold = 3;
      }
    
      .first_byte_timeout     = 300s;   # How long to wait before we receive a first byte from our backend?
      .connect_timeout        = 5s;     # How long to wait for a backend connection?
      .between_bytes_timeout  = 2s;     # How long to wait between bytes received from our backend?
    }
    
    acl purge {
      # ACL we'll use later to allow purges
      "localhost";
      "127.0.0.1";
      "::1";
    }
    
    /*
    acl editors {
      # ACL to honor the "Cache-Control: no-cache" header to force a refresh but only from selected IPs
      "localhost";
      "127.0.0.1";
      "::1";
    }
    */
    
    sub vcl_init {
      # Called when VCL is loaded, before any requests pass through it.
      # Typically used to initialize VMODs.
    
      new vdir = directors.round_robin();
      vdir.add_backend(default);
      # vdir.add_backend(server...);
      # vdir.add_backend(servern);
    }
    
    sub vcl_recv {
      # Called at the beginning of a request, after the complete request has been received and parsed.
      # Its purpose is to decide whether or not to serve the request, how to do it, and, if applicable,
      # which backend to use.
      # also used to modify the request
    
      set req.backend_hint = vdir.backend(); # send all traffic to the vdir director
    
      # Normalize the header, remove the port (in case you're testing this on various TCP ports)
      set req.http.Host = regsub(req.http.Host, ":[0-9]+", "");
    
      # Normalize the query arguments
      set req.url = std.querysort(req.url);
    
      # Allow purging
      if (req.method == "PURGE") {
        if (!client.ip ~ purge) { # purge is the ACL defined at the begining
          # Not from an allowed IP? Then die with an error.
          return (synth(405, "This IP is not allowed to send PURGE requests."));
        }
        # If you got this stage (and didn't error out above), purge the cached result
        return (purge);
      }
    
      # Only deal with "normal" types
      if (req.method != "GET" &&
          req.method != "HEAD" &&
          req.method != "PUT" &&
          req.method != "POST" &&
          req.method != "TRACE" &&
          req.method != "OPTIONS" &&
          req.method != "PATCH" &&
          req.method != "DELETE") {
        /* Non-RFC2616 or CONNECT which is weird. */
        return (pipe);
      }
    
      # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
      if (req.http.Upgrade ~ "(?i)websocket") {
        return (pipe);
      }
    
      # Only cache GET or HEAD requests. This makes sure the POST requests are always passed.
      if (req.method != "GET" && req.method != "HEAD") {
        return (pass);
      }
    
      # Some generic URL manipulation, useful for all templates that follow
      # First remove the Google Analytics added parameters, useless for our backend
      if (req.url ~ "(\?|&)(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=") {
        set req.url = regsuball(req.url, "&(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "");
        set req.url = regsuball(req.url, "\?(utm_source|utm_medium|utm_campaign|utm_content|gclid|cx|ie|cof|siteurl)=([A-z0-9_\-\.%25]+)", "?");
        set req.url = regsub(req.url, "\?&", "?");
        set req.url = regsub(req.url, "\?$", "");
      }
    
      # Strip hash, server doesn't need it.
      if (req.url ~ "\#") {
        set req.url = regsub(req.url, "\#.*$", "");
      }
    
      # Strip a trailing ? if it exists
      if (req.url ~ "\?$") {
        set req.url = regsub(req.url, "\?$", "");
      }
    
      # Some generic cookie manipulation, useful for all templates that follow
      # Remove the "has_js" cookie
      set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", "");
    
      # Remove any Google Analytics based cookies
      set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", "");
      set req.http.Cookie = regsuball(req.http.Cookie, "_ga=[^;]+(; )?", "");
      set req.http.Cookie = regsuball(req.http.Cookie, "_gat=[^;]+(; )?", "");
      set req.http.Cookie = regsuball(req.http.Cookie, "utmctr=[^;]+(; )?", "");
      set req.http.Cookie = regsuball(req.http.Cookie, "utmcmd.=[^;]+(; )?", "");
      set req.http.Cookie = regsuball(req.http.Cookie, "utmccn.=[^;]+(; )?", "");
    
      # Remove DoubleClick offensive cookies
      set req.http.Cookie = regsuball(req.http.Cookie, "__gads=[^;]+(; )?", "");
    
      # Remove the Quant Capital cookies (added by some plugin, all __qca)
      set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", "");
    
      # Remove the AddThis cookies
      set req.http.Cookie = regsuball(req.http.Cookie, "__atuv.=[^;]+(; )?", "");
    
      # Remove a ";" prefix in the cookie if present
      set req.http.Cookie = regsuball(req.http.Cookie, "^;\s*", "");
    
      # Are there cookies left with only spaces or that are empty?
      if (req.http.cookie ~ "^\s*$") {
        unset req.http.cookie;
      }
     
      # the cookie will persist until it expires (see your laravel session config)
      if (req.http.Cookie ~ "laravel_session") {
        return(pass);
      }
     
      # (fall through - the checks below decide whether to hash or pass)
    
      if (req.http.Cache-Control ~ "(?i)no-cache") {
      #if (req.http.Cache-Control ~ "(?i)no-cache" && client.ip ~ editors) { # create the acl editors if you want to restrict the Ctrl-F5
      # http://varnish.projects.linpro.no/wiki/VCLExampleEnableForceRefresh
      # Ignore requests via proxy caches and badly behaved crawlers
      # like msnbot that send no-cache with every request.
        if (! (req.http.Via || req.http.User-Agent ~ "(?i)bot" || req.http.X-Purge)) {
          #set req.hash_always_miss = true; # Doesn't seems to refresh the object in the cache
          return(purge); # Couple this with restart in vcl_purge and X-Purge header to avoid loops
        }
      }
    
      # Large static files are delivered directly to the end-user without
      # waiting for Varnish to fully read the file first.
      # Varnish 4 fully supports Streaming, so set do_stream in vcl_backend_response()
      if (req.url ~ "^[^?]*\.(mp[34]|rar|tar|tgz|gz|wav|zip|bz2|xz|7z|avi|mov|ogm|mpe?g|mk[av]|webm)(\?.*)?$") {
        unset req.http.Cookie;
        return (hash);
      }
    
      # Remove all cookies for static files
      # A valid discussion could be held on this line: do you really need to cache static files that don't cause load? Only if you have memory left.
      # Sure, there's disk I/O, but chances are your OS will already have these files in their buffers (thus memory).
      # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
      if (req.url ~ "^[^?]*\.(bmp|bz2|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|js|less|pdf|png|rtf|swf|txt|woff|xml)(\?.*)?$") {
        unset req.http.Cookie;
        return (hash);
      }
    
      # Send Surrogate-Capability headers to announce ESI support to backend
      set req.http.Surrogate-Capability = "key=ESI/1.0";
    
      if (req.http.Authorization) {
        # Not cacheable by default
        return (pass);
      }
    
      return (hash);
    }
    
    sub vcl_pipe {
      # Called upon entering pipe mode.
      # In this mode, the request is passed on to the backend, and any further data from both the client
      # and backend is passed on unaltered until either end closes the connection. Basically, Varnish will
      # degrade into a simple TCP proxy, shuffling bytes back and forth. For a connection in pipe mode,
      # no other VCL subroutine will ever get called after vcl_pipe.
    
      # Note that only the first request to the backend will have
      # X-Forwarded-For set.  If you use X-Forwarded-For and want to
      # have it set for all requests, make sure to have:
      # set bereq.http.connection = "close";
      # here.  It is not set by default as it might break some broken web
      # applications, like IIS with NTLM authentication.
    
      # set bereq.http.Connection = "Close";
    
      # Implementing websocket support (https://www.varnish-cache.org/docs/4.0/users-guide/vcl-example-websockets.html)
      if (req.http.upgrade) {
        set bereq.http.upgrade = req.http.upgrade;
      }
    
      return (pipe);
    }
    
    sub vcl_pass {
      # Called upon entering pass mode. In this mode, the request is passed on to the backend, and the
      # backend's response is passed on to the client, but is not entered into the cache. Subsequent
      # requests submitted over the same client connection are handled normally.
    
      # return (pass);
    }
    
    # The data on which the hashing will take place
    sub vcl_hash {
      # Called after vcl_recv to create a hash value for the request. This is used as a key
      # to look up the object in Varnish.
    
      hash_data(req.url);
    
      if (req.http.host) {
        hash_data(req.http.host);
      } else {
        hash_data(server.ip);
      }
    
      # hash cookies for requests that have them
      if (req.http.Cookie) {
        hash_data(req.http.Cookie);
      }
    }
    
    sub vcl_hit {
      # Called when a cache lookup is successful.
    
      if (obj.ttl >= 0s) {
        # A pure unadultered hit, deliver it
        return (deliver);
      }
    
      # https://www.varnish-cache.org/docs/trunk/users-guide/vcl-grace.html
      # When several clients are requesting the same page Varnish will send one request to the backend and place the others on hold while fetching one copy from the backend. In some products this is called request coalescing and Varnish does this automatically.
      # If you are serving thousands of hits per second the queue of waiting requests can get huge. There are two potential problems - one is a thundering herd problem - suddenly releasing a thousand threads to serve content might send the load sky high. Secondly - nobody likes to wait. To deal with this we can instruct Varnish to keep the objects in cache beyond their TTL and to serve the waiting requests somewhat stale content.
    
    # if (!std.healthy(req.backend_hint) && (obj.ttl + obj.grace > 0s)) {
    #   return (deliver);
    # } else {
    #   return (fetch);
    # }
    
      # We have no fresh fish. Lets look at the stale ones.
      if (std.healthy(req.backend_hint)) {
        # Backend is healthy. Limit age to 10s.
        if (obj.ttl + 10s > 0s) {
          #set req.http.grace = "normal(limited)";
          return (deliver);
        } else {
          # No candidate for grace. Fetch a fresh object.
          return(fetch);
        }
      } else {
        # backend is sick - use full grace
          if (obj.ttl + obj.grace > 0s) {
          #set req.http.grace = "full";
          return (deliver);
        } else {
          # no graced object.
          return (fetch);
        }
      }
    
      # fetch & deliver once we get the result
      return (fetch); # Dead code, keep as a safeguard
    }
    
    sub vcl_miss {
      # Called after a cache lookup if the requested document was not found in the cache. Its purpose
      # is to decide whether or not to attempt to retrieve the document from the backend, and which
      # backend to use.
    
      return (fetch);
    }
    
    # Handle the HTTP request coming from our backend
    sub vcl_backend_response {
      
      # Called after the response headers has been successfully retrieved from the backend.
    
      # Pause ESI request and remove Surrogate-Control header
      if (beresp.http.Surrogate-Control ~ "ESI/1.0") {
        unset beresp.http.Surrogate-Control;
        set beresp.do_esi = true;
      }
    
      if (beresp.http.X-No-Session ~ "yeah") { 
        unset beresp.http.set-cookie; 
      } 
      else
      {
      # do not cache responses which are for logged in users
        set beresp.uncacheable = true;
        set beresp.ttl = 120s;
        return (deliver);
      }
      # only cache status ok 
      if ( beresp.status != 200 ) { 
        set beresp.uncacheable = true;
        set beresp.ttl = 120s;
        return (deliver);
      } 
      # otherwise fall through so the static-file and TTL rules below still apply
     
      # Enable cache for all static files
      # The same argument as the static caches from above: monitor your cache size, if you get data nuked out of it, consider giving up the static file cache.
      # Before you blindly enable this, have a read here: https://ma.ttias.be/stop-caching-static-files/
      if (bereq.url ~ "^[^?]*\.(bmp|bz2|css|doc|eot|flv|gif|gz|ico|jpeg|jpg|js|less|mp[34]|pdf|png|rar|rtf|swf|tar|tgz|txt|wav|woff|xml|zip|webm)(\?.*)?$") {
        unset beresp.http.set-cookie;
      }
    
      # Large static files are delivered directly to the end-user without
      # waiting for Varnish to fully read the file first.
      # Varnish 4 fully supports Streaming, so use streaming here to avoid locking.
      if (bereq.url ~ "^[^?]*\.(mp[34]|rar|tar|tgz|gz|wav|zip|bz2|xz|7z|avi|mov|ogm|mpe?g|mk[av]|webm)(\?.*)?$") {
        unset beresp.http.set-cookie;
        set beresp.do_stream = true;  # Check memory usage it'll grow in fetch_chunksize blocks (128k by default) if the backend doesn't send a Content-Length header, so only enable it for big objects
        set beresp.do_gzip = false;   # Don't try to compress it for storage
      }
    
      # Sometimes, a 301 or 302 redirect formed via Apache's mod_rewrite can mess with the HTTP port that is being passed along.
      # This often happens with simple rewrite rules in a scenario where Varnish runs on :80 and Apache on :8080 on the same box.
      # A redirect can then often redirect the end-user to a URL on :8080, where it should be :80.
      # This may need finetuning on your setup.
      #
      # To prevent accidental replace, we only filter the 301/302 redirects for now.
      if (beresp.status == 301 || beresp.status == 302) {
        set beresp.http.Location = regsub(beresp.http.Location, ":[0-9]+", "");
      }
    
      # Set 2min cache if unset for static files
      if (beresp.ttl <= 0s || beresp.http.Set-Cookie || beresp.http.Vary == "*") {
        set beresp.ttl = 120s; # Important, you shouldn't rely on this, SET YOUR HEADERS in the backend
        set beresp.uncacheable = true;
        return (deliver);
      }
    
      # Allow stale content, in case the backend goes down.
      # make Varnish keep all objects for 6 hours beyond their TTL
      set beresp.grace = 6h;
    
      return (deliver);
    }
    
    # The routine when we deliver the HTTP request to the user
    # Last chance to modify headers that are sent to the client
    sub vcl_deliver {
      # Called before a cached object is delivered to the client.
    
      if (obj.hits > 0) { # Add debug header to see if it's a HIT/MISS and the number of hits, disable when not needed
        set resp.http.X-Cache = "HIT";
      } else {
        set resp.http.X-Cache = "MISS";
      }
    
      # Please note that obj.hits behaviour changed in 4.0, now it counts per objecthead, not per object
      # and obj.hits may not be reset in some cases where bans are in use. See bug 1492 for details.
      # So take hits with a grain of salt
      set resp.http.X-Cache-Hits = obj.hits;
    
      # Remove some headers: PHP version
      unset resp.http.X-Powered-By;
    
      # Remove some headers: Apache version & OS
    
    
      return (deliver);
    }
    
    sub vcl_purge {
      # Only handle actual PURGE HTTP methods, everything else is discarded
      if (req.method != "PURGE") {
        # restart request
        set req.http.X-Purge = "Yes";
        return(restart);
      }
    }
    
    sub vcl_synth {
      if (resp.status == 720) {
        # We use this special error status 720 to force redirects with 301 (permanent) redirects
        # To use this, call the following from anywhere in vcl_recv: return (synth(720, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 301;
        return (deliver);
      } elseif (resp.status == 721) {
        # And we use error status 721 to force redirects with a 302 (temporary) redirect
    # To use this, call the following from anywhere in vcl_recv: return (synth(721, "http://host/new.html"));
        set resp.http.Location = resp.reason;
        set resp.status = 302;
        return (deliver);
      }
    
      return (deliver);
    }
    
    
    sub vcl_fini {
      # Called when VCL is discarded only after all requests have exited the VCL.
      # Typically used to clean up VMODs.
    
      return (ok);
    }

    The issue is that nginx keeps using port 80:
    Code:
    [root@srv /]# netstat -tulpn | grep :80
    tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      6405/nginx: master
    tcp        0      0 0.0.0.0:8090            0.0.0.0:*               LISTEN      6405/nginx: master
    
    [root@srv /]# ls -l /proc/6405/exe
    lrwxrwxrwx 1 root root 0 Oct 18 07:26 /proc/6405/exe -> /usr/local/sbin/nginx
    
    and I can't get Varnish to bind with
    Code:
    VARNISH_LISTEN_ADDRESS=127.0.0.1
    VARNISH_LISTEN_PORT=80
    set in varnish.params.


    What am I missing? Any help would be really appreciated. Thanks, guys.
     
  2. eva2000

    eva2000 Administrator Staff Member

    Thanks for sharing your configuration settings; it usually makes it easier to help.

    Centmin Mod is provided as is, so short of script-related bugs or issues, any further optimisation of the web stack components - nginx, php-fpm, mariadb mysql, csf firewall etc. - or web app specific configurations is left to the Centmin Mod user to deal with, so I do not provide any free support for such. There is a Varnish install thread at Installing Varnish Cache ? | Centmin Mod Community, but you have most things covered and have a good grasp of Varnish Cache setup itself. :)

    However, Centmin Mod users are free to help each other out and to ask questions or give answers on this community forum. My hope is that this community forum evolves so that more veteran, long-time Centmin Mod users help new Centmin Mod users out :)

    So basically the problem is that some nginx vhost is still listening on port 80 instead of your defined backend port 8090. You can find which vhosts those might be by grepping for port 80 or for listen directives that do not specify a port (they default to port 80), excluding the phpfpmd directory used for php-fpm multi pools:

    Code:
    egrep ':80|listen' -Ro /usr/local/nginx/conf/* | grep -v phpfpmd
    Example output for nginx vhost config files that may still have references to listen port 80 instead of your defined custom backend port 8090:
    Code:
    egrep ':80|listen' -Ro /usr/local/nginx/conf/* | grep -v phpfpmd
    /usr/local/nginx/conf/conf.d/ssl.conf:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default::80
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default::80
    /usr/local/nginx/conf/nginx.conf.default:listen
    You might need to exclude 8090 too, and add grep's -n option to print line numbers:
    Code:
    egrep ':80|listen' -Rno /usr/local/nginx/conf/* | egrep -v 'phpfpmd|8090'
    Code:
    egrep ':80|listen' -Rno /usr/local/nginx/conf/* | egrep -v 'phpfpmd|8090'
    /usr/local/nginx/conf/conf.d/ssl.conf:4:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:2:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:3:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:4:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:11:listen
    /usr/local/nginx/conf/nginx.conf.default:36:listen
    /usr/local/nginx/conf/nginx.conf.default:57:listen
    /usr/local/nginx/conf/nginx.conf.default:57::80
    /usr/local/nginx/conf/nginx.conf.default:63:listen
    /usr/local/nginx/conf/nginx.conf.default:85:listen
    /usr/local/nginx/conf/nginx.conf.default:86:listen
    /usr/local/nginx/conf/nginx.conf.default:86::80
    /usr/local/nginx/conf/nginx.conf.default:99:listen
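    For reference, in the domain.com.conf posted above the second server{} block has no listen directive at all, so nginx binds it to *:80 by default and Varnish can never take that port. A minimal sketch of the kind of change implied here (assuming you keep 127.0.0.1:8090 as the backend address, as the first block and virtual.conf already do):
    Code:
    server {
      # explicit backend listen so port 80 is left free for Varnish
      listen   127.0.0.1:8090;

      server_name domain.com www.domain.com;
      # ... rest of the vhost unchanged ...
    }
    After a change like this, check the config with nginx -t and restart nginx; Varnish should then be able to bind to port 80.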
     
    Last edited: Oct 18, 2015
  3. DenSouLew

    DenSouLew New Member

    Thaaaaanks @eva2000, I've figured it out. Thanks once again :)
     
  4. eva2000

    eva2000 Administrator Staff Member

    You're welcome :D
     
  5. Rodz

    Rodz New Member

    Hi guys, what did you do to solve this? I've gone through all the .conf files and even edited the nginx.conf.default file.
    What are these entries? They can't be edited away and still show port 80:
    /usr/local/nginx/conf/nginx.conf.default:86::80
    /usr/local/nginx/conf/nginx.conf.default:57::80


    Code:
    netstat -tulpn | grep :80
    tcp        0      0 0.0.0.0:8080            0.0.0.0:*               LISTEN      939/nginx: master
    
    Code:
    egrep ':80|listen' -Rno /usr/local/nginx/conf/* | egrep -v 'phpfpmd|8080'
    /usr/local/nginx/conf/conf.d/phpmyadmin_ssl.conf:3:listen
    /usr/local/nginx/conf/conf.d/suprawisdom.com.conf:8:listen
    /usr/local/nginx/conf/conf.d/suprawisdom.com.conf:9:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:4:listen
    /usr/local/nginx/conf/conf.d/demodomain.com.conf:11:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:2:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:3:listen
    /usr/local/nginx/conf/conf.d/ssl.conf:4:listen
    /usr/local/nginx/conf/nginx.conf.default:36:listen
    /usr/local/nginx/conf/nginx.conf.default:57:listen
    /usr/local/nginx/conf/nginx.conf.default:57::80
    /usr/local/nginx/conf/nginx.conf.default:63:listen
    /usr/local/nginx/conf/nginx.conf.default:85:listen
    /usr/local/nginx/conf/nginx.conf.default:86:listen
    /usr/local/nginx/conf/nginx.conf.default:86::80
    /usr/local/nginx/conf/nginx.conf.default:99:listen
    
     
  6. eva2000

    eva2000 Administrator Staff Member

    nginx.conf.default is ignored and not used by Centmin Mod Nginx at all
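    If you want to confirm which config file the running nginx binary actually loads (a quick check with standard nginx flags, nothing Centmin Mod specific), something like this should show that it is nginx.conf rather than nginx.conf.default:
    Code:
    # print the compiled-in config path
    nginx -V 2>&1 | tr ' ' '\n' | grep conf-path

    # test the active configuration and print which file was read
    nginx -t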
     
  7. RB1

    RB1 Active Member

    Don't quote me on this, but I believe nginx.conf is the file you want to be editing.
    I think nginx.conf.default is there as a backup in case you screw up your configuration during editing.
    Also, I'm not sure if nginx.conf behaves this way, but when I make edits to my pagespeed.conf it is automatically backed up with a timestamp, e.g. as pagespeed.conf_010117-041218.

    Edit: It actually backs up after an nginx upgrade.
     
    Last edited: Mar 11, 2017
  8. eva2000

    eva2000 Administrator Staff Member

  9. Rodz

    Rodz New Member

    When I reboot, Varnish will not autostart. I get this in red: Manager got SIGINT, Child (4994) died signal=15
    Code:
    service varnish status
    Redirecting to /bin/systemctl status  varnish.service
    * varnish.service - Varnish Cache, a high-performance HTTP accelerator
       Loaded: loaded (/usr/lib/systemd/system/varnish.service; disabled; vendor preset: disabled)
       Active: inactive (dead)
    Mar 10 22:03:29 server1 varnishd[4993]: Platform: Linux,2.6.32-042stab120.18,x86_64,-junix,-smalloc,-smalloc,-hcritbit
    Mar 10 22:03:29 server1 systemd[1]: Started Varnish Cache, a high-performance HTTP accelerator.
    Mar 10 22:03:29 server1 varnishd[4993]: Child (4994) Started
    Mar 10 22:03:30 server1 varnishd[4993]: Child (4994) said Child starts
    Mar 10 22:07:43 server1 varnishd[4993]: Manager got SIGINT
    Mar 10 22:07:43 server1 systemd[1]: Stopping Varnish Cache, a high-performance HTTP accelerator...
    Mar 10 22:07:43 server1 varnishd[4993]: Stopping Child
    Mar 10 22:07:44 server1 varnishd[4993]: Child (4994) died signal=15
    Mar 10 22:07:44 server1 varnishd[4993]: Child cleanup complete
    Mar 10 22:07:44 server1 systemd[1]: Stopped Varnish Cache, a high-performance HTTP accelerator.
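    Note the "disabled" in the Loaded: line above - a systemd unit that is not enabled will never start at boot, and the SIGINT/signal=15 lines are just a normal stop, not a crash. A minimal sketch of the usual fix, assuming the stock varnish.service unit shown in the status output:
    Code:
    # enable autostart at boot, then start and verify the service
    systemctl enable varnish
    systemctl start varnish
    systemctl status varnish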
     
    Last edited: Mar 11, 2017
  10. Rodz

    Rodz New Member

    The thing is, no port is specified in the nginx.conf file?


    Code:
    user              nginx nginx;
    worker_processes 4;
    worker_priority -10;
    worker_rlimit_nofile 260000;
    timer_resolution 100ms;
    pcre_jit on;
    include /usr/local/nginx/conf/dynamic-modules.conf;
    
    pid         logs/nginx.pid;
    events {
        worker_connections  10000;
        accept_mutex off;
        accept_mutex_delay 200ms;
        use epoll;
        #multi_accept on;
    }
    http {
     map_hash_bucket_size 128;
     map_hash_max_size 4096;
     server_names_hash_bucket_size 128;
     server_names_hash_max_size 2048;
     variables_hash_max_size 2048;
    limit_req_zone $binary_remote_addr zone=xwplogin:16m rate=40r/m;
    #limit_conn_zone $binary_remote_addr zone=xwpconlimit:16m;
    more_set_headers "Server: Nginx";
    more_set_headers "X-Powered-By: Supra Wisdom";
    # uncomment cloudflare.conf include if using cloudflare for
    # server and/or vhost site
    include /usr/local/nginx/conf/cloudflare.conf;
    include /usr/local/nginx/conf/maintenance.conf;
    more_set_headers "X-Powered-By: centminmod";
    include /usr/local/nginx/conf/vts_http.conf;
    include /usr/local/nginx/conf/geoip.conf;
    #include /usr/local/nginx/conf/pagespeedadmin.conf;
    include /usr/local/nginx/conf/fastcgi_param_https_map.conf;
    log_format  main  '$remote_addr - $remote_user [$time_local] $request '
                    '"$status" $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" "$gzip_ratio"'
                    ' "$connection" "$connection_requests" "$request_time"';
    log_format  ddos-proxy '$remote_addr for $http_x_real_ip - $remote_user [$time_local] $request '
                    '"$status" $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" "$gzip_ratio"'
                    ' "$connection" "$connection_requests" "$request_time"';
    log_format  main_ext '$remote_addr - $remote_user [$time_local] "$request" '
                             '$status $body_bytes_sent "$http_referer" '
                             '"$http_user_agent" "$http_x_forwarded_for" '
                             'rt=$request_time ua="$upstream_addr" '
                             'us="$upstream_status" ut="$upstream_response_time" '
                             'ul="$upstream_response_length" '
                             'cs=$upstream_cache_status' ;
    access_log  logs/access.log combined buffer=128k flush=5m;
    error_log   logs/error.log warn;
        index  index.php index.html index.htm;
        include       mime.types;
        default_type  application/octet-stream;
        charset utf-8;
            sendfile on;
            sendfile_max_chunk 512k;
            tcp_nopush  on;
            tcp_nodelay on;
            server_tokens off;
            server_name_in_redirect off;
           
            keepalive_timeout  5;
            keepalive_requests 500;
            lingering_time 20s;
            lingering_timeout 5s;
            keepalive_disable msie6;
     gzip on;
     gzip_vary   on;
     gzip_disable "MSIE [1-6]\.";
            gzip_static on;
            gzip_min_length   1400;
            gzip_buffers      32 8k;
            gzip_http_version 1.0;
            gzip_comp_level 5;
            gzip_proxied    any;
            gzip_types text/plain text/css text/xml application/javascript application/x-javascript application/xml application/xml+rss application/ecmascript application/json image/svg+xml;
     client_body_buffer_size 256k;
     client_body_in_file_only off;
     client_body_timeout 10s;
     client_header_buffer_size 64k;
    ## how long a connection has to complete sending
    ## it's headers for request to be processed
     client_header_timeout  5s;
     client_max_body_size 50m;
     connection_pool_size  512;
     directio  4m;
     ignore_invalid_headers on;      
     large_client_header_buffers 8 64k;
     output_buffers   8 256k;
     postpone_output  1460;
     proxy_temp_path  /tmp/nginx_proxy/;
     request_pool_size  32k;
     reset_timedout_connection on;
     send_timeout     10s;
     types_hash_max_size 2048;
     
    # for nginx proxy backends to prevent redirects to backend port
    # port_in_redirect off;
    open_file_cache max=50000 inactive=60s;
    open_file_cache_valid 120s;
    open_file_cache_min_uses 2;
    open_file_cache_errors off;
    open_log_file_cache max=10000 inactive=30s min_uses=2;
    ## limit number of concurrency connections per ip to 16
    ## add to your server {} section the next line
    ## limit_conn limit_per_ip 16;
    ## uncomment below line allows 500K sessions
    # limit_conn_log_level error;
    #######################################
    # use limit_zone for Nginx <v1.1.7 and lower
    # limit_zone $binary_remote_addr zone=limit_per_ip:16m;
    #######################################
    # use limit_conn_zone for Nginx >v1.1.8 and higher
    # limit_conn_zone $binary_remote_addr zone=limit_per_ip:16m;
    #######################################
     include /usr/local/nginx/conf/conf.d/*.conf;
    }
    
     
  11. eva2000

    eva2000 Administrator Staff Member

    That shows nothing wrong and nothing using port 80.

    Have you tried asking via one of Varnish's recommended support avenues, e.g. http://stackoverflow.com/questions/tagged/varnish?sort=newest&pageSize=40 ?

    Centmin Mod is provided as is, so short of script-related bugs or issues, any further optimisation of the web stack components - nginx, php-fpm, mariadb mysql, csf firewall etc. - or web app specific configurations, i.e. Varnish Cache setup, is left to the Centmin Mod user to deal with, so I do not provide any free support for such.

    However, Centmin Mod users are free to help each other out and to ask questions or give answers on this community forum. My hope is that this community forum evolves so that more veteran, long-time Centmin Mod users help new Centmin Mod users out :)
     
  12. ahmed

    ahmed Active Member

    Same problem here: port 80 is being used by nginx and there's no way to stop it from using it.

    tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 11560/nginx:master


    FOR SSL


    tcp 0 0 0.0.0.0:443 0.0.0.0:* LISTEN 11669/nginx:master
     
  13. eva2000

    eva2000 Administrator Staff Member

  14. ahmed

    ahmed Active Member

    That's exactly what I did, and there's no port 80 in the nginx configs.
     
  15. eva2000

    eva2000 Administrator Staff Member

    Any vhost server{} block without an explicit listen directive also defaults to listening on port 80 (an HTTPS vhost, by contrast, needs its own explicit listen 443 ssl line).
     
  16. ahmed

    ahmed Active Member

    To continue digging:

    This is the output of egrep ':80|listen' -Ro /usr/local/nginx/conf/* | grep -v phpfpmd

    Code:
     
    /usr/local/nginx/conf/acmevhostbackup/domain2.com.conf-backup-removal-https-default-190317-234558:listen
    /usr/local/nginx/conf/acmevhostbackup/domain2.com.ssl.conf-acmebackup-190317-234558:listen
    /usr/local/nginx/conf/acmevhostbackup/domain2.com.conf-backup-removal-https-default-240317-185801:listen
    /usr/local/nginx/conf/acmevhostbackup/domain2.com.ssl.conf-acmebackup-240317-185801:listen
    /usr/local/nginx/conf/acmevhostbackup/domain.com.conf-backup-removal-https-default-250317-233435:listen
    /usr/local/nginx/conf/acmevhostbackup/domain.com.ssl.conf-acmebackup-250317-233435:listen
    /usr/local/nginx/conf/conf.d/ssl.conf:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:listen
    /usr/local/nginx/conf/conf.d/virtual.conf:listen
    /usr/local/nginx/conf/conf.d/domain2.com.ssl.conf:listen
    /usr/local/nginx/conf/conf.d/domain.com.ssl.conf:listen
    /usr/local/nginx/conf/conf.d/domain.com.ssl.conf::80
    /usr/local/nginx/conf/conf.d/domain.com.ssl.conf:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default::80
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default:listen
    /usr/local/nginx/conf/nginx.conf.default::80
    /usr/local/nginx/conf/nginx.conf.default:listen
    
     
  17. ahmed

    ahmed Active Member

    and this is the SSL vhost:

    Code:
    #x# HTTPS-DEFAULT
     server {
    
       server_name domain.com www.domain.com;
       return 302 https://$server_name$request_uri;
       include /usr/local/nginx/conf/staticfiles.conf;
     }
    
    
    server {
      listen 443 ssl http2;
      server_name domain.com www.domain.com;
    
      include /usr/local/nginx/conf/ssl/domain.com/domain.com.crt.key.conf;
      include /usr/local/nginx/conf/ssl_include.conf;
    
      http2_max_field_size 16k;
      http2_max_header_size 32k;
      # mozilla recommended
      ssl_ciphers EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA256:EECDH+ECDSA+SHA384:EECDH+aRSA+SHA256:EECDH+aRSA+SHA384:EECDH+AES128:!aNULL$
      ssl_prefer_server_ciphers   on;
      #add_header Alternate-Protocol  443:npn-spdy/3;
    
      # before enabling HSTS line below read centminmod.com/nginx_domain_dns_setup.html#hsts
      #add_header Strict-Transport-Security "max-age=31536000; includeSubdomains;";
      #add_header X-Frame-Options SAMEORIGIN;
      #add_header X-Xss-Protection "1; mode=block" always;
      #add_header X-Content-Type-Options "nosniff" always;
      #spdy_headers_comp 5;
      ssl_buffer_size 1369;
      ssl_session_tickets on;
    
      # enable ocsp stapling
      resolver 8.8.8.8 8.8.4.4 valid=10m;
      resolver_timeout 10s;
      ssl_stapling on;
      ssl_stapling_verify on;
    
    # ngx_pagespeed & ngx_pagespeed handler
    #include /usr/local/nginx/conf/pagespeed.conf;
    # ngx_pagespeed & ngx_pagespeed handler
    #include /usr/local/nginx/conf/pagespeed.conf;
    #include /usr/local/nginx/conf/pagespeedhandler.conf;
    #include /usr/local/nginx/conf/pagespeedstatslog.conf;
    
      #add_header X-Frame-Options SAMEORIGIN;
      #add_header X-Xss-Protection "1; mode=block" always;
      #add_header X-Content-Type-Options "nosniff" always;
    
      # limit_conn limit_per_ip 16;
      # ssi  on;
    
      access_log /home/nginx/domains/domain.com/log/access.log main_ext buffer=256k flush=60m;
      error_log /home/nginx/domains/domain.com/log/error.log;
    
      include /usr/local/nginx/conf/autoprotect/domain.com/autoprotect-domain.com.conf;
      root /home/nginx/domains/domain.com/public;
      # uncomment cloudflare.conf include if using cloudflare for
      # server and/or vhost site
      #include /usr/local/nginx/conf/cloudflare.conf;
      include /usr/local/nginx/conf/503include-main.conf;
    
      #include /usr/local/nginx/conf/wpincludes/domain.com/wpcacheenabler_domain.com.conf;
      #include /usr/local/nginx/conf/wpincludes/domain.com/wpsupercache_domain.com.conf;
      # https://community.centminmod.com/posts/18828/
      include /usr/local/nginx/conf/wpincludes/domain.com/rediscache_domain.com.conf;
    
      location / {
     proxy_pass http://127.0.0.1:80;
         proxy_set_header Host $http_host;
         proxy_set_header X-Forwarded-Host $http_host;
         proxy_set_header X-Real-IP $remote_addr;
         proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
         proxy_set_header X-Forwarded-Proto https;
         proxy_set_header HTTPS "on";
    
      include /usr/local/nginx/conf/503include-only.conf;
      # Enables directory listings when index file not found
      #autoindex  on;
    
      # for wordpress super cache plugin
      #try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?q=$uri&$args;
    
      # for wp cache enabler plugin
      #try_files /wp-content/cache/supercache/$http_host/$cache_uri/index.html $uri $uri/ /index.php?q=$uri&$args;
    
      # for wp cache enabler plugin
      #try_files $cache_enabler_uri $uri $uri/ $custom_subdir/index.php?$args;
    
      # Wordpress Permalinks
      #try_files $uri $uri/ /index.php?q=$uri&$args;
    
      # Nginx level redis Wordpress
      # https://community.centminmod.com/posts/18828/
      try_files $uri $uri/ /index.php?$args;
    
      }
    
    
    location ~* /(wp-login\.php) {
        limit_req zone=xwplogin burst=1 nodelay;
        #limit_conn xwpconlimit 30;
        auth_basic "Private";
        auth_basic_user_file /home/nginx/domains/domain.com/htpasswd_wplogin;
        #include /usr/local/nginx/conf/php-wpsc.conf;
        # https://community.centminmod.com/posts/18828/
        include /usr/local/nginx/conf/php-rediscache.conf;
    }
    
    
    
    location ~* /(xmlrpc\.php) {
        limit_req zone=xwprpc burst=45 nodelay;
        #limit_conn xwpconlimit 30;
        #include /usr/local/nginx/conf/php-wpsc.conf;
        # https://community.centminmod.com/posts/18828/
        include /usr/local/nginx/conf/php-rediscache.conf;
    }
    
    
      include /usr/local/nginx/conf/wpincludes/domain.com/wpsecure_domain.com.conf;
      #include /usr/local/nginx/conf/php-wpsc.conf;
      # https://community.centminmod.com/posts/18828/
      include /usr/local/nginx/conf/php-rediscache.conf;
      include /usr/local/nginx/conf/staticfiles.conf;
      include /usr/local/nginx/conf/drop.conf;
      #include /usr/local/nginx/conf/errorpage.conf;
      include /usr/local/nginx/conf/vts_server.conf;
    }
    
    server {
       listen 8080;
       server_name domain.com www.domain.com;
       index index.php;
       port_in_redirect off;
    
       location / {
          try_files $uri $uri/ /index.php?$args;
       }
    
       location ~ \.php$ {
           try_files $uri =404;
           }
    }
    
     
  18. ahmed

    ahmed Active Member

    I can't explain this! Where is the port 80 that nginx is complaining about?
     
  19. eva2000

    eva2000 Administrator Staff Member

    Code (Text):
     server {
    
       server_name domain.com www.domain.com;
       return 302 https://$server_name$request_uri;
       include /usr/local/nginx/conf/staticfiles.conf;
     }
    

    That's your culprit. As I said, any server{} context without a listen directive defaults to port 80 on the main IP.
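    In other words, a server{} block with no listen line behaves as if it had listen *:80, so with Varnish owning port 80 every vhost in conf.d needs an explicit listen on some other address/port. A minimal sketch of what that looks like (example.com and the 8081 port are just placeholders, not your actual values):
    Code:
    # before: no listen directive, so nginx implicitly binds this block to *:80
    # after: give it an explicit listen so it stays off port 80
    server {
        listen 127.0.0.1:8081;
        server_name example.com www.example.com;
        return 302 https://$server_name$request_uri;
    }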
     
  20. ahmed

    ahmed Active Member

    Thanks for the help :)
    I removed this server block altogether

    and I still get:

    May 03 16:02:36 instance-2 nginx[30963]: nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
    May 03 16:02:36 instance-2 nginx[30963]: nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
    May 03 16:02:37 instance-2 nginx[30963]: nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
    May 03 16:02:37 instance-2 nginx[30963]: nginx: [emerg] bind() to 0.0.0.0:80 failed (98: Address already in use)
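    If the bind error persists after removing that block, something else is still claiming 0.0.0.0:80 - either another vhost with a missing or port-80 listen directive, or Varnish itself already bound there before nginx tried to start. A quick way to narrow it down (standard tools, the same checks used earlier in this thread):
    Code:
    # see which process currently owns port 80
    netstat -tulpn | grep ':80 '

    # list any remaining vhosts that reference port 80 or omit the port entirely
    egrep ':80|listen' -Rno /usr/local/nginx/conf/conf.d/*.conf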