diff --git a/Dockerfile.alpine b/Dockerfile.alpine index 362e708a8..d4d5c9d2d 100644 --- a/Dockerfile.alpine +++ b/Dockerfile.alpine @@ -1,9 +1,9 @@ -FROM nginxproxy/docker-gen:0.11.2 AS docker-gen +FROM nginxproxy/docker-gen:0.12.1 AS docker-gen FROM nginxproxy/forego:0.18.1 AS forego # Build the final image -FROM nginx:1.25.4-alpine +FROM nginx:1.26.0-alpine ARG NGINX_PROXY_VERSION # Add DOCKER_GEN_VERSION environment variable because diff --git a/Dockerfile.debian b/Dockerfile.debian index 726c8630d..c91f5765f 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,9 +1,9 @@ -FROM nginxproxy/docker-gen:0.11.2-debian AS docker-gen +FROM nginxproxy/docker-gen:0.12.1-debian AS docker-gen FROM nginxproxy/forego:0.18.1-debian AS forego # Build the final image -FROM nginx:1.25.4 +FROM nginx:1.26.0 ARG NGINX_PROXY_VERSION # Add DOCKER_GEN_VERSION environment variable because diff --git a/README.md b/README.md index 46d1d61ce..09e81d09f 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ [![Test](https://github.com/nginx-proxy/nginx-proxy/actions/workflows/test.yml/badge.svg)](https://github.com/nginx-proxy/nginx-proxy/actions/workflows/test.yml) [![GitHub release](https://img.shields.io/github/v/release/nginx-proxy/nginx-proxy)](https://github.com/nginx-proxy/nginx-proxy/releases) -![nginx 1.25.4](https://img.shields.io/badge/nginx-1.25.4-brightgreen.svg) +![nginx 1.26.0](https://img.shields.io/badge/nginx-1.26.0-brightgreen.svg) [![Docker Image Size](https://img.shields.io/docker/image-size/nginxproxy/nginx-proxy?sort=semver)](https://hub.docker.com/r/nginxproxy/nginx-proxy "Click to view the image on Docker Hub") [![Docker stars](https://img.shields.io/docker/stars/nginxproxy/nginx-proxy.svg)](https://hub.docker.com/r/nginxproxy/nginx-proxy "DockerHub") [![Docker pulls](https://img.shields.io/docker/pulls/nginxproxy/nginx-proxy.svg)](https://hub.docker.com/r/nginxproxy/nginx-proxy "DockerHub") @@ -18,7 +18,7 @@ docker run --detach \ --name nginx-proxy \ --publish 80:80 \ --volume /var/run/docker.sock:/tmp/docker.sock:ro \ - nginxproxy/nginx-proxy:1.4 + nginxproxy/nginx-proxy:1.5 ``` Then start any containers (here an nginx container) you want proxied with an env var `VIRTUAL_HOST=subdomain.yourdomain.com` @@ -48,7 +48,7 @@ The nginx-proxy images are available in two flavors. This image is based on the nginx:mainline image, itself based on the debian slim image. ```console -docker pull nginxproxy/nginx-proxy:1.4 +docker pull nginxproxy/nginx-proxy:1.5 ``` #### Alpine based version (`-alpine` suffix) @@ -56,14 +56,14 @@ docker pull nginxproxy/nginx-proxy:1.4 This image is based on the nginx:alpine image. ```console -docker pull nginxproxy/nginx-proxy:1.4-alpine +docker pull nginxproxy/nginx-proxy:1.5-alpine ``` #### :warning: a note on `latest` and `alpine`: It is not recommended to use the `latest` (`nginxproxy/nginx-proxy`, `nginxproxy/nginx-proxy:latest`) or `alpine` (`nginxproxy/nginx-proxy:alpine`) tag for production setups. -Those tags points to the latest commit in the `main` branch. They do not carry any promise of stability, and using them will probably put your nginx-proxy setup at risk of experiencing uncontrolled updates to non backward compatible versions (or versions with breaking changes). You should always specify the version you want to use explicitly to ensure your setup doesn't break when the image is updated. +[Those tags point](https://hub.docker.com/r/nginxproxy/nginx-proxy/tags) to the latest commit in the `main` branch. 
They do not carry any promise of stability, and using them will probably put your nginx-proxy setup at risk of experiencing uncontrolled updates to non backward compatible versions (or versions with breaking changes). You should always specify the version you want to use explicitly to ensure your setup doesn't break when the image is updated. ### Additional documentation diff --git a/docker-compose.yml b/docker-compose.yml index 809d6fa80..a261ffce0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,6 +9,9 @@ services: volumes: - /var/run/docker.sock:/tmp/docker.sock:ro + # if you want to proxy based on host ports, you'll want to use the host network + # network_mode: "host" + whoami: image: jwilder/whoami environment: diff --git a/docs/README.md b/docs/README.md index 9e26be551..6e29e03ff 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,54 +1,45 @@ -### Docker Compose +# Table of Contents + +- [Virtual Hosts and Ports](#virtual-hosts-and-ports) +- [Path-based Routing](#path-based-routing) +- [Docker Networking](#docker-networking) +- [Upstream (Backend) features](#upstream-backend-features) +- [Basic Authentication Support](#basic-authentication-support) +- [Logging](#logging) +- [SSL Support](#ssl-support) +- [IPv6 Support](#ipv6-nat) +- [HTTP/2 and HTTP/3](#http2-and-http3) +- [Headers](#headers) +- [Custom Nginx Configuration](#custom-nginx-configuration) +- [Unhashed vs SHA1 upstream names](#unhashed-vs-sha1-upstream-names) +- [Separate Containers](#separate-containers) +- [Docker Compose](#docker-compose) +- [Troubleshooting](#troubleshooting) +- [Contributing](#contributing) + +## Virtual Hosts and Ports -```yaml -version: '2' +### Multiple Hosts -services: - nginx-proxy: - image: nginxproxy/nginx-proxy - ports: - - "80:80" - volumes: - - /var/run/docker.sock:/tmp/docker.sock:ro +If you need to support multiple virtual hosts for a container, you can separate each entry with commas. For example, `foo.bar.com,baz.bar.com,bar.com` and each host will be setup the same. - whoami: - image: jwilder/whoami - expose: - - "8000" - environment: - - VIRTUAL_HOST=whoami.example - - VIRTUAL_PORT=8000 -``` - -```console -docker compose up -curl -H "Host: whoami.example" localhost -``` +### Wildcard Hosts -Example output: -```console -I'm 5b129ab83266 -``` +You can also use wildcards at the beginning and the end of host name, like `*.bar.com` or `foo.bar.*`. Or even a regular expression, which can be very useful in conjunction with a wildcard DNS service like [nip.io](https://nip.io) or [sslip.io](https://sslip.io), using `~^foo\.bar\..*\.nip\.io` will match `foo.bar.127.0.0.1.nip.io`, `foo.bar.10.0.2.2.nip.io` and all other given IPs. More information about this topic can be found in the nginx documentation about [`server_names`](http://nginx.org/en/docs/http/server_names.html). -### IPv6 support +### Default Host -You can activate the IPv6 support for the nginx-proxy container by passing the value `true` to the `ENABLE_IPV6` environment variable: +To set the default host for nginx use the env var `DEFAULT_HOST=foo.bar.com` for example ```console -docker run -d -p 80:80 -e ENABLE_IPV6=true -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy +docker run -d -p 80:80 -e DEFAULT_HOST=foo.bar.com -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy ``` -#### Scoped IPv6 Resolvers - -NginX does not support scoped IPv6 resolvers. 
In [docker-entrypoint.sh](https://github.com/nginx-proxy/nginx-proxy/tree/main/docker-entrypoint.sh) the resolvers are parsed from resolv.conf, but any scoped IPv6 addreses will be removed. - -#### IPv6 NAT - -By default, docker uses IPv6-to-IPv4 NAT. This means all client connections from IPv6 addresses will show docker's internal IPv4 host address. To see true IPv6 client IP addresses, you must [enable IPv6](https://docs.docker.com/config/daemon/ipv6/) and use [ipv6nat](https://github.com/robbertkl/docker-ipv6nat). You must also disable the userland proxy by adding `"userland-proxy": false` to `/etc/docker/daemon.json` and restarting the daemon. - -### Multiple Hosts +nginx-proxy will then redirect all requests to a container where `VIRTUAL_HOST` is set to `DEFAULT_HOST`, if they don't match any (other) `VIRTUAL_HOST`. Using the example above requests without matching `VIRTUAL_HOST` will be redirected to a plain nginx instance after running the following command: -If you need to support multiple virtual hosts for a container, you can separate each entry with commas. For example, `foo.bar.com,baz.bar.com,bar.com` and each host will be setup the same. +```console +docker run -d -e VIRTUAL_HOST=foo.bar.com nginx +``` ### Virtual Ports @@ -57,24 +48,23 @@ When your container exposes only one port, nginx-proxy will default to this port If you need to specify a different port, you can set a `VIRTUAL_PORT` env var to select a different one. This variable cannot be set to more than one port. For each host defined into `VIRTUAL_HOST`, the associated virtual port is retrieved by order of precedence: + 1. From the `VIRTUAL_PORT` environment variable 1. From the container's exposed port if there is only one 1. From the default port 80 when none of the above methods apply -### Wildcard Hosts - -You can also use wildcards at the beginning and the end of host name, like `*.bar.com` or `foo.bar.*`. Or even a regular expression, which can be very useful in conjunction with a wildcard DNS service like [nip.io](https://nip.io) or [sslip.io](https://sslip.io), using `~^foo\.bar\..*\.nip\.io` will match `foo.bar.127.0.0.1.nip.io`, `foo.bar.10.0.2.2.nip.io` and all other given IPs. More information about this topic can be found in the nginx documentation about [`server_names`](http://nginx.org/en/docs/http/server_names.html). +⬆️ [back to table of contents](#table-of-contents) -### Path-based Routing +## Path-based Routing -You can have multiple containers proxied by the same `VIRTUAL_HOST` by adding a `VIRTUAL_PATH` environment variable containing the absolute path to where the container should be mounted. For example with `VIRTUAL_HOST=foo.example.com` and `VIRTUAL_PATH=/api/v2/service`, then requests to http://foo.example.com/api/v2/service will be routed to the container. If you wish to have a container serve the root while other containers serve other paths, give the root container a `VIRTUAL_PATH` of `/`. Unmatched paths will be served by the container at `/` or will return the default nginx error page if no container has been assigned `/`. +You can have multiple containers proxied by the same `VIRTUAL_HOST` by adding a `VIRTUAL_PATH` environment variable containing the absolute path to where the container should be mounted. For example with `VIRTUAL_HOST=foo.example.com` and `VIRTUAL_PATH=/api/v2/service`, then requests to http://foo.example.com/api/v2/service will be routed to the container. 
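For instance, a minimal sketch of the example above using plain `docker run` (the backend image name `my-api-image` is a hypothetical placeholder):

```console
docker run -d \
    -e VIRTUAL_HOST=foo.example.com \
    -e VIRTUAL_PATH=/api/v2/service \
    my-api-image
```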
If you wish to have a container serve the root while other containers serve other paths, give the root container a `VIRTUAL_PATH` of `/`. Unmatched paths will be served by the container at `/` or will return the default nginx error page if no container has been assigned `/`. It is also possible to specify multiple paths with regex locations like `VIRTUAL_PATH=~^/(app1|alternative1)/`. For further details see the nginx documentation on location blocks. This is not compatible with `VIRTUAL_DEST`. The full request URI will be forwarded to the serving container in the `X-Original-URI` header. **NOTE**: Your application needs to be able to generate links starting with `VIRTUAL_PATH`. This can be achieved by it being natively on this path or having an option to prepend this path. The application does not need to expect this path in the request. -#### VIRTUAL_DEST +### VIRTUAL_DEST This environment variable can be used to rewrite the `VIRTUAL_PATH` part of the requested URL to proxied application. The default value is empty (off). Make sure that your settings won't result in the slash missing or being doubled. Both these versions can cause troubles. @@ -88,7 +78,7 @@ $ docker run -d -e VIRTUAL_HOST=example.tld -e VIRTUAL_PATH=/app1/ -e VIRTUAL_DE In this example, the incoming request `http://example.tld/app1/foo` will be proxied as `http://app1/foo` instead of `http://app1/app1/foo`. -#### Per-VIRTUAL_PATH location configuration +### Per-VIRTUAL_PATH location configuration The same options as from [Per-VIRTUAL_HOST location configuration](#Per-VIRTUAL_HOST-location-configuration) are available on a `VIRTUAL_PATH` basis. The only difference is that the filename gets an additional block `HASH=$(echo -n $VIRTUAL_PATH | sha1sum | awk '{ print $1 }')`. This is the sha1-hash of the `VIRTUAL_PATH` (no newline). This is done filename sanitization purposes. @@ -96,25 +86,36 @@ The used filename is `${VIRTUAL_HOST}_${HASH}_location` The filename of the previous example would be `example.tld_8610f6c344b4096614eab6e09d58885349f42faf_location`. -#### DEFAULT_ROOT +### DEFAULT_ROOT This environment variable of the nginx proxy container can be used to customize the return error page if no matching path is found. Furthermore it is possible to use anything which is compatible with the `return` statement of nginx. -Exception: If this is set to the string `none`, no default `location /` directive will be generated. This makes it possible for you to provide your own `location /` directive in your [`/etc/nginx/vhost.d/VIRTUAL_HOST`](#per-virtual_host) or [`/etc/nginx/vhost.d/default`](#per-virtual_host-default-configuration) files. +Exception: If this is set to the string `none`, no default `location /` directive will be generated. This makes it possible for you to provide your own `location /` directive in your [`/etc/nginx/vhost.d/VIRTUAL_HOST`](#per-virtual_host) or [`/etc/nginx/vhost.d/default`](#per-virtual_host-default-configuration) files. If unspecified, `DEFAULT_ROOT` defaults to `404`. Examples (YAML syntax): - * `DEFAULT_ROOT: "none"` prevents `nginx-proxy` from generating a default `location /` directive. - * `DEFAULT_ROOT: "418"` returns a 418 error page instead of the normal 404 one. - * `DEFAULT_ROOT: "301 https://github.com/nginx-proxy/nginx-proxy/blob/main/README.md"` redirects the client to this documentation. +- `DEFAULT_ROOT: "none"` prevents `nginx-proxy` from generating a default `location /` directive. +- `DEFAULT_ROOT: "418"` returns a 418 error page instead of the normal 404 one. 
+- `DEFAULT_ROOT: "301 https://github.com/nginx-proxy/nginx-proxy/blob/main/README.md"` redirects the client to this documentation.

-Nginx variables such as `$scheme`, `$host`, and `$request_uri` can be used. However, care must be taken to make sure the `$` signs are escaped properly. For example, if you want to use `301 $scheme://$host/myapp1$request_uri` you should use:
+Nginx variables such as `$scheme`, `$host`, and `$request_uri` can be used. However, care must be taken to make sure the `$` signs are escaped properly. For example, if you want to use `301 $scheme://$host/myapp1$request_uri` you should use:

-* Bash: `DEFAULT_ROOT='301 $scheme://$host/myapp1$request_uri'`
-* Docker Compose yaml: `- DEFAULT_ROOT: 301 $$scheme://$$host/myapp1$$request_uri`
+- Bash: `DEFAULT_ROOT='301 $scheme://$host/myapp1$request_uri'`
+- Docker Compose yaml: `- DEFAULT_ROOT: 301 $$scheme://$$host/myapp1$$request_uri`

+⬆️ [back to table of contents](#table-of-contents)
+
+## Docker Networking
+
+### Custom external HTTP/HTTPS ports
+
+If you want to use `nginx-proxy` with different external ports than the default ones of `80` for `HTTP` traffic and `443` for `HTTPS` traffic, you'll have to use the environment variable(s) `HTTP_PORT` and/or `HTTPS_PORT` in addition to the changes to the Docker port mapping. If you change the `HTTPS` port, the redirect for `HTTPS` traffic will also be configured to redirect to the custom port. Typical usage, here with the custom ports `1080` and `10443`:
+
+```console
+docker run -d -p 1080:1080 -p 10443:10443 -e HTTP_PORT=1080 -e HTTPS_PORT=10443 -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy
+```

### Multiple Networks

@@ -136,17 +137,9 @@
In this example, the `my-nginx-proxy` container will be connected to `my-network` and `my-other-network` and will be able to proxy to other containers attached to those networks.

Proxied containers running in host network mode **must** use the [`VIRTUAL_PORT`](#virtual-ports) environment variable, as this is the only way for `nginx-proxy` to get the correct port (or a port at all) for those containers.

-### Custom external HTTP/HTTPS ports
-
-If you want to use `nginx-proxy` with different external ports that the default ones of `80` for `HTTP` traffic and `443` for `HTTPS` traffic, you'll have to use the environment variable(s) `HTTP_PORT` and/or `HTTPS_PORT` in addition to the changes to the Docker port mapping. If you change the `HTTPS` port, the redirect for `HTTPS` traffic will also be configured to redirect to the custom port. Typical usage, here with the custom ports `1080` and `10443`:
-
-```console
-docker run -d -p 1080:1080 -p 10443:10443 -e HTTP_PORT=1080 -e HTTPS_PORT=10443 -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy
-```
-
### Internet vs. Local Network Access

-If you allow traffic from the public internet to access your `nginx-proxy` container, you may want to restrict some containers to the internal network only, so they cannot be accessed from the public internet.
On containers that should be restricted to the internal network, you should set the environment variable `NETWORK_ACCESS=internal`. By default, the _internal_ network is defined as `127.0.0.0/8, 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16`. To change the list of networks considered internal, mount a file on the `nginx-proxy` at `/etc/nginx/network_internal.conf` with these contents, edited to suit your needs: ```Nginx # These networks are considered "internal" @@ -161,27 +154,99 @@ deny all; When internal-only access is enabled, external clients will be denied with an `HTTP 403 Forbidden` -> If there is a load-balancer / reverse proxy in front of `nginx-proxy` that hides the client IP (example: AWS Application/Elastic Load Balancer), you will need to use the nginx `realip` module (already installed) to extract the client's IP from the HTTP request headers. Please see the [nginx realip module configuration](http://nginx.org/en/docs/http/ngx_http_realip_module.html) for more details. This configuration can be added to a new config file and mounted in `/etc/nginx/conf.d/`. +> If there is a load-balancer / reverse proxy in front of `nginx-proxy` that hides the client IP (example: AWS Application/Elastic Load Balancer), you will need to use the nginx `realip` module (already installed) to extract the client's IP from the HTTP request headers. Please see the [nginx realip module configuration](http://nginx.org/en/docs/http/ngx_http_realip_module.html) for more details. This configuration can be added to a new config file and mounted in `/etc/nginx/conf.d/`. + +⬆️ [back to table of contents](#table-of-contents) -### SSL Backends +## Upstream (Backend) features + +### SSL Upstream If you would like the reverse proxy to connect to your backend using HTTPS instead of HTTP, set `VIRTUAL_PROTO=https` on the backend container. -> Note: If you use `VIRTUAL_PROTO=https` and your backend container exposes port 80 and 443, `nginx-proxy` will use HTTPS on port 80. This is almost certainly not what you want, so you should also include `VIRTUAL_PORT=443`. +> Note: If you use `VIRTUAL_PROTO=https` and your backend container exposes port 80 and 443, `nginx-proxy` will use HTTPS on port 80. This is almost certainly not what you want, so you should also include `VIRTUAL_PORT=443`. -### uWSGI Backends +### uWSGI Upstream If you would like to connect to uWSGI backend, set `VIRTUAL_PROTO=uwsgi` on the backend container. Your backend container should then listen on a port rather than a socket and expose that port. -### FastCGI Backends - +### FastCGI Upstream + If you would like to connect to FastCGI backend, set `VIRTUAL_PROTO=fastcgi` on the backend container. Your backend container should then listen on a port rather than a socket and expose that port. - -### FastCGI File Root Directory -If you use fastcgi,you can set `VIRTUAL_ROOT=xxx` for your root directory +#### FastCGI File Root Directory + +If you use fastcgi,you can set `VIRTUAL_ROOT=xxx` for your root directory + +### Upstream Server HTTP Load Balancing Support + +> **Warning** +> This feature is experimental. The behavior may change (or the feature may be removed entirely) without warning in a future release, even if the release is not a new major version. If you use this feature, or if you would like to use this feature but you require changes to it first, please [provide feedback in #2195](https://github.com/nginx-proxy/nginx-proxy/discussions/2195). Once we have collected enough feedback we will promote this feature to officially supported. 
+
+If you have multiple containers with the same `VIRTUAL_HOST` and `VIRTUAL_PATH` settings, nginx will spread the load across all of them. To change the load balancing algorithm from nginx's default (round-robin), set the `com.github.nginx-proxy.nginx-proxy.loadbalance` label on one or more of your application containers to the desired load balancing directive. See the [`ngx_http_upstream_module` documentation](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) for available directives.
+
+> **Note**
+>
+> - Don't forget the terminating semicolon (`;`).
+> - If you are using Docker Compose, remember to escape any dollar sign (`$`) characters (`$` becomes `$$`).
+
+Docker Compose example:
+
+```yaml
+services:
+  nginx-proxy:
+    image: nginxproxy/nginx-proxy
+    ports:
+      - "80:80"
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock:ro
+    environment:
+      HTTPS_METHOD: nohttps
+  myapp:
+    image: jwilder/whoami
+    expose:
+      - "8000"
+    environment:
+      VIRTUAL_HOST: myapp.example
+      VIRTUAL_PORT: "8000"
+    labels:
+      com.github.nginx-proxy.nginx-proxy.loadbalance: "hash $$remote_addr;"
+    deploy:
+      replicas: 4
+```
+
+### Upstream Server HTTP Keep-Alive Support

-### Logging
+> **Warning**
+> This feature is experimental. The behavior may change (or the feature may be removed entirely) without warning in a future release, even if the release is not a new major version. If you use this feature, or if you would like to use this feature but you require changes to it first, please [provide feedback in #2194](https://github.com/nginx-proxy/nginx-proxy/discussions/2194). Once we have collected enough feedback we will promote this feature to officially supported.
+
+To enable HTTP keep-alive between `nginx-proxy` and backend server(s), set the `com.github.nginx-proxy.nginx-proxy.keepalive` label on the server's container either to `auto` or to the desired maximum number of idle connections. The `auto` setting will dynamically set the maximum number of idle connections to twice the number of servers listed in the corresponding `upstream{}` block, [per nginx recommendation](https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#no-keepalives).
+
+See the [nginx keepalive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) and the [Docker label documentation](https://docs.docker.com/config/labels-custom-metadata/) for details.
+
+⬆️ [back to table of contents](#table-of-contents)
+
+## Basic Authentication Support
+
+In order to be able to secure your virtual host, you have to create a file named after its equivalent `VIRTUAL_HOST` variable in the directory
+`/etc/nginx/htpasswd/${VIRTUAL_HOST}`
+
+```console
+docker run -d -p 80:80 -p 443:443 \
+    -v /path/to/htpasswd:/etc/nginx/htpasswd \
+    -v /path/to/certs:/etc/nginx/certs \
+    -v /var/run/docker.sock:/tmp/docker.sock:ro \
+    nginxproxy/nginx-proxy
+```
+
+If you want to define basic authentication for a `VIRTUAL_PATH`, you have to create a file named `/etc/nginx/htpasswd/${VIRTUAL_HOST}_${VIRTUAL_PATH_SHA1}`
+(where `$VIRTUAL_PATH_SHA1` is the SHA1 hash of the virtual path; you can use any SHA1 generator to calculate it).
+
+You'll need apache2-utils on the machine where you plan to create the htpasswd file. 
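For example, assuming a virtual host named `foo.bar.com` and the `/path/to/htpasswd` directory mounted above (both placeholder names), the password file and a `VIRTUAL_PATH` hash could be generated like this:

```console
# create the htpasswd file for the virtual host (prompts for a password)
htpasswd -c /path/to/htpasswd/foo.bar.com username

# compute the SHA1 hash of a VIRTUAL_PATH for a per-path password file
echo -n "/api/v2/service" | sha1sum | awk '{ print $1 }'
```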
Follow these [instructions](http://httpd.apache.org/docs/programs/htpasswd.html) + +⬆️ [back to table of contents](#table-of-contents) + +## Logging The default nginx access log format is @@ -189,13 +254,13 @@ The default nginx access log format is $host $remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$upstream_addr" ``` -#### Custom log format +### Custom log format If you want to use a custom access log format, you can set `LOG_FORMAT=xxx` on the proxy container. With docker compose take care to escape the `$` character with `$$` to avoid variable interpolation. Example: `$remote_addr` becomes `$$remote_addr`. -#### JSON log format +### JSON log format If you want access logs in JSON format, you can set `LOG_JSON=true`. This will correctly set the escape character to `json` and the log format to : @@ -216,15 +281,15 @@ If you want access logs in JSON format, you can set `LOG_JSON=true`. This will c } ``` -#### Log format escaping +### Log format escaping If you want to manually set nginx `log_format`'s `escape`, set the `LOG_FORMAT_ESCAPE` variable to [a value supported by nginx](https://nginx.org/en/docs/http/ngx_http_log_module.html#log_format). -#### Disable access logs +### Disable access logs To disable nginx access logs entirely, set the `DISABLE_ACCESS_LOGS` environment variable to any value. -#### Disabling colors in the container log output +### Disabling colors in the container log output To remove colors from the container log output, set the [`NO_COLOR` environment variable to any value other than an empty string](https://no-color.org/) on the nginx-proxy container. @@ -236,67 +301,9 @@ docker run --detach \ nginxproxy/nginx-proxy ``` -### Default Host - -To set the default host for nginx use the env var `DEFAULT_HOST=foo.bar.com` for example - -```console -docker run -d -p 80:80 -e DEFAULT_HOST=foo.bar.com -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy -``` - -nginx-proxy will then redirect all requests to a container where `VIRTUAL_HOST` is set to `DEFAULT_HOST`, if they don't match any (other) `VIRTUAL_HOST`. Using the example above requests without matching `VIRTUAL_HOST` will be redirected to a plain nginx instance after running the following command: - -```console -docker run -d -e VIRTUAL_HOST=foo.bar.com nginx -``` - -### Separate Containers +⬆️ [back to table of contents](#table-of-contents) -nginx-proxy can also be run as two separate containers using the [nginxproxy/docker-gen](https://hub.docker.com/r/nginxproxy/docker-gen) image and the official [nginx](https://registry.hub.docker.com/_/nginx/) image. - -You may want to do this to prevent having the docker socket bound to a publicly exposed container service. - -You can demo this pattern with docker compose: - -```console -docker compose --file docker-compose-separate-containers.yml up -curl -H "Host: whoami.example" localhost -``` - -Example output: -```console -I'm 5b129ab83266 -``` - -To run nginx proxy as a separate container you'll need to have [nginx.tmpl](https://github.com/nginx-proxy/nginx-proxy/blob/main/nginx.tmpl) on your host system. 
- -First start nginx with a volume: - - -```console -docker run -d -p 80:80 --name nginx -v /tmp/nginx:/etc/nginx/conf.d -t nginx -``` - -Then start the docker-gen container with the shared volume and template: - -```console -docker run --volumes-from nginx \ - -v /var/run/docker.sock:/tmp/docker.sock:ro \ - -v $(pwd):/etc/docker-gen/templates \ - -t nginxproxy/docker-gen -notify-sighup nginx -watch /etc/docker-gen/templates/nginx.tmpl /etc/nginx/conf.d/default.conf -``` - -Finally, start your containers with `VIRTUAL_HOST` environment variables. - -```console -docker run -e VIRTUAL_HOST=foo.bar.com ... -``` - -### SSL Support using an ACME CA - -[acme-companion](https://github.com/nginx-proxy/acme-companion) is a lightweight companion container for the nginx-proxy. It allows the automated creation/renewal of SSL certificates using the ACME protocol. - -### SSL Support +## SSL Support SSL is supported using single host, wildcard and SNI certificates using naming conventions for certificates or optionally specifying a cert name (for SNI) as an environment variable. @@ -310,7 +317,11 @@ The contents of `/path/to/certs` should contain the certificates and private key If you are running the container in a virtualized environment (Hyper-V, VirtualBox, etc...), /path/to/certs must exist in that environment or be made accessible to that environment. By default, Docker is not able to mount directories on the host machine to containers running in a virtual machine. -#### Diffie-Hellman Groups +### SSL Support using an ACME CA + +[acme-companion](https://github.com/nginx-proxy/acme-companion) is a lightweight companion container for the nginx-proxy. It allows the automated creation/renewal of SSL certificates using the ACME protocol. + +### Diffie-Hellman Groups [RFC7919 groups](https://datatracker.ietf.org/doc/html/rfc7919#appendix-A) with key lengths of 2048, 3072, and 4096 bits are [provided by `nginx-proxy`](https://github.com/nginx-proxy/nginx-proxy/dhparam). The ENV `DHPARAM_BITS` can be set to `2048` or `3072` to change from the default 4096-bit key. The DH key file will be located in the container at `/etc/nginx/dhparam/dhparam.pem`. Mounting a different `dhparam.pem` file at that location will override the RFC7919 key. @@ -326,23 +337,23 @@ Set `DHPARAM_SKIP` environment variable to `true` to disable using default Diffi docker run -e DHPARAM_SKIP=true .... ``` -#### Wildcard Certificates +### Wildcard Certificates Wildcard certificates and keys should be named after the domain name with a `.crt` and `.key` extension. For example `VIRTUAL_HOST=foo.bar.com` would use cert name `bar.com.crt` and `bar.com.key`. -#### SNI +### SNI -If your certificate(s) supports multiple domain names, you can start a container with `CERT_NAME=` to identify the certificate to be used. For example, a certificate for `*.foo.com` and `*.bar.com` could be named `shared.crt` and `shared.key`. A container running with `VIRTUAL_HOST=foo.bar.com` and `CERT_NAME=shared` will then use this shared cert. +If your certificate(s) supports multiple domain names, you can start a container with `CERT_NAME=` to identify the certificate to be used. For example, a certificate for `*.foo.com` and `*.bar.com` could be named `shared.crt` and `shared.key`. A container running with `VIRTUAL_HOST=foo.bar.com` and `CERT_NAME=shared` will then use this shared cert. 
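A minimal sketch of that shared-certificate setup, reusing the `shared.crt` / `shared.key` naming from above (the backend image name is a hypothetical placeholder):

```console
ls /path/to/certs
# shared.crt  shared.key
docker run -d -e VIRTUAL_HOST=foo.bar.com -e CERT_NAME=shared my-backend-image
```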
-#### OCSP Stapling +### OCSP Stapling To enable OCSP Stapling for a domain, `nginx-proxy` looks for a PEM certificate containing the trusted CA certificate chain at `/etc/nginx/certs/.chain.pem`, where `` is the domain name in the `VIRTUAL_HOST` directive. The format of this file is a concatenation of the public PEM CA certificates starting with the intermediate CA most near the SSL certificate, down to the root CA. This is often referred to as the "SSL Certificate Chain". If found, this filename is passed to the NGINX [`ssl_trusted_certificate` directive](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_trusted_certificate) and OCSP Stapling is enabled. -#### How SSL Support Works +### How SSL Support Works The default SSL cipher configuration is based on the [Mozilla intermediate profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29) version 5.0 which should provide compatibility with clients back to Firefox 27, Android 4.4.2, Chrome 31, Edge, IE 11 on Windows 7, Java 8u31, OpenSSL 1.0.1, Opera 20, and Safari 9. Note that the DES-based TLS ciphers were removed for security. The configuration also enables HSTS, PFS, OCSP stapling and SSL session caches. Currently TLS 1.2 and 1.3 are supported. -If you don't require backward compatibility, you can use the [Mozilla modern profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility) profile instead by including the environment variable `SSL_POLICY=Mozilla-Modern` to the nginx-proxy container or to your container. This profile is compatible with clients back to Firefox 63, Android 10.0, Chrome 70, Edge 75, Java 11, OpenSSL 1.1.1, Opera 57, and Safari 12.1. Note that this profile is **not** compatible with any version of Internet Explorer. +If you don't require backward compatibility, you can use the [Mozilla modern profile](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility) profile instead by including the environment variable `SSL_POLICY=Mozilla-Modern` to the nginx-proxy container or to your container. This profile is compatible with clients back to Firefox 63, Android 10.0, Chrome 70, Edge 75, Java 11, OpenSSL 1.1.1, Opera 57, and Safari 12.1. Note that this profile is **not** compatible with any version of Internet Explorer. Complete list of policies available through the `SSL_POLICY` environment variable, including the [AWS ELB Security Policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) and [AWS Classic ELB security policies](https://docs.aws.amazon.com/fr_fr/elasticloadbalancing/latest/classic/elb-security-policy-table.html): @@ -449,40 +460,62 @@ Note that the `Mozilla-Old` policy should use a 1024 bits DH key for compatibili The default behavior for the proxy when port 80 and 443 are exposed is as follows: -* If a virtual host has a usable cert, port 80 will redirect to 443 for that virtual host so that HTTPS is always preferred when available. -* If the virtual host does not have a usable cert, but `default.crt` and `default.key` exist, those will be used as the virtual host's certificate and the client browser will receive a 500 error. -* If the virtual host does not have a usable cert, and `default.crt` and `default.key` do not exist, TLS negotiation will fail (see [Missing Certificate](#missing-certificate) below). +- If a virtual host has a usable cert, port 80 will redirect to 443 for that virtual host so that HTTPS is always preferred when available. 
+- If the virtual host does not have a usable cert, but `default.crt` and `default.key` exist, those will be used as the virtual host's certificate and the client browser will receive a 500 error. +- If the virtual host does not have a usable cert, and `default.crt` and `default.key` do not exist, TLS negotiation will fail (see [Missing Certificate](#missing-certificate) below). To serve traffic in both SSL and non-SSL modes without redirecting to SSL, you can include the environment variable `HTTPS_METHOD=noredirect` (the default is `HTTPS_METHOD=redirect`). You can also disable the non-SSL site entirely with `HTTPS_METHOD=nohttp`, or disable the HTTPS site with `HTTPS_METHOD=nohttps`. `HTTPS_METHOD` can be specified on each container for which you want to override the default behavior or on the proxy container to set it globally. If `HTTPS_METHOD=noredirect` is used, Strict Transport Security (HSTS) is disabled to prevent HTTPS users from being redirected by the client. If you cannot get to the HTTP site after changing this setting, your browser has probably cached the HSTS policy and is automatically redirecting you back to HTTPS. You will need to clear your browser's HSTS cache or use an incognito window / different browser. -By default, [HTTP Strict Transport Security (HSTS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) is enabled with `max-age=31536000` for HTTPS sites. You can disable HSTS with the environment variable `HSTS=off` or use a custom HSTS configuration like `HSTS=max-age=31536000; includeSubDomains; preload`. +By default, [HTTP Strict Transport Security (HSTS)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security) is enabled with `max-age=31536000` for HTTPS sites. You can disable HSTS with the environment variable `HSTS=off` or use a custom HSTS configuration like `HSTS=max-age=31536000; includeSubDomains; preload`. -*WARNING*: HSTS will force your users to visit the HTTPS version of your site for the `max-age` time - even if they type in `http://` manually. The only way to get to an HTTP site after receiving an HSTS response is to clear your browser's HSTS cache. +_WARNING_: HSTS will force your users to visit the HTTPS version of your site for the `max-age` time - even if they type in `http://` manually. The only way to get to an HTTP site after receiving an HSTS response is to clear your browser's HSTS cache. -#### Missing Certificate +### Missing Certificate If HTTPS is enabled for a virtual host but its certificate is missing, nginx-proxy will configure nginx to use the default certificate (`default.crt` with `default.key`) and return a 500 error. -If the default certificate is also missing, nginx-proxy will configure nginx to accept HTTPS connections but fail the TLS negotiation. Client browsers will render a TLS error page. As of March 2023, web browsers display the following error messages: +If the default certificate is also missing, nginx-proxy will configure nginx to accept HTTPS connections but fail the TLS negotiation. Client browsers will render a TLS error page. As of March 2023, web browsers display the following error messages: + +- Chrome: - * Chrome: + > This site can't provide a secure connection + > + > example.test sent an invalid response. + > + > Try running Connectivity Diagnostics. + > + > `ERR_SSL_PROTOCOL_ERROR` - > This site can't provide a secure connection - > - > example.test sent an invalid response. - > - > Try running Connectivity Diagnostics. 
- > - > `ERR_SSL_PROTOCOL_ERROR` +- Firefox: - * Firefox: + > Secure Connection Failed + > + > An error occurred during a connection to example.test. + > Peer reports it experienced an internal error. + > + > Error code: `SSL_ERROR_INTERNAL_ERROR_ALERT` "TLS error". - > Secure Connection Failed - > - > An error occurred during a connection to example.test. - > Peer reports it experienced an internal error. - > - > Error code: `SSL_ERROR_INTERNAL_ERROR_ALERT` "TLS error". +⬆️ [back to table of contents](#table-of-contents) + +## IPv6 Support + +You can activate the IPv6 support for the nginx-proxy container by passing the value `true` to the `ENABLE_IPV6` environment variable: + +```console +docker run -d -p 80:80 -e ENABLE_IPV6=true -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy +``` + +### Scoped IPv6 Resolvers + +Nginx does not support scoped IPv6 resolvers. In [docker-entrypoint.sh](https://github.com/nginx-proxy/nginx-proxy/blob/main/app/docker-entrypoint.sh) the resolvers are parsed from resolv.conf, but any scoped IPv6 addreses will be removed. + +### IPv6 NAT + +By default, docker uses IPv6-to-IPv4 NAT. This means all client connections from IPv6 addresses will show docker's internal IPv4 host address. To see true IPv6 client IP addresses, you must [enable IPv6](https://docs.docker.com/config/daemon/ipv6/) and use [ipv6nat](https://github.com/robbertkl/docker-ipv6nat). You must also disable the userland proxy by adding `"userland-proxy": false` to `/etc/docker/daemon.json` and restarting the daemon. + +⬆️ [back to table of contents](#table-of-contents) + +## HTTP/2 and HTTP/3 ### HTTP/2 support @@ -513,97 +546,38 @@ To enable HTTP/3 for a single proxied container, set the `com.github.nginx-proxy To enable HTTP/3 globally set the environment variable `ENABLE_HTTP3` to `true` on the nginx-proxy container. -### Basic Authentication Support - -In order to be able to secure your virtual host, you have to create a file named as its equivalent VIRTUAL_HOST variable on directory -/etc/nginx/htpasswd/$VIRTUAL_HOST - -```console -docker run -d -p 80:80 -p 443:443 \ - -v /path/to/htpasswd:/etc/nginx/htpasswd \ - -v /path/to/certs:/etc/nginx/certs \ - -v /var/run/docker.sock:/tmp/docker.sock:ro \ - nginxproxy/nginx-proxy -``` - -You'll need apache2-utils on the machine where you plan to create the htpasswd file. Follow these [instructions](http://httpd.apache.org/docs/2.2/programs/htpasswd.html) - -If you want to define basic authentication for a `VIRTUAL_PATH`, you have to create a file named as /etc/nginx/htpasswd/${VIRTUAL_HOST}_${VIRTUAL_PATH_SHA1} -(where $VIRTUAL_PATH_SHA1 is the SHA1 hash for the virtual path, you can use any SHA1 online generator to calculate it). - -### Upstream (Backend) Server HTTP Load Balancing Support - -> **Warning** -> This feature is experimental. The behavior may change (or the feature may be removed entirely) without warning in a future release, even if the release is not a new major version. If you use this feature, or if you would like to use this feature but you require changes to it first, please [provide feedback in #2195](https://github.com/nginx-proxy/nginx-proxy/discussions/2195). Once we have collected enough feedback we will promote this feature to officially supported. - -If you have multiple containers with the same `VIRTUAL_HOST` and `VIRTUAL_PATH` settings, nginx will spread the load across all of them. 
To change the load balancing algorithm from nginx's default (round-robin), set the `com.github.nginx-proxy.nginx-proxy.loadbalance` label on one or more of your application containers to the desired load balancing directive. See the [`ngx_http_upstream_module` documentation](https://nginx.org/en/docs/http/ngx_http_upstream_module.html) for available directives. - -> **Note** -> * Don't forget the terminating semicolon (`;`). -> * If you are using Docker Compose, remember to escape any dollar sign (`$`) characters (`$` becomes `$$`). - -Docker Compose example: - -```yaml -services: - nginx-proxy: - image: nginxproxy/nginx-proxy - ports: - - "80:80" - volumes: - - /var/run/docker.sock:/tmp/docker.sock:ro - environment: - HTTPS_METHOD: nohttps - myapp: - image: jwilder/whoami - expose: - - "8000" - environment: - VIRTUAL_HOST: myapp.example - VIRTUAL_PORT: "8000" - labels: - com.github.nginx-proxy.nginx-proxy.loadbalance: "hash $$remote_addr;" - deploy: - replicas: 4 -``` - -### Upstream (Backend) Server HTTP Keep-Alive Support - -> **Warning** -> This feature is experimental. The behavior may change (or the feature may be removed entirely) without warning in a future release, even if the release is not a new major version. If you use this feature, or if you would like to use this feature but you require changes to it first, please [provide feedback in #2194](https://github.com/nginx-proxy/nginx-proxy/discussions/2194). Once we have collected enough feedback we will promote this feature to officially supported. - -To enable HTTP keep-alive between `nginx-proxy` and backend server(s), set the `com.github.nginx-proxy.nginx-proxy.keepalive` label on the server's container either to `auto` or to the desired maximum number of idle connections. The `auto` setting will dynamically set the maximum number of idle connections to twice the number of servers listed in the corresponding `upstream{}` block, [per nginx recommendation](https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#no-keepalives). - -See the [nginx keepalive documentation](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#keepalive) and the [Docker label documentation](https://docs.docker.com/config/labels-custom-metadata/) for details. +⬆️ [back to table of contents](#table-of-contents) -### Headers +## Headers By default, `nginx-proxy` forwards all incoming request headers from the client to the backend server unmodified, with the following exceptions: - * `Connection`: Set to `upgrade` if the client sets the `Upgrade` header, otherwise set to `close`. (Keep-alive between `nginx-proxy` and the backend server is not supported.) - * `Proxy`: Always removed if present. This prevents attackers from using the so-called [httpoxy attack](http://httpoxy.org). There is no legitimate reason for a client to send this header, and there are many vulnerable languages / platforms (`CVE-2016-5385`, `CVE-2016-5386`, `CVE-2016-5387`, `CVE-2016-5388`, `CVE-2016-1000109`, `CVE-2016-1000110`, `CERT-VU#797896`). - * `X-Real-IP`: Set to the client's IP address. - * `X-Forwarded-For`: The client's IP address is appended to the value provided by the client. (If the client did not provide this header, it is set to the client's IP address.) - * `X-Forwarded-Host`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to the value of the `Host` header provided by the client. Otherwise, the header is forwarded to the backend server unmodified. 
- * `X-Forwarded-Proto`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to `http` for plain HTTP connections and `https` for TLS connections. Otherwise, the header is forwarded to the backend server unmodified. - * `X-Forwarded-Ssl`: Set to `on` if the `X-Forwarded-Proto` header sent to the backend server is `https`, otherwise set to `off`. - * `X-Forwarded-Port`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to the port of the server that accepted the client's request. Otherwise, the header is forwarded to the backend server unmodified. - * `X-Original-URI`: Set to the original request URI. +- `Connection`: Set to `upgrade` if the client sets the `Upgrade` header, otherwise set to `close`. (Keep-alive between `nginx-proxy` and the backend server is not supported.) +- `Proxy`: Always removed if present. This prevents attackers from using the so-called [httpoxy attack](http://httpoxy.org). There is no legitimate reason for a client to send this header, and there are many vulnerable languages / platforms (`CVE-2016-5385`, `CVE-2016-5386`, `CVE-2016-5387`, `CVE-2016-5388`, `CVE-2016-1000109`, `CVE-2016-1000110`, `CERT-VU#797896`). +- `X-Real-IP`: Set to the client's IP address. +- `X-Forwarded-For`: The client's IP address is appended to the value provided by the client. (If the client did not provide this header, it is set to the client's IP address.) +- `X-Forwarded-Host`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to the value of the `Host` header provided by the client. Otherwise, the header is forwarded to the backend server unmodified. +- `X-Forwarded-Proto`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to `http` for plain HTTP connections and `https` for TLS connections. Otherwise, the header is forwarded to the backend server unmodified. +- `X-Forwarded-Ssl`: Set to `on` if the `X-Forwarded-Proto` header sent to the backend server is `https`, otherwise set to `off`. +- `X-Forwarded-Port`: If the client did not provide this header or if the `TRUST_DOWNSTREAM_PROXY` environment variable is set to `false` (see below), this is set to the port of the server that accepted the client's request. Otherwise, the header is forwarded to the backend server unmodified. +- `X-Original-URI`: Set to the original request URI. -#### Trusting Downstream Proxy Headers +### Trusting Downstream Proxy Headers For legacy compatibility reasons, `nginx-proxy` forwards any client-supplied `X-Forwarded-Proto` (which affects the value of `X-Forwarded-Ssl`), `X-Forwarded-Host`, and `X-Forwarded-Port` headers unchecked and unmodified. To prevent malicious clients from spoofing the protocol, hostname, or port that is perceived by your backend server, you are encouraged to set the `TRUST_DOWNSTREAM_PROXY` value to `false` if: - * you do not operate a second reverse proxy downstream of `nginx-proxy`, or - * you do operate a second reverse proxy downstream of `nginx-proxy` but that proxy forwards those headers unchecked from untrusted clients. 
+- you do not operate a second reverse proxy downstream of `nginx-proxy`, or +- you do operate a second reverse proxy downstream of `nginx-proxy` but that proxy forwards those headers unchecked from untrusted clients. The default for `TRUST_DOWNSTREAM_PROXY` may change to `false` in a future version of `nginx-proxy`. If you require it to be enabled, you are encouraged to explicitly set it to `true` to avoid compatibility problems when upgrading. -### Custom Nginx Configuration +⬆️ [back to table of contents](#table-of-contents) + +## Custom Nginx Configuration If you need to configure Nginx beyond what is possible using environment variables, you can provide custom configuration files on either a proxy-wide or per-`VIRTUAL_HOST` basis. -#### Replacing default proxy settings +### Replacing default proxy settings If you want to replace the default proxy settings for the nginx container, add a configuration file at `/etc/nginx/proxy.conf`. A file with the default settings would look like this: @@ -625,9 +599,9 @@ proxy_set_header X-Original-URI $request_uri; proxy_set_header Proxy ""; ``` -***NOTE***: If you provide this file it will replace the defaults; you may want to check the .tmpl file to make sure you have all of the needed options. +**_NOTE_**: If you provide this file it will replace the defaults; you may want to check the [nginx.tmpl](https://github.com/nginx-proxy/nginx-proxy/blob/main/nginx.tmpl) file to make sure you have all of the needed options. -#### Proxy-wide +### Proxy-wide To add settings on a proxy-wide basis, add your configuration file under `/etc/nginx/conf.d` using a name ending in `.conf`. @@ -647,7 +621,7 @@ Or it can be done by mounting in your custom configuration in your `docker run` docker run -d -p 80:80 -p 443:443 -v /path/to/my_proxy.conf:/etc/nginx/conf.d/my_proxy.conf:ro -v /var/run/docker.sock:/tmp/docker.sock:ro nginxproxy/nginx-proxy ``` -#### Per-VIRTUAL_HOST +### Per-VIRTUAL_HOST To add settings on a per-`VIRTUAL_HOST` basis, add your configuration file under `/etc/nginx/vhost.d`. Unlike in the proxy-wide case, which allows multiple config files with any name ending in `.conf`, the per-`VIRTUAL_HOST` file must be named exactly after the `VIRTUAL_HOST`. @@ -667,11 +641,11 @@ If you are using multiple hostnames for a single container (e.g. `VIRTUAL_HOST=e ln -s /path/to/vhost.d/www.example.com /path/to/vhost.d/example.com ``` -#### Per-VIRTUAL_HOST default configuration +### Per-VIRTUAL_HOST default configuration If you want most of your virtual hosts to use a default single configuration and then override on a few specific ones, add those settings to the `/etc/nginx/vhost.d/default` file. This file will be used on any virtual host which does not have a `/etc/nginx/vhost.d/{VIRTUAL_HOST}` file associated with it. -#### Per-VIRTUAL_HOST location configuration +### Per-VIRTUAL_HOST location configuration To add settings to the "location" block on a per-`VIRTUAL_HOST` basis, add your configuration file under `/etc/nginx/vhost.d` just like the previous section except with the suffix `_location`. @@ -689,13 +663,13 @@ If you are using multiple hostnames for a single container (e.g. 
`VIRTUAL_HOST=e ln -s /path/to/vhost.d/www.example.com /path/to/vhost.d/example.com ``` -#### Per-VIRTUAL_HOST location default configuration +### Per-VIRTUAL_HOST location default configuration If you want most of your virtual hosts to use a default single `location` block configuration and then override on a few specific ones, add those settings to the `/etc/nginx/vhost.d/default_location` file. This file will be used on any virtual host which does not have a `/etc/nginx/vhost.d/{VIRTUAL_HOST}_location` file associated with it. -#### Overriding `location` blocks +### Overriding `location` blocks -The `${VIRTUAL_HOST}_${PATH_HASH}_location`, `${VIRTUAL_HOST}_location`, and `default_location` files documented above make it possible to *augment* the generated [`location` block(s)](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) in a virtual host. In some circumstances, you may need to *completely override* the `location` block for a particular combination of virtual host and path. To do this, create a file whose name follows this pattern: +The `${VIRTUAL_HOST}_${PATH_HASH}_location`, `${VIRTUAL_HOST}_location`, and `default_location` files documented above make it possible to _augment_ the generated [`location` block(s)](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) in a virtual host. In some circumstances, you may need to _completely override_ the `location` block for a particular combination of virtual host and path. To do this, create a file whose name follows this pattern: ``` /etc/nginx/vhost.d/${VIRTUAL_HOST}_${PATH_HASH}_location_override @@ -709,9 +683,9 @@ For convenience, the `_${PATH_HASH}` part can be omitted if the path is `/`: /etc/nginx/vhost.d/${VIRTUAL_HOST}_location_override ``` -When an override file exists, the `location` block that is normally created by `nginx-proxy` is not generated. Instead, the override file is included via the [nginx `include` directive](https://nginx.org/en/docs/ngx_core_module.html#include). +When an override file exists, the `location` block that is normally created by `nginx-proxy` is not generated. Instead, the override file is included via the [nginx `include` directive](https://nginx.org/en/docs/ngx_core_module.html#include). -You are responsible for providing a suitable `location` block in your override file as required for your service. By default, `nginx-proxy` uses the `VIRTUAL_HOST` name as the upstream name for your application's Docker container; see [here](#unhashed-vs-sha1-upstream-names) for details. As an example, if your container has a `VIRTUAL_HOST` value of `app.example.com`, then to override the location block for `/` you would create a file named `/etc/nginx/vhost.d/app.example.com_location_override` that contains something like this: +You are responsible for providing a suitable `location` block in your override file as required for your service. By default, `nginx-proxy` uses the `VIRTUAL_HOST` name as the upstream name for your application's Docker container; see [here](#unhashed-vs-sha1-upstream-names) for details. 
As an example, if your container has a `VIRTUAL_HOST` value of `app.example.com`, then to override the location block for `/` you would create a file named `/etc/nginx/vhost.d/app.example.com_location_override` that contains something like this: ``` location / { @@ -719,16 +693,100 @@ location / { } ``` -#### Per-VIRTUAL_HOST `server_tokens` configuration +### Per-VIRTUAL_HOST `server_tokens` configuration + Per virtual-host `servers_tokens` directive can be configured by passing appropriate value to the `SERVER_TOKENS` environment variable. Please see the [nginx http_core module configuration](https://nginx.org/en/docs/http/ngx_http_core_module.html#server_tokens) for more details. -### Unhashed vs SHA1 upstream names +⬆️ [back to table of contents](#table-of-contents) + +## Unhashed vs SHA1 upstream names By default the nginx configuration `upstream` blocks will use this block's corresponding hostname as a predictable name. However, this can cause issues in some setups (see [this issue](https://github.com/nginx-proxy/nginx-proxy/issues/1162)). In those cases you might want to switch to SHA1 names for the `upstream` blocks by setting the `SHA1_UPSTREAM_NAME` environment variable to `true` on the nginx-proxy container. Please note that using regular expressions in `VIRTUAL_HOST` will always result in a corresponding `upstream` block with an SHA1 name. -### Troubleshooting +⬆️ [back to table of contents](#table-of-contents) + +## Separate Containers + +nginx-proxy can also be run as two separate containers using the [nginxproxy/docker-gen](https://hub.docker.com/r/nginxproxy/docker-gen) image and the official [nginx](https://registry.hub.docker.com/_/nginx/) image. + +You may want to do this to prevent having the docker socket bound to a publicly exposed container service. + +You can demo this pattern with docker compose: + +```console +docker compose --file docker-compose-separate-containers.yml up +curl -H "Host: whoami.example" localhost +``` + +Example output: + +```console +I'm 5b129ab83266 +``` + +To run nginx proxy as a separate container you'll need to have [nginx.tmpl](https://github.com/nginx-proxy/nginx-proxy/blob/main/nginx.tmpl) on your host system. + +First start nginx with a volume: + +```console +docker run -d -p 80:80 --name nginx -v /tmp/nginx:/etc/nginx/conf.d -t nginx +``` + +Then start the docker-gen container with the shared volume and template: + +```console +docker run --volumes-from nginx \ + -v /var/run/docker.sock:/tmp/docker.sock:ro \ + -v $(pwd):/etc/docker-gen/templates \ + -t nginxproxy/docker-gen -notify-sighup nginx -watch /etc/docker-gen/templates/nginx.tmpl /etc/nginx/conf.d/default.conf +``` + +Finally, start your containers with `VIRTUAL_HOST` environment variables. + +```console +docker run -e VIRTUAL_HOST=foo.bar.com ... 
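+# (sketch) if the proxied container exposes more than one port, also set VIRTUAL_PORT,
+# e.g. for a hypothetical backend listening on port 8000:
+docker run -e VIRTUAL_HOST=foo.bar.com -e VIRTUAL_PORT=8000 ...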
+``` + +⬆️ [back to table of contents](#table-of-contents) + +## Docker Compose + +```yaml +version: "2" + +services: + nginx-proxy: + image: nginxproxy/nginx-proxy + ports: + - "80:80" + volumes: + - /var/run/docker.sock:/tmp/docker.sock:ro + + whoami: + image: jwilder/whoami + expose: + - "8000" + environment: + - VIRTUAL_HOST=whoami.example + - VIRTUAL_PORT=8000 +``` + +```console +docker compose up +curl -H "Host: whoami.example" localhost +``` + +Example output: + +```console +I'm 5b129ab83266 +``` + +⬆️ [back to table of contents](#table-of-contents) + +## Troubleshooting If you can't access your `VIRTUAL_HOST`, inspect the generated nginx configuration: @@ -753,15 +811,18 @@ upstream foo.example.com { ``` The effective `Port` is retrieved by order of precedence: + 1. From the `VIRTUAL_PORT` environment variable 1. From the container's exposed port if there is only one 1. From the default port 80 when none of the above methods apply -### Contributing +⬆️ [back to table of contents](#table-of-contents) + +## Contributing Before submitting pull requests or issues, please check github to make sure an existing issue or pull request is not already open. -#### Running Tests Locally +### Running Tests Locally To run tests, you just need to run the command below: @@ -779,3 +840,5 @@ make test-alpine ``` You can learn more about how the test suite works and how to write new tests in the [test/README.md](https://github.com/nginx-proxy/nginx-proxy/tree/main/test/README.md) file. + +⬆️ [back to table of contents](#table-of-contents) diff --git a/nginx.tmpl b/nginx.tmpl index c72c1c7a4..fefb07f1a 100644 --- a/nginx.tmpl +++ b/nginx.tmpl @@ -128,7 +128,7 @@ # exposed ports:{{ range sortObjectsByKeysAsc $.container.Addresses "Port" }} {{ .Port }}/{{ .Proto }}{{ else }} (none){{ end }} {{- $default_port := when (eq (len $.container.Addresses) 1) (first $.container.Addresses).Port "80" }} # default port: {{ $default_port }} - {{- $port := or $.container.Env.VIRTUAL_PORT $default_port }} + {{- $port := when (eq $.port "legacy") (or $.container.Env.VIRTUAL_PORT $default_port) $.port }} # using port: {{ $port }} {{- $addr_obj := where $.container.Addresses "Port" $port | first }} {{- if and $addr_obj $addr_obj.HostPort }} @@ -242,6 +242,7 @@ {{- end }} {{- define "location" }} + {{- $vpath := .VPath }} {{- $override := printf "/etc/nginx/vhost.d/%s_%s_location_override" .Host (sha1 .Path) }} {{- if and (eq .Path "/") (not (exists $override)) }} {{- $override = printf "/etc/nginx/vhost.d/%s_location_override" .Host }} @@ -249,34 +250,37 @@ {{- if exists $override }} include {{ $override }}; {{- else }} - {{- $keepalive := coalesce (first (keys (groupByLabel .Containers "com.github.nginx-proxy.nginx-proxy.keepalive"))) "disabled" }} + {{- $keepalive := $vpath.keepalive }} location {{ .Path }} { - {{- if eq .NetworkTag "internal" }} + {{- if eq $vpath.network_tag "internal" }} # Only allow traffic from internal clients include /etc/nginx/network_internal.conf; {{- end }} - {{- if eq .Proto "uwsgi" }} + {{ $proto := $vpath.proto }} + {{ $upstream := $vpath.upstream }} + {{ $dest := $vpath.dest }} + {{- if eq $proto "uwsgi" }} include uwsgi_params; - uwsgi_pass {{ trim .Proto }}://{{ trim .Upstream }}; - {{- else if eq .Proto "fastcgi" }} + uwsgi_pass {{ trim $proto }}://{{ trim $upstream }}; + {{- else if eq $proto "fastcgi" }} root {{ trim .VhostRoot }}; include fastcgi_params; - fastcgi_pass {{ trim .Upstream }}; + fastcgi_pass {{ trim $upstream }}; {{- if ne $keepalive "disabled" }} fastcgi_keep_conn on; 
{{- end }} - {{- else if eq .Proto "grpc" }} - grpc_pass {{ trim .Proto }}://{{ trim .Upstream }}; - {{- else if eq .Proto "grpcs" }} - grpc_pass {{ trim .Proto }}://{{ trim .Upstream }}; + {{- else if eq $proto "grpc" }} + grpc_pass {{ trim $proto }}://{{ trim $upstream }}; + {{- else if eq $proto "grpcs" }} + grpc_pass {{ trim $proto }}://{{ trim $upstream }}; {{- else }} - proxy_pass {{ trim .Proto }}://{{ trim .Upstream }}{{ trim .Dest }}; + proxy_pass {{ trim $proto }}://{{ trim $upstream }}{{ trim $dest }}; set $upstream_keepalive {{ if ne $keepalive "disabled" }}true{{ else }}false{{ end }}; {{- end }} {{- if (exists (printf "/etc/nginx/htpasswd/%s_%s" .Host (sha1 .Path) )) }} - auth_basic "Restricted {{ .Host }}/{{ .Path }}"; + auth_basic "Restricted {{ .Host }}{{ .Path }}"; auth_basic_user_file {{ (printf "/etc/nginx/htpasswd/%s_%s" .Host (sha1 .Path)) }}; {{- else if (exists (printf "/etc/nginx/htpasswd/%s" .Host)) }} auth_basic "Restricted {{ .Host }}"; @@ -295,24 +299,27 @@ {{- end }} {{- define "upstream" }} -upstream {{ .Upstream }} { + {{- $path := .Path }} + {{- $vpath := .VPath }} +upstream {{ $vpath.upstream }} { {{- $servers := 0 }} - {{- $loadbalance := first (keys (groupByLabel .Containers "com.github.nginx-proxy.nginx-proxy.loadbalance")) }} + {{- $loadbalance := $vpath.loadbalance }} {{- if $loadbalance }} # From the container's loadbalance label: {{ $loadbalance }} {{- end }} - {{- range $container := .Containers }} + {{- range $port, $containers := $vpath.ports }} + {{- range $container := $containers }} # Container: {{ $container.Name }} - {{- $args := dict "globals" $.globals "container" $container }} - {{- template "container_ip" $args }} - {{- $ip := $args.ip }} - {{- $args := dict "container" $container }} - {{- template "container_port" $args }} - {{- $port := $args.port }} - {{- if $ip }} - {{- $servers = add1 $servers }} - server {{ $ip }}:{{ $port }}; + {{- $args := dict "globals" $.globals "container" $container }} + {{- template "container_ip" $args }} + {{- $ip := $args.ip }} + {{- $args = dict "container" $container "path" $path "port" $port }} + {{- template "container_port" $args }} + {{- if $ip }} + {{- $servers = add1 $servers }} + server {{ $ip }}:{{ $args.port }}; + {{- end }} {{- end }} {{- end }} {{- /* nginx-proxy/nginx-proxy#1105 */}} @@ -320,7 +327,7 @@ upstream {{ .Upstream }} { # Fallback entry server 127.0.0.1 down; {{- end }} - {{- $keepalive := coalesce (first (keys (groupByLabel .Containers "com.github.nginx-proxy.nginx-proxy.keepalive"))) "disabled" }} + {{- $keepalive := $vpath.keepalive }} {{- if and (ne $keepalive "disabled") (gt $servers 0) }} {{- if eq $keepalive "auto" }} keepalive {{ mul $servers 2 }}; @@ -331,6 +338,49 @@ upstream {{ .Upstream }} { } {{- end }} +{{- /* + * Template used as a function to collect virtual path properties from + * the given containers. These properties are "returned" by storing their + * values into the provided dot dict. + * + * The provided dot dict is expected to have the following entries: + * - "Containers": List of container's RuntimeContainer struct. + * - "Upstream_name" + * - "Has_virtual_paths": boolean + * - "Path" + * + * The return values will be added to the dot dict with keys: + * - "dest" + * - "proto" + * - "network_tag" + * - "upstream" + * - "loadbalance" + * - "keepalive" + */}} +{{- define "get_path_info" }} + {{- /* Get the VIRTUAL_PROTO defined by containers w/ the same vhost-vpath, falling back to "http". 
*/}} + {{- $proto := trim (or (first (groupByKeys $.Containers "Env.VIRTUAL_PROTO")) "http") }} + {{- /* Get the NETWORK_ACCESS defined by containers w/ the same vhost, falling back to "external". */}} + {{- $network_tag := or (first (groupByKeys $.Containers "Env.NETWORK_ACCESS")) "external" }} + + {{- $loadbalance := first (keys (groupByLabel $.Containers "com.github.nginx-proxy.nginx-proxy.loadbalance")) }} + {{- $keepalive := coalesce (first (keys (groupByLabel $.Containers "com.github.nginx-proxy.nginx-proxy.keepalive"))) "disabled" }} + + {{- $upstream := $.Upstream_name }} + {{- $dest := "" }} + {{- if $.Has_virtual_paths }} + {{- $sum := sha1 $.Path }} + {{- $upstream = printf "%s-%s" $upstream $sum }} + {{- $dest = or (first (groupByKeys $.Containers "Env.VIRTUAL_DEST")) "" }} + {{- end }} + {{- $_ := set $ "proto" $proto }} + {{- $_ := set $ "network_tag" $network_tag }} + {{- $_ := set $ "upstream" $upstream }} + {{- $_ := set $ "dest" $dest }} + {{- $_ := set $ "loadbalance" $loadbalance }} + {{- $_ := set $ "keepalive" $keepalive }} +{{- end }} + # If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the # scheme used to connect to this server map $http_x_forwarded_proto $proxy_x_forwarded_proto { @@ -450,33 +500,76 @@ proxy_set_header X-Original-URI $request_uri; proxy_set_header Proxy ""; {{- end }} -{{- /* - * Precompute some information about each vhost. This is done early because - * the creation of fallback servers depends on DEFAULT_HOST, HTTPS_METHOD, - * and whether there are any missing certs. - */}} -{{- range $vhost, $containers := groupByMulti $globals.containers "Env.VIRTUAL_HOST" "," }} - {{- $vhost := trim $vhost }} - {{- if not $vhost }} +{{- /* Precompute some information about each vhost. */}} +{{- range $hostname, $containers := groupByMulti $globals.containers "Env.VIRTUAL_HOST" "," }} + {{- $hostname = trim $hostname }} + {{- if not $hostname }} {{- /* Ignore containers with VIRTUAL_HOST set to the empty string. 
*/}} {{- continue }} {{- end }} + {{- $certName := first (groupByKeys $containers "Env.CERT_NAME") }} - {{- $vhostCert := closest (dir "/etc/nginx/certs") (printf "%s.crt" $vhost) }} + {{- $vhostCert := closest (dir "/etc/nginx/certs") (printf "%s.crt" $hostname) }} {{- $vhostCert = trimSuffix ".crt" $vhostCert }} {{- $vhostCert = trimSuffix ".key" $vhostCert }} {{- $cert := or $certName $vhostCert }} {{- $cert_ok := and (ne $cert "") (exists (printf "/etc/nginx/certs/%s.crt" $cert)) (exists (printf "/etc/nginx/certs/%s.key" $cert)) }} - {{- $default := eq $globals.Env.DEFAULT_HOST $vhost }} + + {{- $default := eq $globals.Env.DEFAULT_HOST $hostname }} {{- $https_method := or (first (groupByKeys $containers "Env.HTTPS_METHOD")) $globals.Env.HTTPS_METHOD "redirect" }} - {{- $http3 := parseBool (or (first (keys (groupByLabel $containers "com.github.nginx-proxy.nginx-proxy.http3.enable"))) $globals.Env.ENABLE_HTTP3 "false")}} - {{- $_ := set $globals.vhosts $vhost (dict + {{- $http2_enabled := parseBool (or (first (keys (groupByLabel $containers "com.github.nginx-proxy.nginx-proxy.http2.enable"))) $globals.Env.ENABLE_HTTP2 "true")}} + {{- $http3_enabled := parseBool (or (first (keys (groupByLabel $containers "com.github.nginx-proxy.nginx-proxy.http3.enable"))) $globals.Env.ENABLE_HTTP3 "false")}} + + {{- $is_regexp := hasPrefix "~" $hostname }} + {{- $upstream_name := when (or $is_regexp $globals.sha1_upstream_name) (sha1 $hostname) $hostname }} + + {{- /* Get the SERVER_TOKENS defined by containers w/ the same vhost, falling back to "". */}} + {{- $server_tokens := trim (or (first (groupByKeys $containers "Env.SERVER_TOKENS")) "") }} + + {{- /* Get the SSL_POLICY defined by containers w/ the same vhost, falling back to empty string (use default). */}} + {{- $ssl_policy := or (first (groupByKeys $containers "Env.SSL_POLICY")) "" }} + + {{- /* Get the HSTS defined by containers w/ the same vhost, falling back to "max-age=31536000". 
*/}} + {{- $hsts := or (first (groupByKeys $containers "Env.HSTS")) (or $globals.Env.HSTS "max-age=31536000") }} + + {{- /* Get the VIRTUAL_ROOT By containers w/ use fastcgi root */}} + {{- $vhost_root := or (first (groupByKeys $containers "Env.VIRTUAL_ROOT")) "/var/www/public" }} + + + {{- $tmp_paths := groupBy $containers "Env.VIRTUAL_PATH" }} + {{- $has_virtual_paths := gt (len $tmp_paths) 0}} + {{- if not $has_virtual_paths }} + {{- $tmp_paths = dict "/" $containers }} + {{- end }} + + {{- $paths := dict }} + + {{- range $path, $containers := $tmp_paths }} + {{- $args := dict "Containers" $containers "Path" $path "Upstream_name" $upstream_name "Has_virtual_paths" $has_virtual_paths }} + {{- template "get_path_info" $args }} + {{- $_ := set $paths $path (dict + "ports" (dict "legacy" $containers) + "dest" $args.dest + "proto" $args.proto + "network_tag" $args.network_tag + "upstream" $args.upstream + "loadbalance" $args.loadbalance + "keepalive" $args.keepalive + ) }} + {{- end }} + + {{- $_ := set $globals.vhosts $hostname (dict "cert" $cert "cert_ok" $cert_ok - "containers" $containers "default" $default + "hsts" $hsts "https_method" $https_method - "http3" $http3 + "http2_enabled" $http2_enabled + "http3_enabled" $http3_enabled + "paths" $paths + "server_tokens" $server_tokens + "ssl_policy" $ssl_policy + "vhost_root" $vhost_root ) }} {{- end }} @@ -499,7 +592,7 @@ proxy_set_header Proxy ""; {{- $https_exists := false }} {{- $default_http_exists := false }} {{- $default_https_exists := false }} - {{- $http3 := false }} + {{- $http3_enabled := false }} {{- range $vhost := $globals.vhosts }} {{- $http := or (ne $vhost.https_method "nohttp") (not $vhost.cert_ok) }} {{- $https := ne $vhost.https_method "nohttps" }} @@ -507,7 +600,7 @@ proxy_set_header Proxy ""; {{- $https_exists = or $https_exists $https }} {{- $default_http_exists = or $default_http_exists (and $http $vhost.default) }} {{- $default_https_exists = or $default_https_exists (and $https $vhost.default) }} - {{- $http3 = or $http3 $vhost.http3 }} + {{- $http3_enabled = or $http3_enabled $vhost.http3_enabled }} {{- end }} {{- $fallback_http := and $http_exists (not $default_http_exists) }} {{- $fallback_https := and $https_exists (not $default_https_exists) }} @@ -537,7 +630,7 @@ server { {{- if $globals.enable_ipv6 }} listen [::]:{{ $globals.external_https_port }} ssl; {{- /* Do not add `default_server` (see comment above). */}} {{- end }} - {{- if $http3 }} + {{- if $http3_enabled }} http3 on; listen {{ $globals.external_https_port }} quic reuseport; {{- /* Do not add `default_server` (see comment above). 
*/}} {{- if $globals.enable_ipv6 }} @@ -567,60 +660,19 @@ server { {{- end }} {{- end }} -{{- range $host, $vhost := $globals.vhosts }} - {{- $cert := $vhost.cert }} - {{- $cert_ok := $vhost.cert_ok }} - {{- $containers := $vhost.containers }} +{{- range $hostname, $vhost := $globals.vhosts }} {{- $default_server := when $vhost.default "default_server" "" }} - {{- $https_method := $vhost.https_method }} - {{- $http2 := parseBool (or (first (keys (groupByLabel $containers "com.github.nginx-proxy.nginx-proxy.http2.enable"))) $globals.Env.ENABLE_HTTP2 "true")}} - {{- $http3 := parseBool (or (first (keys (groupByLabel $containers "com.github.nginx-proxy.nginx-proxy.http3.enable"))) $globals.Env.ENABLE_HTTP3 "false")}} - - {{- $is_regexp := hasPrefix "~" $host }} - {{- $upstream_name := when (or $is_regexp $globals.sha1_upstream_name) (sha1 $host) $host }} - {{- $paths := groupBy $containers "Env.VIRTUAL_PATH" }} - {{- $nPaths := len $paths }} - {{- if eq $nPaths 0 }} - {{- $paths = dict "/" $containers }} + {{- range $path, $vpath := $vhost.paths }} +# {{ $hostname }}{{ $path }} + {{ template "upstream" (dict "globals" $globals "Path" $path "VPath" $vpath) }} {{- end }} - {{- range $path, $containers := $paths }} - {{- $upstream := $upstream_name }} - {{- if gt $nPaths 0 }} - {{- $sum := sha1 $path }} - {{- $upstream = printf "%s-%s" $upstream $sum }} - {{- end }} -# {{ $host }}{{ $path }} -{{ template "upstream" (dict "globals" $globals "Upstream" $upstream "Containers" $containers) }} - {{- end }} - - {{- /* - * Get the SERVER_TOKENS defined by containers w/ the same vhost, - * falling back to "". - */}} - {{- $server_tokens := trim (or (first (groupByKeys $containers "Env.SERVER_TOKENS")) "") }} - - {{- /* - * Get the SSL_POLICY defined by containers w/ the same vhost, falling - * back to empty string (use default). - */}} - {{- $ssl_policy := or (first (groupByKeys $containers "Env.SSL_POLICY")) "" }} - - {{- /* - * Get the HSTS defined by containers w/ the same vhost, falling back to - * "max-age=31536000". 
- */}} - {{- $hsts := or (first (groupByKeys $containers "Env.HSTS")) (or $globals.Env.HSTS "max-age=31536000") }} - - {{- /* Get the VIRTUAL_ROOT By containers w/ use fastcgi root */}} - {{- $vhost_root := or (first (groupByKeys $containers "Env.VIRTUAL_ROOT")) "/var/www/public" }} - - {{- if and $cert_ok (eq $https_method "redirect") }} + {{- if and $vhost.cert_ok (eq $vhost.https_method "redirect") }} server { - server_name {{ $host }}; - {{- if $server_tokens }} - server_tokens {{ $server_tokens }}; + server_name {{ $hostname }}; + {{- if $vhost.server_tokens }} + server_tokens {{ $vhost.server_tokens }}; {{- end }} {{ $globals.access_log }} listen {{ $globals.external_http_port }} {{ $default_server }}; @@ -649,27 +701,27 @@ server { {{- end }} server { - server_name {{ $host }}; - {{- if $server_tokens }} - server_tokens {{ $server_tokens }}; + server_name {{ $hostname }}; + {{- if $vhost.server_tokens }} + server_tokens {{ $vhost.server_tokens }}; {{- end }} {{ $globals.access_log }} - {{- if $http2 }} + {{- if $vhost.http2_enabled }} http2 on; {{- end }} - {{- if or (eq $https_method "nohttps") (not $cert_ok) (eq $https_method "noredirect") }} + {{- if or (eq $vhost.https_method "nohttps") (not $vhost.cert_ok) (eq $vhost.https_method "noredirect") }} listen {{ $globals.external_http_port }} {{ $default_server }}; {{- if $globals.enable_ipv6 }} listen [::]:{{ $globals.external_http_port }} {{ $default_server }}; {{- end }} {{- end }} - {{- if ne $https_method "nohttps" }} + {{- if ne $vhost.https_method "nohttps" }} listen {{ $globals.external_https_port }} ssl {{ $default_server }}; {{- if $globals.enable_ipv6 }} listen [::]:{{ $globals.external_https_port }} ssl {{ $default_server }}; {{- end }} - {{- if $http3 }} + {{- if $vhost.http3_enabled }} http3 on; add_header alt-svc 'h3=":{{ $globals.external_https_port }}"; ma=86400;'; listen {{ $globals.external_https_port }} quic {{ $default_server }}; @@ -678,30 +730,30 @@ server { {{- end }} {{- end }} - {{- if $cert_ok }} - {{- template "ssl_policy" (dict "ssl_policy" $ssl_policy) }} + {{- if $vhost.cert_ok }} + {{- template "ssl_policy" (dict "ssl_policy" $vhost.ssl_policy) }} ssl_session_timeout 5m; ssl_session_cache shared:SSL:50m; ssl_session_tickets off; - ssl_certificate /etc/nginx/certs/{{ (printf "%s.crt" $cert) }}; - ssl_certificate_key /etc/nginx/certs/{{ (printf "%s.key" $cert) }}; + ssl_certificate /etc/nginx/certs/{{ (printf "%s.crt" $vhost.cert) }}; + ssl_certificate_key /etc/nginx/certs/{{ (printf "%s.key" $vhost.cert) }}; - {{- if (exists (printf "/etc/nginx/certs/%s.dhparam.pem" $cert)) }} - ssl_dhparam {{ printf "/etc/nginx/certs/%s.dhparam.pem" $cert }}; + {{- if (exists (printf "/etc/nginx/certs/%s.dhparam.pem" $vhost.cert)) }} + ssl_dhparam {{ printf "/etc/nginx/certs/%s.dhparam.pem" $vhost.cert }}; {{- end }} - {{- if (exists (printf "/etc/nginx/certs/%s.chain.pem" $cert)) }} + {{- if (exists (printf "/etc/nginx/certs/%s.chain.pem" $vhost.cert)) }} ssl_stapling on; ssl_stapling_verify on; - ssl_trusted_certificate {{ printf "/etc/nginx/certs/%s.chain.pem" $cert }}; + ssl_trusted_certificate {{ printf "/etc/nginx/certs/%s.chain.pem" $vhost.cert }}; {{- end }} - {{- if (not (or (eq $https_method "noredirect") (eq $hsts "off"))) }} + {{- if (not (or (eq $vhost.https_method "noredirect") (eq $vhost.hsts "off"))) }} set $sts_header ""; if ($https) { - set $sts_header "{{ trim $hsts }}"; + set $sts_header "{{ trim $vhost.hsts }}"; } add_header Strict-Transport-Security $sts_header always; {{- end }} @@ -735,43 
+787,22 @@ server { {{- end }} {{- end }} - {{- if (exists (printf "/etc/nginx/vhost.d/%s" $host)) }} - include {{ printf "/etc/nginx/vhost.d/%s" $host }}; + {{- if (exists (printf "/etc/nginx/vhost.d/%s" $hostname)) }} + include {{ printf "/etc/nginx/vhost.d/%s" $hostname }}; {{- else if (exists "/etc/nginx/vhost.d/default") }} include /etc/nginx/vhost.d/default; {{- end }} - {{- range $path, $containers := $paths }} - {{- /* - * Get the VIRTUAL_PROTO defined by containers w/ the same - * vhost-vpath, falling back to "http". - */}} - {{- $proto := trim (or (first (groupByKeys $containers "Env.VIRTUAL_PROTO")) "http") }} - - {{- /* - * Get the NETWORK_ACCESS defined by containers w/ the same vhost, - * falling back to "external". - */}} - {{- $network_tag := or (first (groupByKeys $containers "Env.NETWORK_ACCESS")) "external" }} - {{- $upstream := $upstream_name }} - {{- $dest := "" }} - {{- if gt $nPaths 0 }} - {{- $sum := sha1 $path }} - {{- $upstream = printf "%s-%s" $upstream $sum }} - {{- $dest = (or (first (groupByKeys $containers "Env.VIRTUAL_DEST")) "") }} - {{- end }} + {{- range $path, $vpath := $vhost.paths }} {{- template "location" (dict "Path" $path - "Proto" $proto - "Upstream" $upstream - "Host" $host - "VhostRoot" $vhost_root - "Dest" $dest - "NetworkTag" $network_tag - "Containers" $containers + "Host" $hostname + "VhostRoot" $vhost.vhost_root + "VPath" $vpath ) }} {{- end }} - {{- if and (not (contains $paths "/")) (ne $globals.default_root_response "none")}} + + {{- if and (not (contains $vhost.paths "/")) (ne $globals.default_root_response "none")}} location / { return {{ $globals.default_root_response }}; } diff --git a/test/requirements/python-requirements.txt b/test/requirements/python-requirements.txt index d5033424a..f22c000dd 100644 --- a/test/requirements/python-requirements.txt +++ b/test/requirements/python-requirements.txt @@ -1,4 +1,4 @@ backoff==2.2.1 docker==7.0.0 -pytest==7.4.4 +pytest==8.2.0 requests==2.31.0 diff --git a/test/requirements/web/webserver.py b/test/requirements/web/webserver.py index b8e81c062..c4750bdb1 100755 --- a/test/requirements/web/webserver.py +++ b/test/requirements/web/webserver.py @@ -14,8 +14,8 @@ def do_GET(self): response_body += self.headers.as_string() elif self.path == "/port": response_body += f"answer from port {PORT}\n" - elif re.match("/status/(\d+)", self.path): - result = re.match("/status/(\d+)", self.path) + elif re.match(r"/status/(\d+)", self.path): + result = re.match(r"/status/(\d+)", self.path) response_code = int(result.group(1)) response_body += f"answer with response code {response_code}\n" elif self.path == "/": diff --git a/test/stress_tests/test_deleted_cert/README.md b/test/stress_tests/test_deleted_cert/README.md deleted file mode 100644 index 9fac0b908..000000000 --- a/test/stress_tests/test_deleted_cert/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Test the behavior of nginx-proxy when restarted after deleting a certificate file is was using. - -1. nginx-proxy is created with a virtual host having a certificate -1. while nginx-proxy is running, the certificate file is deleted -1. 
nginx-proxy is then restarted (without removing the container) diff --git a/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.crt b/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.crt deleted file mode 100644 index 2c92efee7..000000000 --- a/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.crt +++ /dev/null @@ -1,70 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 4096 (0x1000) - Signature Algorithm: sha256WithRSAEncryption - Issuer: O=nginx-proxy test suite, CN=www.nginx-proxy.tld - Validity - Not Before: Feb 17 23:20:54 2017 GMT - Not After : Jul 5 23:20:54 2044 GMT - Subject: CN=web.nginx-proxy - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (2048 bit) - Modulus: - 00:b6:27:63:a5:c6:e8:f4:7a:94:0e:cc:a2:62:76: - 6d:5d:33:6f:cf:19:fc:e7:e5:bb:0e:0e:d0:7c:4f: - 73:4c:48:2b:17:d1:4d:d5:9f:42:08:73:84:54:8c: - 86:d2:c5:da:59:01:3f:42:22:e0:36:f0:dc:ab:de: - 0a:bd:26:2b:22:13:87:a6:1f:23:ef:0e:99:27:8b: - 15:4a:1b:ef:93:c9:6b:91:de:a0:02:0c:62:bb:cc: - 56:37:e8:25:92:c3:1f:f1:69:d8:7c:a8:33:e0:89: - ce:14:67:a0:39:77:88:91:e6:a3:07:97:90:22:88: - d0:79:18:63:fb:6f:7e:ee:2b:42:7e:23:f5:e7:da: - e9:ee:6a:fa:96:65:9f:e1:2b:15:49:c8:cd:2d:ce: - 86:4f:2c:2a:67:79:bf:41:30:14:cc:f6:0f:14:74: - 9e:b6:d3:d0:3b:f0:1b:b8:e8:19:2a:fd:d6:fd:dc: - 4b:4e:65:7d:9b:bf:37:7e:2d:35:22:2e:74:90:ce: - 41:35:3d:41:a0:99:db:97:1f:bf:3e:18:3c:48:fb: - da:df:c6:4e:4e:b9:67:b8:10:d5:a5:13:03:c4:b7: - 65:e7:aa:f0:14:4b:d3:4d:ea:fe:8f:69:cf:50:21: - 63:27:cf:9e:4c:67:15:7b:3f:3b:da:cb:17:80:61: - 1e:25 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Alternative Name: - DNS:web.nginx-proxy - Signature Algorithm: sha256WithRSAEncryption - 09:31:be:db:4e:b0:b6:68:da:ae:5b:16:51:29:fc:9f:61:b6: - 5a:2f:3c:35:ef:67:76:97:b0:34:4e:3b:b4:d6:88:19:4f:84: - 2e:73:d3:c0:3a:4c:41:54:6c:bb:67:89:67:ad:25:55:d7:d4: - 80:fe:a7:3f:3d:9e:f1:34:96:d8:da:5a:78:51:c0:63:f1:52: - 29:35:55:f4:7d:70:1c:d3:96:62:7f:64:86:81:52:27:c4:c6: - 10:13:c6:73:56:4d:32:d0:b3:c3:c8:2c:25:83:e4:2b:1d:d4: - 74:30:e5:85:af:2d:b6:a5:6b:fe:5d:d3:3c:00:58:94:f4:6a: - f5:a6:1d:cf:f9:ed:d5:27:ed:13:24:b2:4f:2b:f3:b8:e4:af: - 0c:1d:fe:e0:6a:01:5e:a2:44:ff:3e:96:fa:6c:39:a3:51:37: - f3:72:55:d8:2d:29:6e:de:95:b9:d8:e3:1e:65:a5:9c:0d:79: - 2d:39:ab:c7:ac:16:b6:a5:71:4b:35:a4:6c:72:47:1b:72:9c: - 67:58:c1:fc:f6:7f:a7:73:50:7b:d6:27:57:74:a1:31:38:a7: - 31:e3:b9:d4:c9:45:33:ec:ed:16:cf:c5:bd:d0:03:b1:45:3f: - 68:0d:91:5c:26:4e:37:05:74:ed:3e:75:5e:ca:5e:ee:e2:51: - 4b:da:08:99 ------BEGIN CERTIFICATE----- -MIIC8zCCAdugAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwPzEfMB0GA1UECgwWbmdp -bngtcHJveHkgdGVzdCBzdWl0ZTEcMBoGA1UEAwwTd3d3Lm5naW54LXByb3h5LnRs -ZDAeFw0xNzAyMTcyMzIwNTRaFw00NDA3MDUyMzIwNTRaMBoxGDAWBgNVBAMMD3dl -Yi5uZ2lueC1wcm94eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALYn -Y6XG6PR6lA7MomJ2bV0zb88Z/Ofluw4O0HxPc0xIKxfRTdWfQghzhFSMhtLF2lkB -P0Ii4Dbw3KveCr0mKyITh6YfI+8OmSeLFUob75PJa5HeoAIMYrvMVjfoJZLDH/Fp -2HyoM+CJzhRnoDl3iJHmoweXkCKI0HkYY/tvfu4rQn4j9efa6e5q+pZln+ErFUnI -zS3Ohk8sKmd5v0EwFMz2DxR0nrbT0DvwG7joGSr91v3cS05lfZu/N34tNSIudJDO -QTU9QaCZ25cfvz4YPEj72t/GTk65Z7gQ1aUTA8S3Zeeq8BRL003q/o9pz1AhYyfP -nkxnFXs/O9rLF4BhHiUCAwEAAaMeMBwwGgYDVR0RBBMwEYIPd2ViLm5naW54LXBy -b3h5MA0GCSqGSIb3DQEBCwUAA4IBAQAJMb7bTrC2aNquWxZRKfyfYbZaLzw172d2 -l7A0Tju01ogZT4Quc9PAOkxBVGy7Z4lnrSVV19SA/qc/PZ7xNJbY2lp4UcBj8VIp -NVX0fXAc05Zif2SGgVInxMYQE8ZzVk0y0LPDyCwlg+QrHdR0MOWFry22pWv+XdM8 -AFiU9Gr1ph3P+e3VJ+0TJLJPK/O45K8MHf7gagFeokT/Ppb6bDmjUTfzclXYLSlu 
-3pW52OMeZaWcDXktOavHrBa2pXFLNaRsckcbcpxnWMH89n+nc1B71idXdKExOKcx -47nUyUUz7O0Wz8W90AOxRT9oDZFcJk43BXTtPnVeyl7u4lFL2giZ ------END CERTIFICATE----- diff --git a/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.key b/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.key deleted file mode 100644 index dca1c9983..000000000 --- a/test/stress_tests/test_deleted_cert/certs/web.nginx-proxy.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAtidjpcbo9HqUDsyiYnZtXTNvzxn85+W7Dg7QfE9zTEgrF9FN -1Z9CCHOEVIyG0sXaWQE/QiLgNvDcq94KvSYrIhOHph8j7w6ZJ4sVShvvk8lrkd6g -Agxiu8xWN+glksMf8WnYfKgz4InOFGegOXeIkeajB5eQIojQeRhj+29+7itCfiP1 -59rp7mr6lmWf4SsVScjNLc6GTywqZ3m/QTAUzPYPFHSettPQO/AbuOgZKv3W/dxL -TmV9m783fi01Ii50kM5BNT1BoJnblx+/Phg8SPva38ZOTrlnuBDVpRMDxLdl56rw -FEvTTer+j2nPUCFjJ8+eTGcVez872ssXgGEeJQIDAQABAoIBAGQCMFW+ZfyEqHGP -rMA+oUEAkqy0agSwPwky3QjDXlxNa0uCYSeebtTRB6CcHxHuCzm+04puN4gyqhW6 -rU64fAoTivCMPGBuNWxekmvD9r+/YM4P2u4E+th9EgFT9f0kII+dO30FpKXtQzY0 -xuWGWXcxl+T9M+eiEkPKPmq4BoqgTDo5ty7qDv0ZqksGotKFmdYbtSvgBAueJdwu -VWJvenI9F42ExBRKOW1aldiRiaYBCLiCVPKJtOg9iuOP9RHUL1SE8xy5I5mm78g3 -a13ji3BNq3yS+VhGjQ7zDy1V1jGupLoJw4I7OThu8hy+B8Vt8EN/iqakufOkjlTN -xTJ33CkCgYEA5Iymg0NTjWk6aEkFa9pERjfUWqdVp9sWSpFFZZgi55n7LOx6ohi3 -vuLim3is/gYfK2kU/kHGZZLPnT0Rdx0MbOB4XK0CAUlqtUd0IyO4jMZ06g4/kn3N -e2jLdCCIBoEQuLk4ELxj2mHsLQhEvDrg7nzU2WpTHHhvJbIbDWOAxhsCgYEAzAgv -rKpanF+QDf4yeKHxAj2rrwRksTw4Pe7ZK/bog/i+HIVDA70vMapqftHbual/IRrB -JL7hxskoJ/h9c1w4xkWDjqkSKz8/Ihr4dyPfWyGINWbx/rarT/m5MU5SarScoK7o -Xgb25x+W+61rtI+2JhVRGO86+JiAeT4LkAX88L8CgYAwHHug/jdEeXZWJakCfzwI -HBCT1M3vO+uBXvtg25ndb0i0uENIhDOJ93EEkW65Osis9r34mBgPocwaqZRXosHO -2aH8wF6/rpjL+HK2QvrCh7Rs4Pr494qeA/1wQLjhxaGjgToQK9hJTHvPLwJpLWvU -SGr2Ka+9Oo0LPmb7dorRKQKBgQCLsNcjOodLJMp2KiHYIdfmlt6itzlRd09yZ8Nc -rHHJWVagJEUbnD1hnbHIHlp3pSqbObwfMmlWNoc9xo3tm6hrZ1CJLgx4e5b3/Ms8 -ltznge/F0DPDFsH3wZwfu+YFlJ7gDKCfL9l/qEsxCS0CtJobPOEHV1NivNbJK8ey -1ca19QKBgDTdMOUsobAmDEkPQIpxfK1iqYAB7hpRLi79OOhLp23NKeyRNu8FH9fo -G3DZ4xUi6hP2bwiYugMXDyLKfvxbsXwQC84kGF8j+bGazKNhHqEC1OpYwmaTB3kg -qL9cHbjWySeRdIsRY/eWmiKjUwmiO54eAe1HWUdcsuz8yM3xf636 ------END RSA PRIVATE KEY----- diff --git a/test/stress_tests/test_deleted_cert/test_restart_while_missing_cert.py b/test/stress_tests/test_deleted_cert/test_restart_while_missing_cert.py deleted file mode 100644 index d7e4cbbb5..000000000 --- a/test/stress_tests/test_deleted_cert/test_restart_while_missing_cert.py +++ /dev/null @@ -1,72 +0,0 @@ -import logging -import os -from os.path import join, isfile -from shutil import copy -from time import sleep - -import pytest -from requests import ConnectionError - -script_dir = os.path.dirname(__file__) - -pytestmark = pytest.mark.xfail() # TODO delete this marker once those issues are fixed - -@pytest.fixture(scope="module", autouse=True) -def certs(): - """ - pytest fixture that provides cert and key files into the tmp_certs directory - """ - file_names = ("web.nginx-proxy.crt", "web.nginx-proxy.key") - logging.info("copying server cert and key files into tmp_certs") - for f_name in file_names: - copy(join(script_dir, "certs", f_name), join(script_dir, "tmp_certs")) - yield - logging.info("cleaning up the tmp_cert directory") - for f_name in file_names: - if isfile(join(script_dir, "tmp_certs", f_name)): - os.remove(join(script_dir, "tmp_certs", f_name)) - -############################################################################### - - -def test_unknown_virtual_host_is_503(docker_compose, nginxproxy): - r = nginxproxy.get("http://foo.nginx-proxy/") - assert r.status_code 
== 503 - - -def test_http_web_is_301(docker_compose, nginxproxy): - r = nginxproxy.get("http://web.nginx-proxy/port", allow_redirects=False) - assert r.status_code == 301 - - -def test_https_web_is_200(docker_compose, nginxproxy): - r = nginxproxy.get("https://web.nginx-proxy/port") - assert r.status_code == 200 - assert "answer from port 81\n" in r.text - - -@pytest.mark.incremental -def test_delete_cert_and_restart_reverseproxy(docker_compose): - os.remove(join(script_dir, "tmp_certs", "web.nginx-proxy.crt")) - docker_compose.containers.get("reverseproxy").restart() - sleep(3) # give time for the container to initialize - assert "running" == docker_compose.containers.get("reverseproxy").status - - -@pytest.mark.incremental -def test_unknown_virtual_host_is_still_503(nginxproxy): - r = nginxproxy.get("http://foo.nginx-proxy/") - assert r.status_code == 503 - - -@pytest.mark.incremental -def test_http_web_is_now_200(nginxproxy): - r = nginxproxy.get("http://web.nginx-proxy/port", allow_redirects=False) - assert r.status_code == 200 - assert "answer from port 81\n" == r.text - - -@pytest.mark.incremental -def test_https_web_is_now_broken_since_there_is_no_cert(nginxproxy): - with pytest.raises(ConnectionError): - nginxproxy.get("https://web.nginx-proxy/port") diff --git a/test/stress_tests/test_deleted_cert/tmp_certs/.gitignore b/test/stress_tests/test_deleted_cert/tmp_certs/.gitignore deleted file mode 100644 index c96a04f00..000000000 --- a/test/stress_tests/test_deleted_cert/tmp_certs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore \ No newline at end of file diff --git a/test/test_build.py b/test/test_build.py new file mode 100644 index 000000000..9c798082e --- /dev/null +++ b/test/test_build.py @@ -0,0 +1,62 @@ +""" +Test that nginx-proxy-tester can build successfully +""" +import pytest +import docker +import re + +client = docker.from_env() + +@pytest.fixture(scope = "session") +def docker_build(request): + # Define Dockerfile path + dockerfile_path = "requirements/" + dockerfile_name = "Dockerfile-nginx-proxy-tester" + + # Build the Docker image + image, logs = client.images.build( + path = dockerfile_path, + dockerfile = dockerfile_name, + rm = True, # Remove intermediate containers + tag = "nginx-proxy-tester-ci", # Tag for the built image + ) + + # Check for build success + for log in logs: + if "stream" in log: + print(log["stream"].strip()) + if "error" in log: + raise Exception(log["error"]) + + def teardown(): + # Clean up after teardown + client.images.remove(image.id, force=True) + + request.addfinalizer(teardown) + + # Return the image name + return "nginx-proxy-tester-ci" + +def test_build_nginx_proxy_tester(docker_build): + assert docker_build == "nginx-proxy-tester-ci" + +def test_run_nginx_proxy_tester(docker_build): + # Run the container with 'pytest -v' command to output version info + container = client.containers.run("nginx-proxy-tester-ci", + command = "pytest -V", + detach = True, + ) + + # Wait for the container to finish and get the exit code + result = container.wait() + exit_code = result.get("StatusCode", 1) # Default to 1 (error) if not found + + # Get the output logs from the container + output = container.logs().decode("utf-8").strip() + + # Clean up: Remove the container + container.remove() + + # Assertions + assert exit_code == 0, "Container exited with a non-zero exit code" + assert re.search(r"pytest\s\d+\.\d+\.\d+", output) diff --git a/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld 
b/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld new file mode 100644 index 000000000..336275a1b --- /dev/null +++ b/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld @@ -0,0 +1 @@ +vhost:$2a$13$/aPYmoK0mmgyAI4TpKdFY.6441Ugo39MdXjhpm.Pp6D15rbz9tvz. diff --git a/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld_8d960560c82f4e6c8b1b0f03eb30a1afd00e5696 b/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld_8d960560c82f4e6c8b1b0f03eb30a1afd00e5696 new file mode 100644 index 000000000..9816a8372 --- /dev/null +++ b/test/test_htpasswd/htpasswd/htpasswd.nginx-proxy.tld_8d960560c82f4e6c8b1b0f03eb30a1afd00e5696 @@ -0,0 +1 @@ +vpath:$2a$13$/aPYmoK0mmgyAI4TpKdFY.6441Ugo39MdXjhpm.Pp6D15rbz9tvz. diff --git a/test/test_htpasswd/test_htpasswd_virtual_host.py b/test/test_htpasswd/test_htpasswd_virtual_host.py new file mode 100644 index 000000000..aff3a62ad --- /dev/null +++ b/test/test_htpasswd/test_htpasswd_virtual_host.py @@ -0,0 +1,13 @@ +import pytest + +def test_htpasswd_virtual_host_is_restricted(docker_compose, nginxproxy): + r = nginxproxy.get("http://htpasswd.nginx-proxy.tld/port") + assert r.status_code == 401 + assert "WWW-Authenticate" in r.headers + assert r.headers["WWW-Authenticate"] == 'Basic realm="Restricted htpasswd.nginx-proxy.tld"' + + +def test_htpasswd_virtual_host_basic_auth(docker_compose, nginxproxy): + r = nginxproxy.get("http://htpasswd.nginx-proxy.tld/port", auth=("vhost", "password")) + assert r.status_code == 200 + assert r.text == "answer from port 80\n" diff --git a/test/stress_tests/test_deleted_cert/docker-compose.yml b/test/test_htpasswd/test_htpasswd_virtual_host.yml similarity index 52% rename from test/stress_tests/test_deleted_cert/docker-compose.yml rename to test/test_htpasswd/test_htpasswd_virtual_host.yml index a362e443d..b3f15df9e 100644 --- a/test/stress_tests/test_deleted_cert/docker-compose.yml +++ b/test/test_htpasswd/test_htpasswd_virtual_host.yml @@ -4,14 +4,14 @@ services: web: image: web expose: - - "81" + - "80" environment: - WEB_PORTS: 81 - VIRTUAL_HOST: web.nginx-proxy + WEB_PORTS: 80 + VIRTUAL_HOST: htpasswd.nginx-proxy.tld - reverseproxy: + sut: + container_name: sut image: nginxproxy/nginx-proxy:test - container_name: reverseproxy volumes: - /var/run/docker.sock:/tmp/docker.sock:ro - - ./tmp_certs:/etc/nginx/certs:ro + - ./htpasswd:/etc/nginx/htpasswd:ro diff --git a/test/test_htpasswd/test_htpasswd_virtual_path.py b/test/test_htpasswd/test_htpasswd_virtual_path.py new file mode 100644 index 000000000..262b31472 --- /dev/null +++ b/test/test_htpasswd/test_htpasswd_virtual_path.py @@ -0,0 +1,12 @@ +import pytest + +def test_htpasswd_virtual_path_is_restricted(docker_compose, nginxproxy): + r = nginxproxy.get("http://htpasswd.nginx-proxy.tld/foo/port") + assert r.status_code == 401 + assert "WWW-Authenticate" in r.headers + assert r.headers["WWW-Authenticate"] == 'Basic realm="Restricted htpasswd.nginx-proxy.tld/foo/"' + +def test_htpasswd_virtual_path_basic_auth(docker_compose, nginxproxy): + r = nginxproxy.get("http://htpasswd.nginx-proxy.tld/foo/port", auth=("vpath", "password")) + assert r.status_code == 200 + assert r.text == "answer from port 80\n" diff --git a/test/test_htpasswd/test_htpasswd_virtual_path.yml b/test/test_htpasswd/test_htpasswd_virtual_path.yml new file mode 100644 index 000000000..ffe1a0858 --- /dev/null +++ b/test/test_htpasswd/test_htpasswd_virtual_path.yml @@ -0,0 +1,19 @@ +version: "2" + +services: + web: + image: web + expose: + - "80" + environment: + WEB_PORTS: 80 + VIRTUAL_HOST: htpasswd.nginx-proxy.tld 
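+      # VIRTUAL_PATH (next line) restricts this vhost entry to requests under /foo/,
+      # and VIRTUAL_DEST: / strips that prefix before proxying, so /foo/port should
+      # reach the backend webserver as /port (matching the assertions in the test above)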
+ VIRTUAL_PATH: /foo/ + VIRTUAL_DEST: / + + sut: + container_name: sut + image: nginxproxy/nginx-proxy:test + volumes: + - /var/run/docker.sock:/tmp/docker.sock:ro + - ./htpasswd:/etc/nginx/htpasswd:ro
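For reference, the hex suffix in the per-path htpasswd fixture above follows the same scheme as the location override files: the template looks up `/etc/nginx/htpasswd/%s_%s` built from the host and `sha1 .Path`, and the new realm string is simply `Restricted` plus the host concatenated with the path. A minimal sketch in plain Python showing how both values are derived for the `/foo/` test fixture:

```python
import hashlib

def htpasswd_file(virtual_host: str, virtual_path: str) -> str:
    """Per-path htpasswd file consulted by the generated config (sketch)."""
    path_hash = hashlib.sha1(virtual_path.encode()).hexdigest()
    return f"/etc/nginx/htpasswd/{virtual_host}_{path_hash}"

def auth_realm(virtual_host: str, virtual_path: str) -> str:
    """Realm string returned in the WWW-Authenticate header (sketch)."""
    return f"Restricted {virtual_host}{virtual_path}"

# for the fixture above (VIRTUAL_HOST: htpasswd.nginx-proxy.tld, VIRTUAL_PATH: /foo/)
print(htpasswd_file("htpasswd.nginx-proxy.tld", "/foo/"))
print(auth_realm("htpasswd.nginx-proxy.tld", "/foo/"))
```

Running this should reproduce the `htpasswd.nginx-proxy.tld_8d9605…` file name used by the new test and the `Restricted htpasswd.nginx-proxy.tld/foo/` realm asserted in `test_htpasswd_virtual_path.py`.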