diff --git a/hosting/letsencrypt/certificate-request.sh b/hosting/letsencrypt/certificate-request.sh index d029da265f..83f314fc88 100644 --- a/hosting/letsencrypt/certificate-request.sh +++ b/hosting/letsencrypt/certificate-request.sh @@ -10,15 +10,14 @@ certbot certonly --webroot --webroot-path="/var/www/html" \ if (($? != 0)); then echo "ERROR: certbot request failed for $CUSTOM_DOMAIN use http on port 80 - exiting" - nginx -s stop exit 1 else cp /app/letsencrypt/options-ssl-nginx.conf /etc/letsencrypt/options-ssl-nginx.conf cp /app/letsencrypt/ssl-dhparams.pem /etc/letsencrypt/ssl-dhparams.pem cp /app/letsencrypt/nginx-ssl.conf /etc/nginx/sites-available/nginx-ssl.conf - sed -i 's/CUSTOM_DOMAIN/$CUSTOM_DOMAIN/g' /etc/nginx/sites-available/nginx-ssl.conf + sed -i "s/CUSTOM_DOMAIN/$CUSTOM_DOMAIN/g" /etc/nginx/sites-available/nginx-ssl.conf ln -s /etc/nginx/sites-available/nginx-ssl.conf /etc/nginx/sites-enabled/nginx-ssl.conf echo "INFO: restart nginx after certbot request" - nginx -s reload + /etc/init.d/nginx restart fi diff --git a/hosting/letsencrypt/nginx-ssl.conf b/hosting/letsencrypt/nginx-ssl.conf index c1a1d91917..50c5e0198a 100644 --- a/hosting/letsencrypt/nginx-ssl.conf +++ b/hosting/letsencrypt/nginx-ssl.conf @@ -6,6 +6,7 @@ server { ssl_certificate_key /etc/letsencrypt/live/CUSTOM_DOMAIN/privkey.pem; include /etc/letsencrypt/options-ssl-nginx.conf; ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; + client_max_body_size 1000m; ignore_invalid_headers off; proxy_buffering off; @@ -91,4 +92,5 @@ server { gzip_proxied any; gzip_comp_level 6; gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml; + } diff --git a/hosting/scripts/build-target-paths.sh b/hosting/scripts/build-target-paths.sh new file mode 100644 index 0000000000..d1c9b5cd05 --- /dev/null +++ b/hosting/scripts/build-target-paths.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +echo ${TARGETBUILD} > /buildtarget.txt +if [[ 
"${TARGETBUILD}" = "aas" ]]; then + # Azure AppService uses /home for persistent data & SSH on port 2222 + mkdir -p /home/budibase/{minio,couchdb} + mkdir -p /home/budibase/couchdb/data + chown -R couchdb:couchdb /home/budibase/couchdb/ + apt update + apt-get install -y openssh-server + sed -i 's#dir=/opt/couchdb/data/search#dir=/home/budibase/couchdb/data/search#' /opt/clouseau/clouseau.ini + sed -i 's#/minio/minio server /minio &#/minio/minio server /home/budibase/minio &#' /runner.sh + sed -i 's#database_dir = ./data#database_dir = /home/budibase/couchdb/data#' /opt/couchdb/etc/default.ini + sed -i 's#view_index_dir = ./data#view_index_dir = /home/budibase/couchdb/data#' /opt/couchdb/etc/default.ini + sed -i "s/#Port 22/Port 2222/" /etc/ssh/sshd_config + /etc/init.d/ssh restart +fi diff --git a/hosting/scripts/healthcheck.sh b/hosting/scripts/healthcheck.sh index fa6f511eb9..80f2ece0b6 100644 --- a/hosting/scripts/healthcheck.sh +++ b/hosting/scripts/healthcheck.sh @@ -25,6 +25,13 @@ if [[ $(redis-cli -a $REDIS_PASSWORD --no-auth-warning ping) != 'PONG' ]]; then healthy=false fi # mino, clouseau, +nginx -t -q +NGINX_STATUS=$? + +if [[ $NGINX_STATUS -gt 0 ]]; then + echo 'ERROR: Nginx config problem'; + healthy=false +fi if [ $healthy == true ]; then exit 0 diff --git a/hosting/single/Dockerfile b/hosting/single/Dockerfile index 24e90fc818..5e1b0b1374 100644 --- a/hosting/single/Dockerfile +++ b/hosting/single/Dockerfile @@ -19,8 +19,12 @@ ADD packages/worker . RUN node /pinVersions.js && yarn && yarn build && /cleanup.sh FROM couchdb:3.2.1 - +# TARGETARCH can be amd64 or arm e.g. docker build --build-arg TARGETARCH=amd64 ARG TARGETARCH amd64 +#TARGETBUILD can be set to single (for single docker image) or aas (for azure app service) + # e.g. docker build --build-arg TARGETBUILD=aas .... 
+ARG TARGETBUILD single +ENV TARGETBUILD $TARGETBUILD COPY --from=build /app /app COPY --from=build /worker /worker @@ -33,7 +37,7 @@ ENV \ COUCHDB_PASSWORD=budibase \ COUCHDB_USER=budibase \ COUCH_DB_URL=http://budibase:budibase@localhost:5984 \ - CUSTOM_DOMAIN=budi001.custom.com \ + # CUSTOM_DOMAIN=budi001.custom.com \ DEPLOYMENT_ENVIRONMENT=docker \ INTERNAL_API_KEY=budibase \ JWT_SECRET=testsecret \ @@ -44,6 +48,7 @@ ENV \ REDIS_PASSWORD=budibase \ REDIS_URL=localhost:6379 \ SELF_HOSTED=1 \ + TARGETBUILD=$TARGETBUILD \ WORKER_PORT=4002 \ WORKER_URL=http://localhost:4002 @@ -62,6 +67,7 @@ RUN curl -sL https://deb.nodesource.com/setup_16.x -o /tmp/nodesource_setup.sh & # setup nginx ADD hosting/single/nginx.conf /etc/nginx +ADD hosting/single/nginx-default-site.conf /etc/nginx/sites-enabled/default RUN mkdir -p /var/log/nginx && \ touch /var/log/nginx/error.log && \ touch /var/run/nginx.pid @@ -100,6 +106,12 @@ RUN chmod +x ./runner.sh ADD hosting/scripts/healthcheck.sh . RUN chmod +x ./healthcheck.sh +ADD hosting/scripts/build-target-paths.sh . +RUN chmod +x ./build-target-paths.sh + +# For Azure App Service install SSH & point data locations to /home +RUN /build-target-paths.sh + # cleanup cache RUN yarn cache clean -f diff --git a/hosting/single/README.md b/hosting/single/README.md index d62359a628..1147d55c89 100644 --- a/hosting/single/README.md +++ b/hosting/single/README.md @@ -4,7 +4,6 @@ As an alternative to running several docker containers via docker-compose, the files under ./hosting/single can be used to build a docker image containing all of the Budibase components (minio, couch, clouseau etc). We call this the 'single image' container as the Dockerfile adds all the components to a single docker image. - ## Usage - Amend Environment Variables @@ -22,9 +21,9 @@ If you have other arrangements for a proxy in front of the single image containe We would suggest building the image with 6GB of RAM and 20GB of free disk space for build artifacts. 
The resulting image size will use approx 2GB of disk space. ### Build the Image -The guidance below is based on building the Budibase single image on Debian 11. If you use another distro or OS you will need to amend the commands to suit. -Install Node -Budibase requires a recent version of node (14+) than is in the base Debian repos so: +The guidance below is based on building the Budibase single image on Debian 11 and AlmaLinux 8. If you use another distro or OS you will need to amend the commands to suit. +#### Install Node +Budibase requires a more recent version of node (14+) than is available in the base Debian repos so: ``` curl -sL https://deb.nodesource.com/setup_16.x | sudo bash - @@ -35,25 +34,26 @@ Install yarn and lerna: ``` npm install -g yarn jest lerna ``` -Install Docker +#### Install Docker + ``` apt install -y docker.io -apt install -y python3-pip -pip3 install docker-compose ``` + Check the versions of each installed version. This process was tested with the version numbers below so YMMV using anything else: - Docker: 20.10.5 -- docker-compose: 1.29.2 - node: 16.15.1 - yarn: 1.22.19 - lerna: 5.1.4 +#### Get the Code Clone the Budibase repo ``` git clone https://github.com/Budibase/budibase.git cd budibase ``` +#### Setup Node Node setup: ``` node ./hosting/scripts/setup.js @@ -61,15 +61,20 @@ yarn yarn bootstrap yarn build ``` - -Build the image from the Dockerfile: - +#### Build Image +The following yarn command does some prep and then runs the docker build command: ``` yarn build:docker:single ``` -If the docker build step fails run that step again manually with: +If the docker build step fails try running that step again manually with: ``` -docker build --no-cache -t budibase:latest -f ./hosting/single/Dockerfile . +docker build --build-arg TARGETARCH=amd --no-cache -t budibase:latest -f ./hosting/single/Dockerfile . +``` + +#### Azure App Services +Azure have some specific requirements for running a container in their App Service. 
Specifically, installation of SSH to port 2222 and data storage under /home. If you would like to build a budibase container for Azure App Service add the build argument shown below setting it to 'aas'. You can remove the CUSTOM_DOMAIN env variable from the Dockerfile too as Azure terminate SSL before requests reach the container. +``` +docker build --build-arg TARGETARCH=amd --build-arg TARGETBUILD=aas -t budibase:latest -f ./hosting/single/Dockerfile . ``` ### Run the Container @@ -85,6 +90,9 @@ When the container runs you should be able to access the container over http at When the Budibase UI appears you will be prompted to create an account to get started. +### Podman +The single image container builds fine when using podman in place of docker. You may be prompted for the registry to use for the CouchDB image and the HEALTHCHECK parameter is not OCI compliant so is ignored. + ### Check There are many things that could go wrong so if your container is not building or running as expected please check the following before opening a support issue. 
Verify the healthcheck status of the container: @@ -96,7 +104,6 @@ Check the container logs: docker logs budibase ``` - ### Support This single image build is still a work-in-progress so if you open an issue please provide the following information: - The OS and OS version you are building on diff --git a/hosting/single/nginx-default-site.conf b/hosting/single/nginx-default-site.conf new file mode 100644 index 0000000000..964313fa73 --- /dev/null +++ b/hosting/single/nginx-default-site.conf @@ -0,0 +1,94 @@ +server { + listen 80 default_server; + listen [::]:80 default_server; + server_name _; + + client_max_body_size 1000m; + ignore_invalid_headers off; + proxy_buffering off; + # port_in_redirect off; + + location ^~ /.well-known/acme-challenge/ { + default_type "text/plain"; + root /var/www/html; + break; + } + location = /.well-known/acme-challenge/ { + return 404; + } + + location /app { + proxy_pass http://127.0.0.1:4001; + } + + location = / { + proxy_pass http://127.0.0.1:4001; + } + + location ~ ^/(builder|app_) { + proxy_http_version 1.1; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_pass http://127.0.0.1:4001; + } + + location ~ ^/api/(system|admin|global)/ { + proxy_pass http://127.0.0.1:4002; + } + + location /worker/ { + proxy_pass http://127.0.0.1:4002; + rewrite ^/worker/(.*)$ /$1 break; + } + + location /api/ { + # calls to the API are rate limited with bursting + limit_req zone=ratelimit burst=20 nodelay; + + # 120s timeout on API requests + proxy_read_timeout 120s; + proxy_connect_timeout 120s; + proxy_send_timeout 120s; + + proxy_http_version 1.1; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + + proxy_pass http://127.0.0.1:4001; + } + + 
location /db/ { + proxy_pass http://127.0.0.1:5984; + rewrite ^/db/(.*)$ /$1 break; + } + + location / { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + proxy_pass http://127.0.0.1:9000; + } + + client_header_timeout 60; + client_body_timeout 60; + keepalive_timeout 60; + + # gzip + gzip on; + gzip_vary on; + gzip_proxied any; + gzip_comp_level 6; + gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml; + + + +} diff --git a/hosting/single/nginx.conf b/hosting/single/nginx.conf index 42d20dd14a..1e5d1c20d2 100644 --- a/hosting/single/nginx.conf +++ b/hosting/single/nginx.conf @@ -32,94 +32,6 @@ http { default "upgrade"; } - server { - listen 80 default_server; - listen [::]:80 default_server; - server_name _; - client_max_body_size 1000m; - ignore_invalid_headers off; - proxy_buffering off; - # port_in_redirect off; + include /etc/nginx/sites-enabled/*; - location ^~ /.well-known/acme-challenge/ { - default_type "text/plain"; - root /var/www/html; - break; - } - location = /.well-known/acme-challenge/ { - return 404; - } - - location /app { - proxy_pass http://127.0.0.1:4001; - } - - location = / { - proxy_pass http://127.0.0.1:4001; - } - - location ~ ^/(builder|app_) { - proxy_http_version 1.1; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://127.0.0.1:4001; - } - - location ~ ^/api/(system|admin|global)/ { - proxy_pass http://127.0.0.1:4002; - } - - location /worker/ { - proxy_pass http://127.0.0.1:4002; - rewrite ^/worker/(.*)$ /$1 break; - } - - location /api/ { - # calls to the API are 
rate limited with bursting - limit_req zone=ratelimit burst=20 nodelay; - - # 120s timeout on API requests - proxy_read_timeout 120s; - proxy_connect_timeout 120s; - proxy_send_timeout 120s; - - proxy_http_version 1.1; - proxy_set_header Connection $connection_upgrade; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - - proxy_pass http://127.0.0.1:4001; - } - - location /db/ { - proxy_pass http://127.0.0.1:5984; - rewrite ^/db/(.*)$ /$1 break; - } - - location / { - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_connect_timeout 300; - proxy_http_version 1.1; - proxy_set_header Connection ""; - chunked_transfer_encoding off; - proxy_pass http://127.0.0.1:9000; - } - - client_header_timeout 60; - client_body_timeout 60; - keepalive_timeout 60; - - # gzip - gzip on; - gzip_vary on; - gzip_proxied any; - gzip_comp_level 6; - gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml; - } } diff --git a/hosting/single/test.sh b/hosting/single/test.sh index b9f9a4032c..c7ef53f994 100755 --- a/hosting/single/test.sh +++ b/hosting/single/test.sh @@ -1,4 +1,4 @@ #!/bin/bash -id=$(docker run -t -d -p 10000:10000 budibase:latest) +id=$(docker run -t -d -p 80:80 budibase:latest) docker exec -it $id bash docker kill $id