diff --git a/.env.example b/.env.example index a71feb5..9bd1f07 100644 --- a/.env.example +++ b/.env.example @@ -1,9 +1,12 @@ DOMAIN= +APPLICATION_DOMAIN="" ### TLS certificates configuration TLS_ENABLED="" TLS_CERTIFICATE="" TLS_KEY="" +APP_TLS_CERTIFICATE="" +APP_TLS_KEY="" ### Database configuration DB_HOST="" @@ -17,3 +20,6 @@ EMAIL_PORT=587 EMAIL_SECURE="" EMAIL_USER="" EMAIL_PASSWORD="" + +### Docker Driver options +DOCKER_DRIVER_PRIVATE_CA_PATH="" diff --git a/.github/workflows/test-docker-compose.yaml b/.github/workflows/test-docker-compose.yaml new file mode 100644 index 0000000..6b58586 --- /dev/null +++ b/.github/workflows/test-docker-compose.yaml @@ -0,0 +1,163 @@ +name: Test Docker Compose + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + default-stack: + name: Test default stack + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Create .env file for default settings + run: | + cp .env.example .env + sed -i 's/DOMAIN=.*/DOMAIN=ci-example.com/' .env + + - name: Create stack + uses: hoverkraft-tech/compose-action@v2.0.2 + with: + compose-file: "./docker-compose.yml" + up-flags: "-d --quiet-pull" + + - name: Check readiness + run: | + has_healthcheck() { + local container=$1 + local health_status=$(docker inspect --format='{{if .Config.Healthcheck}}true{{else}}false{{end}}' "$container") + [ "$health_status" = "true" ] + } + + check_containers() { + containers=$(docker compose ps -q) + for container in $containers; do + container_name=$(docker inspect --format '{{.Name}}' "$container" | sed 's/\///') + container_ip=$(docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$container") + + if has_healthcheck "$container"; then + echo "Container has healthcheck defined" + status=$(docker inspect --format "{{.State.Health.Status}}" "$container") + if [ "$status" != "healthy" ]; then + echo "❌ Container $container_name is not healthy (status: 
$status)" + return 1 + fi + else + running=$(docker inspect --format "{{.State.Running}}" "$container") + if [ "$running" != "true" ]; then + echo "❌ Container $container_name is not running" + return 1 + fi + fi + + echo "✅ Container $container_name is ready" + done + return 0 + } + + # Wait for containers with timeout + TIMEOUT=300 # 5 minutes timeout + ELAPSED=0 + SLEEP_TIME=10 + + until check_containers; do + if [ $ELAPSED -ge $TIMEOUT ]; then + echo "❌ Timeout waiting for containers to be ready" + docker compose ps + docker compose logs + exit 1 + fi + echo "⏳ Waiting for containers... ($ELAPSED seconds elapsed)" + sleep $SLEEP_TIME + ELAPSED=$((ELAPSED + SLEEP_TIME)) + done + + echo "✅ All containers are ready!" + docker compose ps + + - name: Tear down the stack + if: always() + run: docker compose down + + + quick-start-stack: + name: Test quick-start stack + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Create .env file for default settings + run: | + cp .env.example .env + sed -i 's/DOMAIN=.*/DOMAIN=ci-example.com/' .env + + - name: Create stack + uses: hoverkraft-tech/compose-action@v2.0.2 + with: + compose-file: "./docker-compose-quick-start.yml" + up-flags: "-d --quiet-pull" + + - name: Check readiness + run: | + has_healthcheck() { + local container=$1 + local health_status=$(docker inspect --format='{{if .Config.Healthcheck}}true{{else}}false{{end}}' "$container") + [ "$health_status" = "true" ] + } + + check_containers() { + containers=$(docker compose ps -q) + for container in $containers; do + container_name=$(docker inspect --format '{{.Name}}' "$container" | sed 's/\///') + container_ip=$(docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$container") + + if has_healthcheck "$container"; then + echo "Container has healthcheck defined" + status=$(docker inspect --format "{{.State.Health.Status}}" "$container") + if [ "$status" != "healthy" ]; then + echo "❌ 
Container $container_name is not healthy (status: $status)" + return 1 + fi + else + running=$(docker inspect --format "{{.State.Running}}" "$container") + if [ "$running" != "true" ]; then + echo "❌ Container $container_name is not running" + return 1 + fi + fi + + echo "✅ Container $container_name is ready" + done + return 0 + } + + # Wait for containers with timeout + TIMEOUT=300 # 5 minutes timeout + ELAPSED=0 + SLEEP_TIME=10 + + until check_containers; do + if [ $ELAPSED -ge $TIMEOUT ]; then + echo "❌ Timeout waiting for containers to be ready" + docker compose ps + docker compose logs + exit 1 + fi + echo "⏳ Waiting for containers... ($ELAPSED seconds elapsed)" + sleep $SLEEP_TIME + ELAPSED=$((ELAPSED + SLEEP_TIME)) + done + + echo "✅ All containers are ready!" + docker compose ps + + - name: Tear down the stack + if: always() + run: docker compose down diff --git a/CHANGELOG.md b/CHANGELOG.md index 8dd4b0c..095a479 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +#### 2.11.0: Release + + - Ensure the broker service name doesn't change (#168) + - ci: Introduce workflow for testing docker compose (#166) + - Update Node-RED version in Docker compose (#162) + - chore: Refactor docker-compose to simplify installation experience (#160) + - First pass at TeamBroker (#165) @hardillb + - fix: Remove interpolation when creating TLS certificates (#164) @ppawlowski + #### 2.10.0: Release - Introduce quick-start compose file (#158) @ppawlowski diff --git a/UPGRADE.md b/UPGRADE.md index 45f7d00..56492cc 100644 --- a/UPGRADE.md +++ b/UPGRADE.md @@ -38,15 +38,17 @@ This allows for easier management of the platform and better separation of conce ```bash curl -o docker-compose-new.yml https://raw.githubusercontent.com/flowfuse/docker-compose/main/docker-compose.yml - curl -o docker-compose-tls.override.new.yml https://raw.githubusercontent.com/flowfuse/docker-compose/main/docker-compose-tls.override.yml curl -o .env 
https://raw.githubusercontent.com/flowfuse/docker-compose/main/.env.example ``` 3. **Move configurations to the new approach** * Copy content of `./etc/flowforge.yml` file to `docker-compose-new.yml` file, to `configs.flowfuse.content` section. Remove all commented lines. Maintain indentation. + * Make sure that `broker.url` is set to `mqtt://broker:1883`. Update if needed. * Copy content of `./etc/flowforge-storage.yml` file to `docker-compose-new.yml` file, to `configs.flowfuse_storage.content` section. Remove all commented lines. Maintain indentation. * Set the `DOMAIN` variable in the `.env` file to the domain used by your instance of FlowFuse platform. +* If FlowFuse application is running outside of the `DOMAIN` scope, set it as a value of `APPLICATION_DOMAIN` variable in the `.env` file. +* If application should be accessible via secured connection (HTTPS), set `TLS_ENABLED` variable to `true` in `.env` file. * If custom certificates are used, copy their content to `.env` file, to `TLS_CERTIFICATE` and `TLS_KEY` variables. They should look like this: ```bash @@ -68,6 +70,28 @@ This allows for easier management of the platform and better separation of conce " ``` +* If custom certificates are used and FlowFuse application is running on a different domain than other stack components (defined in `APPLICATION_DOMAIN` variable), + use `APP_TLS_CERTIFICATE` and `APP_TLS_KEY` variables to provide certificate and its key. They should look like this: + + ```bash + APP_TLS_CERTIFICATE=" + -----BEGIN CERTIFICATE----- + MIIFfzCCBKegAwIBAgISA0 + ... + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIFfzCCBKegAwIBAgISA0 + ... + -----END CERTIFICATE----- + " + APP_TLS_KEY=" + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD + ... + -----END PRIVATE KEY----- + " + ``` + 4. **Migrate database files** Move the database files from host to the new volume. This will allow you to keep the existing data. 
@@ -90,19 +114,22 @@ This allows for easier management of the platform and better separation of conce docker run --rm -v flowfuse_db:/data -v $(pwd)/db:/backup alpine sh -c "cp -a /backup/. /data/" ``` -5. **Start FlowFuse** +5. **Rename files** - Start the new FlowFuse platform using the new Docker Compose file. + In order to maintain the same file structure, rename the compose files. - * With automatic TLS certificate generation: ```bash - docker compose -f docker-compose-new.yml -f docker-compose-tls.new.override.yml --profile autossl -p flowfuse up -d + mv docker-compose.yml docker-compose-old.yml + mv docker-compose-new.yml docker-compose.yml ``` - * With custom TLS certificate: +6. **Start FlowFuse** + + Start the new FlowFuse platform using the new Docker Compose file. + * With automatic TLS certificate generation: ```bash - docker compose -f docker-compose.new.yml -f docker-compose-tls..new.override.yml -p flowfuse up -d + docker compose -f docker-compose.yml --profile autotls -p flowfuse up -d ``` * In all other cases @@ -110,12 +137,14 @@ This allows for easier management of the platform and better separation of conce ```bash docker compose -p flowfuse up -d ``` -6. **Verify the migration** + +7. **Verify the migration** Verify that the new FlowFuse platform is working correctly and it is accessible using the domain set in the `.env` file. Login credentials should remain the same as before the migration, as well as platform configuration. + Restart the Node-RED instances if they appear in `Starting` state. -7. **Cleanup** +8. **Cleanup** After verifying that the new FlowFuse platform is working correctly, you can remove the old configuration files. 
@@ -123,5 +152,6 @@ This allows for easier management of the platform and better separation of conce rm ./etc/flowforge.yml ./etc/flowforge.yml.bak rm ./etc/flowforge-storage.yml.bak ./etc/flowforge-storage.yml rm -rf ./db ./db.bak + rm -f ./docker-compose-old.yml ``` diff --git a/docker-compose.yml b/docker-compose.yml index 0811a4e..e6ed22a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,7 @@ configs: port: 3000 host: 0.0.0.0 domain: ${DOMAIN:?error} - base_url: http${TLS_ENABLED:+s}://forge.${DOMAIN:?error} + base_url: http${TLS_ENABLED:+s}://${APPLICATION_DOMAIN:-forge.${DOMAIN}} api_url: http://forge:3000 create_admin: ${CREATE_ADMIN:-false} db: @@ -17,7 +17,7 @@ configs: password: ${DB_PASSWORD:-secret} email: enabled: ${EMAIL_ENABLED:-false} - from: '"FlowFuse" ' + from: '"FlowFuse" ' smtp: host: ${EMAIL_HOST} port: ${EMAIL_PORT:-587} @@ -29,31 +29,15 @@ configs: type: docker options: socket: /tmp/docker.sock + ${DOCKER_DRIVER_PRIVATE_CA_PATH:+privateCA: ${DOCKER_DRIVER_PRIVATE_CA_PATH}} broker: url: mqtt://broker:1883 public_url: ws${TLS_ENABLED:+s}://mqtt.${DOMAIN:?error} + teamBroker: + enabled: true fileStore: enable: true url: http://file-server:3001 - flowfuse_broker: - content: | - per_listener_settings false - allow_anonymous false - listener 1883 0.0.0.0 - listener 1884 0.0.0.0 - protocol websockets - auth_plugin /mosquitto/go-auth.so - auth_opt_backends http - auth_opt_hasher bcrypt - auth_opt_cache true - auth_opt_auth_cache_seconds 30 - auth_opt_acl_cache_seconds 90 - auth_opt_auth_jitter_second 3 - auth_opt_acl_jitter_seconds 5 - auth_opt_http_host forge - auth_opt_http_port 3000 - auth_opt_http_getuser_uri /api/comms/auth/client - auth_opt_http_aclcheck_uri /api/comms/auth/acl flowfuse_storage: content: | port: 3001 @@ -76,10 +60,25 @@ configs: nginx: content: | client_max_body_size 5m; - nginx_tls_crt: + nginx_main_tls_crt: environment: TLS_CERTIFICATE - nginx_tls_key: + nginx_main_tls_key: environment: TLS_KEY + 
nginx_app_tls_crt: + environment: APP_TLS_CERTIFICATE + nginx_app_tls_key: + environment: APP_TLS_KEY + nginx_stream: + content: | + # stream { + # server { + # listen 1884 ssl; + # ssl_protocols TLSv1.2; + # ssl_certificate /etc/nginx/certs/${DOMAIN}.crt; + # ssl_certificate_key /etc/nginx/certs/${DOMAIN}.key; + # proxy_pass broker:1883; + # } + #} postgres_db_setup: content: | #!/bin/sh @@ -97,6 +96,312 @@ configs: SELECT 'CREATE DATABASE "ff-context"' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'ff-context')\gexec GRANT ALL PRIVILEGES ON DATABASE "ff-context" TO "${DB_USER:-forge}"; ESQL + emqx: + content: | + authentication = [ + { + backend = http + body { + clientId = "$${clientid}" + password = "$${password}" + username = "$${username}" + } + connect_timeout = "15s" + enable = true + enable_pipelining = 1 + headers { + content-type = "application/json" + } + mechanism = password_based + method = post + pool_size = 8 + request_timeout = "15s" + ssl { + ciphers = [] + depth = 10 + enable = false + hibernate_after = "5s" + log_level = notice + reuse_sessions = true + secure_renegotiate = true + verify = verify_peer + versions = [ + "tlsv1.3", + "tlsv1.2" + ] + } + url = "http://forge:3000/api/comms/v2/auth" + }, + { + backend = built_in_database + bootstrap_file = "$${EMQX_ETC_DIR}/auth-built-in-db-bootstrap.csv" + bootstrap_type = plain + enable = true + mechanism = password_based + password_hash_algorithm {name = plain, salt_position = disable} + user_id_type = username + } + ] + authorization { + cache { + enable = true + excludes = [] + max_size = 32 + ttl = "1m" + } + deny_action = ignore + no_match = allow + sources = [ + { + body { + action = "$${action}" + topic = "$${topic}" + username = "$${username}" + } + connect_timeout = "15s" + enable = true + enable_pipelining = 1 + headers { + content-type = "application/json" + } + method = post + pool_size = 8 + request_timeout = "30s" + ssl { + ciphers = [] + depth = 10 + enable = false + 
hibernate_after = "5s" + log_level = notice + reuse_sessions = true + secure_renegotiate = true + verify = verify_peer + versions = [ + "tlsv1.3", + "tlsv1.2" + ] + } + type = http + url = "http://forge:3000/api/comms/v2/acls" + } + ] + } + mqtt { + max_packet_size: 128MB + } + listeners { + ssl { + default { + acceptors = 16 + access_rules = [ + "allow all" + ] + bind = "0.0.0.0:8883" + enable = false + enable_authn = true + max_conn_rate = infinity + max_connections = infinity + mountpoint = "$${client_attrs.team}" + proxy_protocol = false + proxy_protocol_timeout = "3s" + ssl_options { + cacertfile = "$${EMQX_ETC_DIR}/certs/cacert.pem" + certfile = "$${EMQX_ETC_DIR}/certs/cert.pem" + ciphers = [] + client_renegotiation = true + depth = 10 + enable_crl_check = false + fail_if_no_peer_cert = false + gc_after_handshake = false + handshake_timeout = "15s" + hibernate_after = "5s" + honor_cipher_order = true + keyfile = "$${EMQX_ETC_DIR}/certs/key.pem" + log_level = notice + ocsp { + enable_ocsp_stapling = false + refresh_http_timeout = "15s" + refresh_interval = "5m" + } + reuse_sessions = true + secure_renegotiate = true + verify = verify_none + versions = [ + "tlsv1.3", + "tlsv1.2" + ] + } + tcp_options { + active_n = 100 + backlog = 1024 + buffer = "4KB" + high_watermark = "1MB" + keepalive = none + nodelay = true + reuseaddr = true + send_timeout = "15s" + send_timeout_close = true + } + zone = default + } + } + tcp { + default { + acceptors = 16 + access_rules = [ + "allow all" + ] + bind = "0.0.0.0:1883" + enable = true + enable_authn = true + max_conn_rate = infinity + max_connections = infinity + mountpoint = "$${client_attrs.team}" + proxy_protocol = false + proxy_protocol_timeout = "3s" + tcp_options { + active_n = 100 + backlog = 1024 + buffer = "4KB" + high_watermark = "1MB" + keepalive = none + nodelay = true + reuseaddr = true + send_timeout = "15s" + send_timeout_close = true + } + zone = default + } + } + ws { + default { + acceptors = 16 + 
access_rules = [ + "allow all" + ] + bind = "0.0.0.0:8083" + enable = true + enable_authn = true + max_conn_rate = infinity + max_connections = infinity + mountpoint = "$${client_attrs.team}" + proxy_protocol = false + proxy_protocol_timeout = "3s" + tcp_options { + active_n = 100 + backlog = 1024 + buffer = "4KB" + high_watermark = "1MB" + keepalive = none + nodelay = true + reuseaddr = true + send_timeout = "15s" + send_timeout_close = true + } + websocket { + allow_origin_absence = true + check_origin_enable = false + check_origins = "http://localhost:18083, http://127.0.0.1:18083" + compress = false + deflate_opts { + client_context_takeover = takeover + client_max_window_bits = 15 + mem_level = 8 + server_context_takeover = takeover + server_max_window_bits = 15 + strategy = default + } + fail_if_no_subprotocol = true + idle_timeout = "7200s" + max_frame_size = infinity + mqtt_path = "/" + mqtt_piggyback = multiple + proxy_address_header = "x-forwarded-for" + proxy_port_header = "x-forwarded-port" + supported_subprotocols = "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5" + validate_utf8 = true + } + zone = default + } + } + wss { + default { + acceptors = 16 + access_rules = [ + "allow all" + ] + bind = "0.0.0.0:8084" + enable = false + enable_authn = true + max_conn_rate = infinity + max_connections = infinity + mountpoint = "$${client_attrs.team}" + proxy_protocol = false + proxy_protocol_timeout = "3s" + ssl_options { + cacertfile = "$${EMQX_ETC_DIR}/certs/cacert.pem" + certfile = "$${EMQX_ETC_DIR}/certs/cert.pem" + ciphers = [] + client_renegotiation = true + depth = 10 + fail_if_no_peer_cert = false + handshake_timeout = "15s" + hibernate_after = "5s" + honor_cipher_order = true + keyfile = "$${EMQX_ETC_DIR}/certs/key.pem" + log_level = notice + reuse_sessions = true + secure_renegotiate = true + verify = verify_none + versions = [ + "tlsv1.3", + "tlsv1.2" + ] + } + tcp_options { + active_n = 100 + backlog = 1024 + buffer = "4KB" + high_watermark = "1MB" + 
keepalive = none + nodelay = true + reuseaddr = true + send_timeout = "15s" + send_timeout_close = true + } + websocket { + allow_origin_absence = true + check_origin_enable = false + check_origins = "http://localhost:18083, http://127.0.0.1:18083" + compress = false + deflate_opts { + client_context_takeover = takeover + client_max_window_bits = 15 + mem_level = 8 + server_context_takeover = takeover + server_max_window_bits = 15 + strategy = default + } + fail_if_no_subprotocol = true + idle_timeout = "7200s" + max_frame_size = infinity + mqtt_path = "/" + mqtt_piggyback = multiple + proxy_address_header = "x-forwarded-for" + proxy_port_header = "x-forwarded-port" + supported_subprotocols = "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5" + validate_utf8 = true + } + zone = default + } + } + } + api_key { + bootstrap_file = "/mounted/config/api-keys" + } + emqx-api: + content: | + flowfuse:verySecret:administrator services: nginx: @@ -111,15 +416,22 @@ services: configs: - source: nginx target: /etc/nginx/conf.d/my_proxy.conf - - source: nginx_tls_crt + - source: nginx_main_tls_crt target: /etc/nginx/certs/${DOMAIN:?error}.crt - - source: nginx_tls_key + - source: nginx_main_tls_key target: /etc/nginx/certs/${DOMAIN:?error}.key + - source: nginx_app_tls_crt + target: /etc/nginx/certs/${APPLICATION_DOMAIN:-forge.${DOMAIN}}.crt + - source: nginx_app_tls_key + target: /etc/nginx/certs/${APPLICATION_DOMAIN:-forge.${DOMAIN}}.key + - source: nginx_stream + target: /etc/nginx/toplevel.conf.d/mqtt.conf ports: - "80:80" - "443:443" + - "1884:1884" environment: - - "HTTPS_METHOD=redirect" + - HTTPS_METHOD=${TLS_ENABLED:+redirect} healthcheck: test: "curl -s -I http://localhost | head -n 1 | grep -q 503" interval: 5s @@ -150,28 +462,37 @@ services: start_period: 10s broker: - image: "iegomez/mosquitto-go-auth:latest-mosquitto_2.0.15" + image: emqx/emqx:5.8.0 networks: - flowforge - restart: always - ulimits: - nofile: 2048 + ports: + - 1883:1883 + healthcheck: + test: ["CMD", 
"/opt/emqx/bin/emqx", "ctl", "status"] + interval: 5s + timeout: 25s + retries: 5 environment: - - "VIRTUAL_HOST=mqtt.${DOMAIN:?error}" - - "VIRTUAL_PORT=1884" - - "LETSENCRYPT_HOST=mqtt.${DOMAIN:?error}" + - "VIRTUAL_HOST=broker.${DOMAIN:?error},mqtt.${DOMAIN:?error}" + - "VIRTUAL_PORT=8083" + - "LETSENCRYPT_HOST=broker.${DOMAIN:?error},mqtt.${DOMAIN:?error}" + - "EMQX_DASHBOARD__DEFAULT_PASSWORD=topSecret" configs: - - source: flowfuse_broker - target: /etc/mosquitto/mosquitto.conf + - source: emqx + target: /opt/emqx/data/configs/cluster.hocon + - source: emqx-api + target: /mounted/config/api-keys + volumes: + - emqx:/opt/emqx/data forge: - image: "flowfuse/forge-docker:2.10.0" + image: "flowfuse/forge-docker:2.11.0" networks: - flowforge restart: always environment: - - "VIRTUAL_HOST=forge.${DOMAIN:?error}" - - "LETSENCRYPT_HOST=forge.${DOMAIN:?error}" + - "VIRTUAL_HOST=${APPLICATION_DOMAIN:-forge.${DOMAIN}}" + - "LETSENCRYPT_HOST=${APPLICATION_DOMAIN:-forge.${DOMAIN}}" configs: - source: flowfuse target: /usr/src/forge/etc/flowforge.yml @@ -190,7 +511,7 @@ services: start_period: 10s file-server: - image: "flowfuse/file-server:2.10.0" + image: "flowfuse/file-server:2.11.0" networks: - flowforge restart: always @@ -249,3 +570,4 @@ volumes: nginx-proxy-html: fileStorage: acme: + emqx: diff --git a/file-server/package.json b/file-server/package.json index 4ecdd97..24b09aa 100644 --- a/file-server/package.json +++ b/file-server/package.json @@ -1,9 +1,9 @@ { "name": "@flowfuse/file-server-container", - "version": "2.10.0", + "version": "2.11.0", "private": true, "dependencies": { - "@flowfuse/file-server": "^2.10.0" + "@flowfuse/file-server": "^2.11.0" }, "license": "Apache-2.0" } diff --git a/flowforge-docker/package.json b/flowforge-docker/package.json index ba99adb..8b1d476 100644 --- a/flowforge-docker/package.json +++ b/flowforge-docker/package.json @@ -1,14 +1,14 @@ { "name": "@flowfuse/docker-deployment", "description": "FlowFuse in Docker", - "version": 
"2.10.0", + "version": "2.11.0", "private": true, "author": { "name": "FlowForge Inc." }, "dependencies": { - "@flowfuse/flowfuse": "^2.10.0", - "@flowfuse/driver-docker": "^2.10.0", + "@flowfuse/flowfuse": "^2.11.0", + "@flowfuse/driver-docker": "^2.11.0", "pg": "^8.7.1", "pg-hstore": "^2.3.4" }, diff --git a/node-red-container/package.json b/node-red-container/package.json index 4e533cd..3675392 100644 --- a/node-red-container/package.json +++ b/node-red-container/package.json @@ -1,7 +1,7 @@ { "name": "node-red-project", "description": "A Node-RED Project", - "version": "2.10.0", + "version": "2.11.0", "private": true, "dependencies": {} }