diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 0e473be..754b905 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -58,7 +58,7 @@ jobs: name: Run hadolint against docker files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.7 + - uses: actions/checkout@v4.2.1 - name: Pull hadolint/hadolint:latest Image run: docker pull hadolint/hadolint:latest - name: Run hadolint against Dockerfiles diff --git a/.github/workflows/on_pr.yml b/.github/workflows/on_pr.yml index 1515545..5ed5e40 100644 --- a/.github/workflows/on_pr.yml +++ b/.github/workflows/on_pr.yml @@ -34,7 +34,7 @@ jobs: name: Run hadolint against docker files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.7 + - uses: actions/checkout@v4.2.1 - name: Pull hadolint/hadolint:latest Image run: docker pull hadolint/hadolint:latest - name: Run hadolint against Dockerfiles diff --git a/.github/workflows/pre-commit-updates.yaml b/.github/workflows/pre-commit-updates.yaml index bfd1981..42fc80b 100644 --- a/.github/workflows/pre-commit-updates.yaml +++ b/.github/workflows/pre-commit-updates.yaml @@ -11,7 +11,7 @@ jobs: name: Updates steps: - name: Checkout - uses: actions/checkout@v4.1.7 + uses: actions/checkout@v4.2.1 - name: Update pre-commit hooks uses: brokenpip3/action-pre-commit-update@0.0.1 with: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 74c6dd1..c802af8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # lint yaml, line and whitespace - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: check-yaml - id: end-of-file-fixer @@ -13,7 +13,7 @@ repos: # lint the dockerfiles - repo: https://github.com/hadolint/hadolint - rev: v2.13.0-beta + rev: v2.13.1-beta hooks: - id: hadolint @@ -41,7 +41,7 @@ repos: - id: shellcheck - repo: https://github.com/sirosen/check-jsonschema - rev: 0.29.0 + rev: 0.29.4 hooks: - id: check-github-actions - id: check-github-workflows @@ -54,12 +54,12 @@ repos: # lint python formatting - repo: https://github.com/psf/black - rev: 24.4.2 + rev: 24.10.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 - rev: "7.1.0" # pick a git hash / tag to point to + rev: "7.1.1" # pick a git hash / tag to point to hooks: - id: flake8 args: ["--extend-ignore=W503,W504,E501"] diff --git a/Dockerfile b/Dockerfile index 285e7b8..735c3fc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,19 @@ -FROM ghcr.io/sdr-enthusiasts/docker-baseimage:mlatclient as buildimage +FROM ghcr.io/sdr-enthusiasts/docker-baseimage:mlatclient AS buildimage SHELL ["/bin/bash", "-x", "-o", "pipefail", "-c"] RUN \ --mount=type=bind,source=./,target=/app/ \ - apt-get update -q -y && \ - apt-get install -o Dpkg::Options::="--force-confnew" -y --no-install-recommends -q \ - build-essential && \ + # this baseimage has build-essential installed, no need to install it + #apt-get update -q -y && \ + #apt-get install -o Dpkg::Options::="--force-confnew" -y --no-install-recommends -q \ + # build-essential && \ gcc -static /app/downloads/distance-in-meters.c -o /distance -lm -O2 FROM ghcr.io/sdr-enthusiasts/docker-tar1090:latest -LABEL org.opencontainers.image.source = "https://github.com/sdr-enthusiasts/docker-adsb-ultrafeeder" +LABEL org.opencontainers.image.source="https://github.com/sdr-enthusiasts/docker-adsb-ultrafeeder" -ENV URL_MLAT_CLIENT_REPO="https://github.com/wiedehopf/mlat-client.git" \ +ENV \ PRIVATE_MLAT="false" \ MLAT_INPUT_TYPE="auto" @@ -33,26 
+34,23 @@ RUN \
# Install all these packages:
apt-get update -q -y && \
apt-get install -o Dpkg::Options::="--force-confnew" -y --no-install-recommends -q \
- "${KEPT_PACKAGES[@]}" \
- "${TEMP_PACKAGES[@]}" && \
+ "${KEPT_PACKAGES[@]}" \
+ "${TEMP_PACKAGES[@]}" && \
# Get mlat-client
tar zxf /buildimage/mlatclient.tgz -C / && \
ln -s /usr/local/bin/mlat-client /usr/bin/mlat-client && \
# Get distance binary
cp -f /buildimage/distance /usr/local/bin/distance && \
# Add Container Version
- [[ "${VERSION_BRANCH:0:1}" == "#" ]] && VERSION_BRANCH="main" || true && \
- echo "$(TZ=UTC date +%Y%m%d-%H%M%S)_$(curl -ssL https://api.github.com/repos/$VERSION_REPO/commits/$VERSION_BRANCH | awk '{if ($1=="\"sha\":") {print substr($2,2,7); exit}}')_$VERSION_BRANCH" > /.CONTAINER_VERSION && \
- # Clean up and install POST_PACKAGES:
- apt-get remove -q -y "${TEMP_PACKAGES[@]}" && \
- # apt-get install -o Dpkg::Options::="--force-confnew" -y --no-install-recommends -q \
- # ${POST_PACKAGES[@]} && \
- apt-get autoremove -q -o APT::Autoremove::RecommendsImportant=0 -o APT::Autoremove::SuggestsImportant=0 -y && \
+ { [[ "${VERSION_BRANCH:0:1}" == "#" ]] && VERSION_BRANCH="main" || true; } && \
+ echo "$(TZ=UTC date +%Y%m%d-%H%M%S)_$(curl -ssL "https://api.github.com/repos/$VERSION_REPO/commits/$VERSION_BRANCH" | awk '{if ($1=="\"sha\":") {print substr($2,2,7); exit}}')_$VERSION_BRANCH" > /.CONTAINER_VERSION && \
+ # Clean up:
+ apt-get autoremove -q -o APT::Autoremove::RecommendsImportant=0 -o APT::Autoremove::SuggestsImportant=0 -y "${TEMP_PACKAGES[@]}" && \
apt-get clean -q -y && \
# test mlat-client
/usr/bin/mlat-client --help > /dev/null && \
# remove pycache introduced by testing mlat-client
- find /usr | grep -E "/__pycache__$" | xargs rm -rf || true && \
+ { find /usr | grep -E "/__pycache__$" | xargs rm -rf || true; } && \
rm -rf /src /tmp/* /var/lib/apt/lists/* /git /var/cache/* && \
#
# Do some stuff for kx1t's convenience:
diff --git a/README.md b/README.md index bacd7ba..a4bb066 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@
- [Mandatory Parameters](#mandatory-parameters)
- [Optional Parameters](#optional-parameters)
- [Getting ADSB data to the Ultrafeeder](#getting-adsb-data-to-the-ultrafeeder)
- - [Connecting to a SDR or other hardware device](#connecting-to-a-sdr-or-other-hardware-device)
+ - [Connecting to an SDR or other hardware device](#connecting-to-an-sdr-or-other-hardware-device)
- [Mandatory parameters](#mandatory-parameters-1)
- [Optional/Additional Parameters](#optionaladditional-parameters)
- [AutoGain for RTLSDR Devices](#autogain-for-rtlsdr-devices)
@@ -59,7 +59,7 @@
## Introduction
-`adsb-ultrafeeder™` is a ADS-B data collector container that can be used to:
+`adsb-ultrafeeder™` is an ADS-B data collector container that can be used to:
- retrieve ADS-B data from your SDR or other device
- display it on a local map, including options to show tracks, heatmaps, and system performance graphs
@@ -131,11 +131,11 @@ The general principle behind the port numbering, is:
| `31004/tcp` | MLATHUB Beast protocol input |
| `31005/tcp` | MLATHUB Beast protocol output |
| `31006/tcp` | MLATHUB Beast-reduce protocol output |
-| `9273/tcp` | Prometheus web interface with data from `readsb` |
-| `9274/tcp` | Prometheus web interface with data from `Telegraf` |
+| `9273/tcp` | Prometheus HTTP endpoint with data from `telegraf` |
+| `9274/tcp` | Prometheus HTTP endpoint with data from `readsb` |
| `80/tcp` | Tar1090 (map) web interface |
-Any of these ports can be made available to the host
system by using the `ports:` directive in your `docker-compose.yml`. The container's web interface is rendered to port `80` in the container. This can me mapped to a port on the host using the docker-compose `ports` directive. In the example [`docker-compose.yml`](docker-compose.yml) file, the container's Tar1090 interface is mapped to `8080` on the host system, and ports `9273-9274` are exposed as-is: +Any of these ports can be made available to the host system by using the `ports:` directive in your `docker-compose.yml`. The container's web interface is rendered to port `80` in the container. This can be mapped to a port on the host using the docker-compose `ports` directive. In the example [`docker-compose.yml`](docker-compose.yml) file, the container's Tar1090 interface is mapped to `8080` on the host system, and ports `9273-9274` are exposed as-is: ```yaml ports: @@ -174,7 +174,7 @@ You need to make sure that the USB device can be accessed by the container. The - 'c 189:* rwm' ... volumes: - - /dev:/dev:rw + - /dev/bus/usb:/dev/bus/usb:rw ``` The advantage of doing this (over simply adding a `device:` directive pointing at the USB port) is that the construction above will automatically recover if you "hot plug" your dongle. ⚠️This feature requires a recent version of docker-compose (version >=2.3). Make sure your system is up to date if dongles are not found. ⚠️ @@ -201,7 +201,7 @@ The following parameters must be set (mandatory) for the container to function: | `READSB_DEBUG` | Optional, used to set debug mode. `n`: network, `P`: CPR, `S`: speed check | Unset | | | `S6_SERVICES_GRACETIME` | Optional, set to 30000 when saving traces / globe_history | `3000` | | | `READSB_ENABLE_BIASTEE` | Set to `true` to enable bias tee on supporting interfaces | | Unset | -| `READSB_RX_LOCATION_ACCURACY` | Accuracy of receiver location in metadata: 0=no location, 1=approximate, 2=exact | `--rx-location-accuracy=` | `2` | +| `READSB_RX_LOCATION_ACCURACY` | Accuracy of receiver location in metadata: 0=no location, 1=approximate, 2=exact (`HEYWHATSTHAT_PANORAMA_ID` also has location) | `--rx-location-accuracy=` | `2` | | `READSB_HEATMAP_INTERVAL` | Per plane interval for heatmap and replay (if you want to lower this, also lower json-trace-interval to this or a lower value) | `--heatmap=` | `15` | | `READSB_MAX_RANGE` | Absolute maximum range for position decoding (in nm) | `--max-range=` | `450` | | `READSB_STATS_EVERY` | Number of seconds between showing and resetting stats. | `--stats-every=` | Unset | @@ -219,12 +219,12 @@ The following parameters must be set (mandatory) for the container to function: There are two ways to provide ADSB data to the Ultrafeeder: -- provide the container with access to a SDR or other hardware device that collects ADSB data +- provide the container with access to an SDR or other hardware device that collects ADSB data - allow the container to connect to a ADSB data source in Beast, Raw, or SBS format These methods are not mutually exclusive - you can use both at the same time if you want. -#### Connecting to a SDR or other hardware device +#### Connecting to an SDR or other hardware device If you want to connect your SDR to the container, here's how to do that: @@ -278,7 +278,7 @@ docker exec -it ultrafeeder /usr/local/bin/autogain1090 reset #### Connecting to external ADSB data sources -In addition to (or instead of) connecting to a SDR or hardware device to get ADSB data, the container also supports ingesting or sending data from a TCP port. 
Here are some parameters that you need to configure if you want to make this happen: +In addition to (or instead of) connecting to an SDR or hardware device to get ADSB data, the container also supports ingesting or sending data from a TCP port. Here are some parameters that you need to configure if you want to make this happen: ##### All-in-One Configuration using `ULTRAFEEDER_CONFIG` @@ -376,7 +376,7 @@ There are many optional parameters relating to the ingestion of data and the gen | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------- | ------------- | | `READSB_NET_API_PORT` | | `--net-api-port=` | `30152` | | `READSB_ENABLE_API` | Adds nginx proxies api at /re-api. Use with extraargs --write-json-globe-index --tar1090-use-api to get fast map with many planes | various | disabled | -| `READSB_NET_BEAST_REDUCE_INTERVAL` | BeastReduce position update interval, longer means less data (valid range: `0.000` - `14.999`) | `--net-beast-reduce-interval=` | `1.0` | +| `READSB_NET_BEAST_REDUCE_INTERVAL` | BeastReduce position update interval, longer means less data (valid range: `0.000` - `14.999`) | `--net-beast-reduce-interval=` | `0.5` | | `READSB_NET_BEAST_REDUCE_FILTER_DIST` | Restrict beast-reduce output to aircraft in a radius of X nmi | `--net-beast-reduce-filter-dist=` | Unset | | `READSB_NET_BEAST_REDUCE_FILTER_ALT` | Restrict beast-reduce output to aircraft below X ft | `--net-beast-reduce-filter-alt=` | Unset | | `READSB_NET_BR_OPTIMIZE_FOR_MLAT` | BeastReduce: Keep messages relevant to mlat-client | `--net-beast-reduce-optimize-for-mlat` | Unset | @@ -411,7 +411,7 @@ It will create a separate instance of `mlat-client` for each defined MLAT server ```yaml environment: ... - - ULTRAFEEDERCONFIG= + - ULTRAFEEDER_CONFIG= ... mlat,mlat-server1.com,port1,return_port1,uuid=1234-5678-90123,inputconnect=remote_receiver1:30005,lat=12.3456,lon=45.6789,alt=18m,--arg1 hello --arg2 world; mlat,mlat-server2.com,port2,return_port2,uuid=5678-9012-34567,inputconnect=remote_receiver2:30005,-lat=12.3456,lon=45.6789,alt=18m,--arg1 hello-again --arg2 universe @@ -465,7 +465,6 @@ Here are a few things you may want to try to fix this: - Never, ever, ever resend MLAT results back to ADSB or MLAT aggregators. Please DO NOT. This will ensure your data is discarded and may get you banned from the aggregator - If you feed your data to multiple aggregators, please do not enable MLAT for FlightRadar24 (per their request). Note that MLAT for FR24 using our containerized setup is disabled by default - #### Configuring the built-in MLAT Hub An "MLAT Hub" is an aggregator of MLAT results from several sources. Since the container is capable of sending MLAT data to multiple ADSB aggregators (like adsb.lol/fi/one, etc), we built in a capability to: @@ -503,12 +502,13 @@ Generally, there is little else to configure, but there are a few parameters tha | `MLATHUB_BEAST_REDUCE_OUT_PORT` | TCP port where consolidated MLAT results will be available in Beast format with reduced data rates | `31006` | | `MLATHUB_NET_CONNECTOR` | (Obsolete, please use `ULTRAFEEDER_CONFIG=mlathub,...` instead.) List of semi-colon (`;`) separated IP or host, port, and protocols where MLATHUB will connect to ingest or send MLAT data. 
It follows the same syntax as described in the [`READSB_NET_CONNECTOR` syntax section](#alternate-configuration-method-with-readsb_net_connector) above | Unset |
| `MLATHUB_DISABLE` | If set to `true`, the MLATHUB will be disabled even if there are `mlat-client`s running in the container | Unset |
+| `MLATHUB_ENABLE` | If set to `true`, the MLATHUB will be enabled even if there are no `mlat-client`s running in the container | Unset |
### Web Gui (`tar1090`) Configuration
The Container creates an interactive web interface displaying the aircraft, based on Wiedehopf's widely used [tar1090](https://github.com/wiedehopf/tar1090) software.
-The web interface is rendered to port `80` in the container. This can me mapped to a port on the host using the docker-compose `ports` directive.
+The web interface is rendered to port `80` in the container. This can be mapped to a port on the host using the docker-compose `ports` directive.
All of the variables below are optional.
@@ -527,8 +527,9 @@ Note - due to design limitations of `readsb`, the `tar1090` graphical interface
| `GZIP_LVL` | `1`-`9` are valid, lower lvl: less CPU usage, higher level: less network bandwidth used when loading the page | `3` |
| `PTRACKS` | Shows the last `$PTRACKS` hours of traces you have seen at the `?pTracks` URL | `8` |
| `TAR1090_FLIGHTAWARELINKS` | Set to `true` to enable FlightAware links in the web interface | `null` |
-| `TAR1090_ENABLE_AC_DB` | Set to `true` to enable extra information, such as aircraft type and registration, to be included in in `aircraft.json` output. Will use more memory; use caution on older Pis or similar devices. | `false` |
-| `HEYWHATSTHAT_PANORAMA_ID` | Your `heywhatsthat.com` panorama ID. See | |
+| `TAR1090_ENABLE_AC_DB` | Set to `true` to enable extra information, such as aircraft type and registration, to be included in `aircraft.json` output. Will use about 50 MB of extra memory | `false` |
+| `TAR1090_DB_LONGTYPE` | Set to `false` to remove the "desc", "ownOp" and "year" fields from `aircraft.json` when AC_DB is enabled. | `true` |
+| `HEYWHATSTHAT_PANORAMA_ID` | Your `heywhatsthat.com` panorama ID. See (this will reveal your exact location in the web interface) | |
| `HEYWHATSTHAT_ALTS` | Comma separated altitudes for multiple outlines. Use no units or `ft` for feet, `m` for meters, or `km` for kilometers. Only integer numbers are accepted, no decimals please | `12192m` (=40000 ft) |
| `HTTP_ACCESS_LOG` | Optional. Set to `true` to display HTTP server access logs. | `false` |
| `HTTP_ERROR_LOG` | Optional. Set to `false` to hide HTTP server error logs. | `true` |
@@ -536,6 +537,7 @@ Note - due to design limitations of `readsb`, the `tar1090` graphical interface
| `TAR1090_IMAGE_CONFIG_TEXT` | Text to display for the config link | `null` |
| `TAR1090_DISABLE` | Set to `true` to disable the web server and all websites (including the map, `graphs1090`, `heatmap`, `pTracks`, etc.) | Unset |
| `READSB_ENABLE_HEATMAP` | Set to `true` or leave unset to enable the HeatMap function available at `http://myip/?Heatmap`; set to `false` to disable the HeapMap function | `true` (enabled) |
+| `READSB_ENABLE_TRACES` | Save detailed globe history traces (one gzip-compressed JSON file per day per airframe; use `MAX_GLOBE_HISTORY` so you don't run out of inodes / disk space) | `false` |
| `TAR1090_AISCATCHER_SERVER` | If you want to show vessels from your AIS-Catcher instance on the map, put the (externally reachable) URL of your AIS-Catcher or ShipFeeder website in this parameter (incl. `https://`).
Note - if you are using "barebones" AIS-Catcher you should add `GEOJSON on` after the `-N` parameter on the `AIS-Catcher` command line. If you use [docker-shipfeeder](https://github.com/sdr-enthusiasts/docker-shipfeeder), no change is needed for that container | Empty | | `TAR1090_AISCATCHER_REFRESH` | Refresh rate (in seconds) of reading vessels from your AIS-Catcher instance. Defaults to 15 (secs) if omitted | `15` | @@ -564,9 +566,9 @@ Note - due to design limitations of `readsb`, the `tar1090` graphical interface | `TAR1090_DEFAULTCENTERLAT` | Default center (latitude) of the map. This setting is overridden by any position information provided by dump1090/readsb. All positions are in decimal degrees. | `45.0` | | `TAR1090_DEFAULTCENTERLON` | Default center (longitude) of the map. This setting is overridden by any position information provided by dump1090/readsb. All positions are in decimal degrees. | `9.0` | | `TAR1090_DEFAULTZOOMLVL` | The google maps zoom level, `0` - `16`, lower is further out. | `7` | -| `TAR1090_SITESHOW` | Center marker. If dump1090 provides a receiver location, that location is used and these settings are ignored. Set to `true` to show a center marker. | `false` | -| `TAR1090_SITELAT` | Center marker. If dump1090 provides a receiver location, that location is used and these settings are ignored. Position of the marker (latitude). | `45.0` | -| `TAR1090_SITELON` | Center marker. If dump1090 provides a receiver location, that location is used and these settings are ignored. Position of the marker (longitude). | `9.0` | +| `TAR1090_SITESHOW` | Display center marker. Setting this to false will NOT remove your location, see `READSB_RX_LOCATION_ACCURACY` for that. | `true` | +| `TAR1090_SITELAT` | Center marker. If readsb provides a receiver location, that location is used and these settings are ignored. Position of the marker (latitude). | `45.0` | +| `TAR1090_SITELON` | Center marker. If readsb provides a receiver location, that location is used and these settings are ignored. Position of the marker (longitude). | `9.0` | | `TAR1090_SITENAME` | The tooltip of the center marker. | `My Radar Site` | | `TAR1090_RANGE_OUTLINE_COLOR` | Colour for the range outline. | `#0000DD` | | `TAR1090_RANGE_OUTLINE_WIDTH` | Width for the range outline. | `1.7` | @@ -624,31 +626,35 @@ Note - due to design limitations of `readsb`, the `tar1090` graphical interface #### `graphs1090` Environment Parameters -| Variable | Description | Default | -| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| `GRAPHS1090_DARKMODE` | If set to `true`, `graphs1090` will be rendered in "dark mode". | Unset | -| `GRAPHS1090_RRD_STEP` | Interval in seconds to feed data into RRD files. | `60` | -| `GRAPHS1090_SIZE` | Set graph size, possible values: `small`, `default`, `large`, `huge`, `custom`. | `default` | -| `GRAPHS1090_ALL_LARGE` | Make the small graphs as large as the big ones by setting to `yes`. | `no` | -| `GRAPHS1090_FONT_SIZE` | Font size (relative to graph size). | `10.0` | -| `GRAPHS1090_MAX_MESSAGES_LINE` | Set to `true` to draw a reference line at the maximum message rate. | Unset | -| `GRAPHS1090_LARGE_WIDTH` | Defines the width of the larger graphs. | `1096` | -| `GRAPHS1090_LARGE_HEIGHT` | Defines the height of the larger graphs. | `235` | -| `GRAPHS1090_SMALL_WIDTH` | Defines the width of the smaller graphs. 
| `619` | -| `GRAPHS1090_SMALL_HEIGHT` | Defines the height of the smaller graphs. | `324` | -| `GRAPHS1090_DISK_DEVICE` | Defines which disk device (`mmc0`, `sda`, `sdc`, etc) is shown. Leave empty for default device | Unset | -| `GRAPHS1090_ETHERNET_DEVICE` | Defines which (wired) ethernet device (`eth0`, `enp0s`, etc) is shown. Leave empty for default device | Unset | -| `GRAPHS1090_WIFI_DEVICE` | Defines which (wireless) WiFi device (`wlan0`, `wlp3s0`, etc) is shown. Leave empty for default device | Unset | -| `GRAPHS1090_DISABLE` | Set to `true` to disable the entire GRAPHS1090 web page and associated data collection | Unset | -| `GRAPHS1090_DISABLE_CHART_CPU` | Set to `true` to disable the GRAPHS1090 CPU chart | Unset | -| `GRAPHS1090_DISABLE_CHART_TEMP` | Set to `true` to disable the GRAPHS1090 Temperature chart | Unset | -| `GRAPHS1090_DISABLE_CHART_MEMORY` | Set to `true` to disable the GRAPHS1090 Memory Utilization chart | Unset | -| `GRAPHS1090_DISABLE_CHART_NETWORK_BANDWIDTH` | Set to `true` to disable the GRAPHS1090 Network Bandwidth chart | Unset | -| `GRAPHS1090_DISABLE_CHART_DISK_USAGE` | Set to `true` to disable the GRAPHS1090 Disk Usage chart | Unset | -| `GRAPHS1090_DISABLE_CHART_DISK_IOPS` | Set to `true` to disable the GRAPHS1090 Disk IOPS chart | Unset | -| `GRAPHS1090_DISABLE_CHART_DISK_BANDWIDTH` | Set to `true` to disable the GRAPHS1090 Disk Bandwidth chart | Unset | -| `ENABLE_AIRSPY` | Optional, set to any non-empty value if you want to enable the special AirSpy graphs. See below for additional configuration requirements | Unset | -| `URL_AIRSPY` | Optional, set to the URL where the airspy stats are available, for example `http://airspy_adsb` | Unset | +| Variable | Description | Default | +| -------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `GRAPHS1090_DARKMODE` | If set to `true`, `graphs1090` will be rendered in "dark mode". | Unset | +| `GRAPHS1090_RRD_STEP` | Interval in seconds to feed data into RRD files. | `60` | +| `GRAPHS1090_SIZE` | Set graph size, possible values: `small`, `default`, `large`, `huge`, `custom`. | `custom` | +| `GRAPHS1090_ALL_LARGE` | Make the small graphs as large as the big ones by setting to `yes`. | `no` | +| `GRAPHS1090_FONT_SIZE` | Font size (relative to graph size). | `10.0` | +| `GRAPHS1090_MAX_MESSAGES_LINE` | Set to `true` to draw a reference line at the maximum message rate. | Unset | +| `GRAPHS1090_LARGE_WIDTH` | Defines the width of the larger graphs. (if size is set to custom) | `1096` | +| `GRAPHS1090_LARGE_HEIGHT` | Defines the height of the larger graphs. (if size is set to custom) | `235` | +| `GRAPHS1090_SMALL_WIDTH` | Defines the width of the smaller graphs. (if size is set to custom) | `619` | +| `GRAPHS1090_SMALL_HEIGHT` | Defines the height of the smaller graphs. (if size is set to custom) | `324` | +| `GRAPHS1090_DISK_DEVICE` | Defines which disk device (`mmc0`, `sda`, `sdc`, etc) is shown. Leave empty for default device | Unset | +| `GRAPHS1090_ETHERNET_DEVICE` | Defines which (wired) ethernet device (`eth0`, `enp0s`, etc) is shown. Leave empty for default device | Unset | +| `GRAPHS1090_WIFI_DEVICE` | Defines which (wireless) WiFi device (`wlan0`, `wlp3s0`, etc) is shown. 
Leave empty for default device | Unset |
+| `GRAPHS1090_DISABLE` | Set to `true` to disable the entire GRAPHS1090 web page and associated data collection | Unset |
+| `GRAPHS1090_DISABLE_CHART_CPU` | Set to `true` to disable the GRAPHS1090 CPU chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_TEMP` | Set to `true` to disable the GRAPHS1090 Temperature chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_MEMORY` | Set to `true` to disable the GRAPHS1090 Memory Utilization chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_NETWORK_BANDWIDTH` | Set to `true` to disable the GRAPHS1090 Network Bandwidth chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_DISK_USAGE` | Set to `true` to disable the GRAPHS1090 Disk Usage chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_DISK_IOPS` | Set to `true` to disable the GRAPHS1090 Disk IOPS chart | Unset |
+| `GRAPHS1090_DISABLE_CHART_DISK_BANDWIDTH` | Set to `true` to disable the GRAPHS1090 Disk Bandwidth chart | Unset |
+| `GRAPHS1090_WWW_TITLE` | Set title for the web page (displayed in the browser title or tab bar) | `graphs1090` |
+| `GRAPHS1090_WWW_HEADER` | Set header text for the web page | `Perf. Graphs` |
+| `GRAPHS1090_HIDE_SYSTEM` | Hide the system graphs: don't render them and don't collect system data | `no` |
+| `GRAPHS1090_DEFAULT_APPEND` | Append to /etc/default/graphs1090, see | Unset |
+| `ENABLE_AIRSPY` | Optional, set to any non-empty value if you want to enable the special AirSpy graphs. See below for additional configuration requirements | Unset |
+| `URL_AIRSPY` | Optional, set to the URL where the airspy stats are available, for example `http://airspy_adsb` | Unset |

#### Enabling UAT data

@@ -663,7 +669,7 @@ ADS-B over UAT data is transmitted in the 978 MHz band, and this is used in the

2. Install the [`docker-dump978` container](https://github.com/sdr-enthusiasts/docker-dump978). Note - only containers downloaded/deployed on/after Feb 8, 2023 will work.

-Note that you \*_must_- configure `URL_978` to point at a working skyaware978 website with `aircraft.json` data feed. This means that the URL `http://dump978/skyaware978/data/aircraft.json` must return valid JSON data to this `tar1090` container.
+Note that you *must* configure `URL_978` to point at a working skyaware978 website with `aircraft.json` data feed. This means that the URL `http://dump978/skyaware978/data/aircraft.json` must return valid JSON data to this `tar1090` container.

#### Enabling AirSpy graphs

@@ -750,10 +756,10 @@ The feature assumes that you have mapped `/var/lib/collectd` to a volume (to ens
...
```

-| Environment Variable | Purpose | Default |
-| --------------------------------- | ------------------------------------------------------------------------------------------- | ------- |
-| `GRAPHS1090_REDUCE_IO=` | Optional Set to `true` to reduce the write cycles for `graphs1090` | Unset |
-| `GRAPHS1090_REDUCE_IO_FLUSH_IVAL` | Interval (in secs) over which the `graphs1090` data is written back to non-volatile storage | `3600` |
+| Environment Variable | Purpose | Default |
+| --------------------------------- | ---------------------------------------------------------------------------------------------- | ------- |
+| `GRAPHS1090_REDUCE_IO` | Optional. Set to `true` to reduce the write cycles for `graphs1090` | Unset |
+| `GRAPHS1090_REDUCE_IO_FLUSH_IVAL` | Interval (e.g. `1h`, `6h`, `24h`, `1d`, `2d`) at which `graphs1090` data is written back to non-volatile storage | `1d` |

### `timelapse1090` Configuration

@@ -777,7 +783,7 @@ For this to work, you should install and configure GPSD to work on your host mac

```bash
sudo apt update && sudo apt install -y gpsd
-cat < EOM | sudo tee /etc/default/gpsd
+cat << EOM | sudo tee /etc/default/gpsd
# Devices gpsd should collect to at boot time.
# They need to be read/writeable, either by user gpsd or the group dialout.
DEVICES="/dev/ttyACM0"
@@ -786,7 +792,7 @@ GPSD_OPTIONS="-G"
# Automatically hot add/remove USB GPS devices via gpsdctl
USBAUTO="true"
EOM
-cat < EOM | sudo tee /lib/systemd/system/gpsd.socket
+cat << EOM | sudo tee /lib/systemd/system/gpsd.socket
[Unit]
Description=GPS (Global Positioning System) Daemon Sockets
@@ -832,7 +838,7 @@ If you have any issues, readsb will use verbose output if you add the `GPSD_DEBU

### Optional parameters regulating the restart of `mlat-client` when the location changes

-The following parameters are all optional and are subject to change. You don't need to set them unless you want to change the default behavior:
+The following parameters are all optional and are subject to change. These variables should be added to the `environment:` section of your `docker-compose.yml`; they will not work if entered into the `.env` file. You don't need to set them unless you want to change the default behavior.

| Environment Variable | Purpose | Default |
| -------------------- | ------- | ------- |
@@ -840,6 +846,17 @@ The following parameters are all optional and are subject to change. You don't n
| `GPSD_MLAT_WAIT` | The wait period (in seconds) your station must be stationary before mlat is started (minimum 90 seconds) | `90` (seconds) |
| `GPSD_CHECK_INTERVAL` | How often the container checks for updated location information. (minimum 5 seconds) | `30` (seconds) |

+See the example below:
+
+```yaml
+  environment:
+    ...
+    - GPSD_MIN_DISTANCE=20
+    - GPSD_MLAT_WAIT=90
+    - GPSD_CHECK_INTERVAL=30
+    ...
+```
+
## Web Pages

If you have configured the container as described above, you should be able to browse to the following web pages:

@@ -874,8 +891,8 @@ See [README-grafana.md](README-grafana.md) for detailed instruction on how to co

```yaml
services:
-  tar1090:
-    image: ghcr.io/sdr-enthusiasts/docker-tar1090:telegraf
+  ultrafeeder:
+    image: ghcr.io/sdr-enthusiasts/docker-adsb-ultrafeeder:telegraf
...
```

@@ -890,7 +907,6 @@ In order for Telegraf to serve a [Prometheus](https://prometheus.io) endpoint, t
| Variable | Description |
| ------------------- | ------------------------------------------------------------------------ |
| `PROMETHEUS_ENABLE` | Set to `true` for a Prometheus endpoint on `http://0.0.0.0:9273/metrics` |
-| `PROMETHEUSPORT` | TCP port for the Prometheus endpoint. Default value is `9273` |

### Output from Ultrafeeder to InfluxDBv2

@@ -926,6 +942,19 @@ If you want to use `ultrafeeder` _only_ as a SDR decoder but without any mapping

- Set the parameter `TAR1090_DISABLE=true`. This will prevent the `nginx` webserver and any websites to be launched and no `collectd` (graphs1090) or `rrd` (ADSB message history) data to be collected or retained.
- Make sure to use `ghcr.io/sdr-enthusiasts/docker-adsb-ultrafeeder:latest` and specifically NOT the `ghcr.io/sdr-enthusiasts/docker-adsb-ultrafeeder:telegraf` label as Telegraf adds a LOT of resource use to the container

+## Offline maps
+
+There is an option to use basic offline maps, limited in zoom level:
+
+- Download the tiles (don't install tar1090):
+- Add a volume mapping so the container can access the tiles:
+
+```yaml
+  volumes:
+    - /usr/local/share/osm_tiles_offline:/usr/local/share/osm_tiles_offline
+```
+
+
## Logging

All logs are to the container's stdout and can be viewed with `docker logs -t [-f] container`.
diff --git a/docker-compose.yml b/docker-compose.yml index 86247a4..98c4d00 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,7 +4,6 @@ services:
# Note - if you want to enable telegraf for use with InfluxDB/Prometheus and Grafana,
# use the following image instead:
# image: ghcr.io/sdr-enthusiasts/docker-adsb-ultrafeeder:telegraf
-    tty: true
container_name: ultrafeeder
hostname: ultrafeeder
restart: unless-stopped
@@ -106,7 +105,7 @@ services:
- /opt/adsb/ultrafeeder/globe_history:/var/globe_history
- /opt/adsb/ultrafeeder/graphs1090:/var/lib/collectd
- /proc/diskstats:/proc/diskstats:ro
-      - /dev:/dev:ro
+      - /dev/bus/usb:/dev/bus/usb
tmpfs:
- /run:exec,size=256M
- /tmp:size=128M
diff --git a/downloads/adsbexchange-json-status b/downloads/adsbexchange-json-status index c4b260c..5774993 100644 --- a/downloads/adsbexchange-json-status +++ b/downloads/adsbexchange-json-status @@ -1,5 +1,7 @@
#!/bin/bash

+# DEPRECATED, see ./rootfs/usr/local/bin/adsbexchange-json-status
+
#
# Upload output data from decoder to remote server
#
@@ -13,7 +15,7 @@ DNS_CACHE=1
# Cache time, default 10min
DNS_TTL=600
# Set this to 1 if you want to force using the cache always even if there is a local resolver.
-DNS_IGNORE_LOCAL=0
+DNS_IGNORE_LOCAL=1

# List all paths, IN PREFERRED ORDER, separated by a SPACE
# By default, only use the json from the feed client
@@ -208,18 +210,11 @@ if [ $LOCAL_RESOLVER -ne 0 ]; then
fi
fi

-if ! command -v host &>/dev/null || ! command -v perl &>/dev/null; then
- echo "host command or perl not available, disabling DNS Cache" >&2
+if ! command -v host &>/dev/null; then
+ echo "host command not available, disabling DNS Cache" >&2
DNS_CACHE=0
fi

-VER_OK=$( echo "$CURL_VER" | perl -ne '@v=split(/\./); if ($v[0] == 7) { if ($v[1] >= 22) { printf("1");exit; } else { printf("0");exit; } } if ($v[0] > 7) { printf("1");exit; } printf("0");exit;')
-if [ $VER_OK -ne 1 ]; then
- echo "WARNING: curl version is too old ($CURL_VER < 7.22.0), not using script's DNS cache."
- DNS_CACHE=0
-fi
-
# If we have a local resolver, just use the URL.
If not, look up the host and use that IP (replace the URL appropriately)
# -- DNS Setup done
diff --git a/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/dependencies.d/startup b/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/dependencies.d/startup new file mode 100644 index 0000000..e69de29
diff --git a/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/run b/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/run new file mode 100755 index 0000000..93b5eb4 --- /dev/null +++ b/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/run @@ -0,0 +1,2 @@
+#!/bin/sh
+exec /etc/s6-overlay/scripts/aggregator-urls
diff --git a/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/type b/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/type new file mode 100644 index 0000000..5883cff --- /dev/null +++ b/rootfs/etc/s6-overlay/s6-rc.d/aggregator-urls/type @@ -0,0 +1 @@
+longrun
diff --git a/rootfs/etc/s6-overlay/s6-rc.d/readsb/finish b/rootfs/etc/s6-overlay/s6-rc.d/readsb/finish deleted file mode 100755 index 23065e6..0000000 --- a/rootfs/etc/s6-overlay/s6-rc.d/readsb/finish +++ /dev/null @@ -1,2 +0,0 @@
-#!/bin/sh
-exec /etc/s6-overlay/scripts/readsb-finish
diff --git a/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/aggregator-urls b/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/aggregator-urls new file mode 100644 index 0000000..e69de29
diff --git a/rootfs/etc/s6-overlay/scripts/adsbx-stats b/rootfs/etc/s6-overlay/scripts/adsbx-stats index be21583..8143682 100755 --- a/rootfs/etc/s6-overlay/scripts/adsbx-stats +++ b/rootfs/etc/s6-overlay/scripts/adsbx-stats @@ -23,47 +23,35 @@ source /scripts/interpret_ultrafeeder_config
# Check if ADSBExchange is configured
if ! grep -i adsbexchange.com <<< "$ULTRAFEEDER_CONFIG" >/dev/null 2>&1; then
"${s6wrap[@]}" echo "AdsbExchange not configured - no stats package needed"
- exec sleep infinity
+ stop_service
fi
# Check if ADSBExchange stats are disabled
if chk_disabled "$ADSBX_STATS" ; then
"${s6wrap[@]}" echo "AdsbExchange stats disabled"
- exec sleep infinity
+ stop_service
fi
# prep work:
mkdir -p /run/adsbexchange-stats
-# Install the Stats package:
-if [[ ! -f /etc/default/adsbexchange-stats ]]; then
- echo -e "UUID_FILE=/run/uuid\nJSON_PATHS=(\"/run/readsb\")" > /etc/default/adsbexchange-stats
-fi
-if [[ ! -f /usr/local/bin/json-status ]]; then
- if ! curl -sSL -o /usr/local/bin/json-status https://raw.githubusercontent.com/sdr-enthusiasts/docker-adsb-ultrafeeder/main/downloads/adsbexchange-json-status; then
- "${s6wrap[@]}" echo "ERROR: AdsbExchange configured, but cannot download stats package! AdsbExchange will be fed but stats will not be available"
- exec sleep infinity
- fi
- chmod 755 /usr/local/bin/json-status
-fi
-
# set the UUID:
-if [[ ! -f /usr/local/share/adsbexchange/adsbx-uuid ]]; then
- # get UUID from ULTRAFEEDER_CONFIG if it exists
- for entry in "${READSB_CONF_ARR[@]}"; do
- if echo "$entry" | grep -q 'adsbexchange.*uuid'; then
- ADSBX_UUID="$(sed 's|.*adsbexchange.*uuid=\([a-f0-9-]\+\).*|\1|g' <<< "$entry")"
- fi
- done
- ADSBX_UUID="${ADSBX_UUID:-${UUID}}" || true # ...else get it from the UUID param
- ADSBX_UUID="${ADSBX_UUID:-$(cat /proc/sys/kernel/random/uuid 2>/dev/null)}" || true # ...else generate a random one
- if [[ -n "$ADSBX_UUID" ]]; then
- mkdir -p /usr/local/share/adsbexchange
- echo "$ADSBX_UUID" > /usr/local/share/adsbexchange/adsbx-uuid
- "${s6wrap[@]}" echo "Using UUID $ADSBX_UUID for AdsbExchange"
- else
- "${s6wrap[@]}" echo "WARNING: no UUID can be identified to be used with AdsbExchange; a random one will be generated every time the container is started."
+# get UUID from ULTRAFEEDER_CONFIG if it exists
+for entry in "${READSB_CONF_ARR[@]}"; do
+ if echo "$entry" | grep -q 'adsbexchange.*uuid'; then
+ ADSBX_UUID="$(sed 's|.*adsbexchange.*uuid=\([a-f0-9-]\+\).*|\1|g' <<< "$entry")"
fi
+done
+
+ADSBX_UUID="${ADSBX_UUID:-${UUID}}" || true # ...else get it from the UUID param
+ADSBX_UUID="${ADSBX_UUID:-$(cat /proc/sys/kernel/random/uuid 2>/dev/null)}" || true # ...else generate a random one
+
+if [[ -n "$ADSBX_UUID" ]]; then
+ # export the variable, it's used by json-status which is started by this script at the very end
+ export ADSBX_UUID
+ "${s6wrap[@]}" echo "Using UUID $ADSBX_UUID for AdsbExchange"
+else
+ "${s6wrap[@]}" echo "WARNING: no UUID can be identified to be used with AdsbExchange; a random one will be generated every time the container is started."
fi
# wait until readsb is established... @@ -79,13 +67,13 @@ if ! pgrep readsb >/dev/null; then
sleep 2 & wait $!
fi -# Let json-status start up, and then print the Anywhere Map and Anywhere Stats URLs to the container logs: +# Let adsbexchange-json-status start up, and then print the Anywhere Map and Anywhere Stats URLs to the container logs: { sleep 15 - AnywhereMap="$(curl -sSL https://www.adsbexchange.com/myip/ | grep "ADSBx Anywhere Map" | sed -n 's|.*\(https.*\)\" class.*|\1|p')" - AnywhereStats="$(curl -sSL https://www.adsbexchange.com/myip/ | grep "ADSBx Anywhere Stats" | sed -n 's|.*\(https.*\)\" class.*|\1|p')" + AnywhereMap="$(curl -sSL https://www.adsbexchange.com/myip/ | grep "https://globe.adsbexchange.com/?feed=" | sed -n 's|.*\(https.*\)\"|\1|p')" + AnywhereStats="$(curl -sSL https://www.adsbexchange.com/myip/ | grep "https://www.adsbexchange.com/api/feeders/?feed=" | sed -n 's|.*\(https.*\)\"|\1|p')" "${s6wrap[@]}" echo "Your AdsbExchange Anywhere Map URL is $AnywhereMap" "${s6wrap[@]}" echo "Your AdsbExchange Anywhere Stats URL is $AnywhereStats" } & -"${s6wrap[@]}" echo "invoking: /usr/local/bin/json-status" -exec "${s6wrap[@]}" /usr/local/bin/json-status +"${s6wrap[@]}" echo "invoking: /usr/local/bin/adsbexchange-json-status" +exec "${s6wrap[@]}" /usr/local/bin/adsbexchange-json-status diff --git a/rootfs/etc/s6-overlay/scripts/readsb-finish b/rootfs/etc/s6-overlay/scripts/aggregator-urls similarity index 59% rename from rootfs/etc/s6-overlay/scripts/readsb-finish rename to rootfs/etc/s6-overlay/scripts/aggregator-urls index 4d60b96..828dbf3 100755 --- a/rootfs/etc/s6-overlay/scripts/readsb-finish +++ b/rootfs/etc/s6-overlay/scripts/aggregator-urls @@ -1,5 +1,5 @@ #!/command/with-contenv bash -# shellcheck shell=bash disable=SC1091 +# shellcheck shell=bash disable=SC2015,SC2016,SC1091 #--------------------------------------------------------------------------------------------- # Copyright (C) 2023-2024, Ramon F. Kolb (kx1t) and contributors @@ -18,5 +18,17 @@ #--------------------------------------------------------------------------------------------- source /scripts/common +source /scripts/interpret_ultrafeeder_config -rm -f /run/readsb-prometheus.prom || true +# wait 30 seconds for connections to be established +sleep 30 + +if [[ "${READSB_CONF_ARR[*]}" =~ "in.adsb.lol" ]]; then + myadsblol="$(curl -sLD - https://my.adsb.lol -o /dev/null -w '%{url_effective}' 2>&1 | sed '/location: /h;g;$!d;s/^.* \(.*\)$/\1/')" + if [[ -n "$myadsblol" ]] && [[ ! $myadsblol =~ sorry-but-i-could-not-find-your-receiver ]]; then + "${s6wrap[@]}" echo "ADSB.LOL station map available at $myadsblol" + fi +fi + +# one shots have weird behaviour, just use longrun and stop the service once this is done +stop_service diff --git a/rootfs/etc/s6-overlay/scripts/mlat-client b/rootfs/etc/s6-overlay/scripts/mlat-client index 326314e..a2592d3 100755 --- a/rootfs/etc/s6-overlay/scripts/mlat-client +++ b/rootfs/etc/s6-overlay/scripts/mlat-client @@ -24,6 +24,9 @@ source /scripts/interpret_ultrafeeder_config s6wrap=(s6wrap --quiet --timestamps --prepend="$SCRIPT_NAME") +# run with slightly lower priority +renice 5 $$ || true + "${s6wrap[@]}" --args echo "Started as an s6 service" MLAT_CMD="/usr/bin/mlat-client" @@ -42,15 +45,17 @@ declare -A pid_array if [[ -z "${MLAT_CONFIG}" ]] then "${s6wrap[@]}" --args echo "Warning: MLAT_CONFIG not defined - MLAT will be disabled." - exec sleep infinity + stop_service fi if [[ -z "${MLAT_USER}" ]] && [[ -z "${UUID}" ]] then "${s6wrap[@]}" --args echo "ERROR: either UUID or MLAT_USER must be defined - MLAT will be disabled." 
- exec sleep infinity
+ stop_service
fi

+mkdir -p /run/mlat-client
+
function check_gpsd() {
if (( GPSD == 0 )) || ! [[ -f /run/readsb/gpsd.json ]]; then
return 1
fi
@@ -234,7 +239,10 @@ do
MLAT_PARAM+=(--input-connect "${input_connect_arg:-localhost:30005}")

if [[ -n "${name_arg}" ]] || [[ -n "${MLAT_USER}" ]]; then
- MLAT_PARAM+=(--user \""${name_arg:-${MLAT_USER}}"\")
+ # remove spaces from mlat name to avoid startup errors
+ # the readme already says not to put spaces, but this way mlat will keep working even if the name contains spaces
+ name=$( sed -e 's/ /_/g' <<< "${name_arg:-${MLAT_USER}}")
+ MLAT_PARAM+=(--user "${name}")
else
rnd="${RANDOM}"
"${s6wrap[@]}" --args echo "WARNING: MLAT_USER is not set - using random number \"${rnd}\" as MLAT_USER"
@@ -276,6 +284,8 @@ do
"${s6wrap[@]}" --args echo "WARNING: UUID is not defined, proceeding without UUID"
fi

+ MLAT_PARAM+=(--stats-json "/run/mlat-client/${params[0]}:${params[1]}.json")
+
# Now add the extra_args, if any:
[[ -n "${extra_args}" ]] && MLAT_PARAM+=("${extra_args}") || true
diff --git a/rootfs/etc/s6-overlay/scripts/mlathub b/rootfs/etc/s6-overlay/scripts/mlathub index 1efda3a..4edc250 100755 --- a/rootfs/etc/s6-overlay/scripts/mlathub +++ b/rootfs/etc/s6-overlay/scripts/mlathub @@ -29,14 +29,14 @@ if ! [[ "$LOGLEVEL" =~ ^(verbose|error|none)$ ]]; then
LOGLEVEL="verbose"
fi

-if [[ -z "${MLAT_CONFIG}" ]] && [[ -z "$MLATHUB_NET_CONNECTOR" ]] && [[ ${#MLATHUB_CONF_ARR[@]} == 0 ]]; then
+if [[ -z "${MLAT_CONFIG}" ]] && [[ -z "$MLATHUB_NET_CONNECTOR" ]] && [[ ${#MLATHUB_CONF_ARR[@]} == 0 ]] && ! chk_enabled "${MLATHUB_ENABLE}"; then
"${s6wrap[@]}" --args echo "No MLAT servers have been defined in MLAT_CONFIG and no external sources have been defined in MLATHUB_NET_CONNECTOR - no need to start MLATHUB"
- exec sleep infinity
+ stop_service
fi

if chk_enabled "${MLATHUB_DISABLE}"; then
"${s6wrap[@]}" --args echo "MLATHUB is disabled."
- exec sleep infinity
+ stop_service
fi

# Build the readsb command line based on options
diff --git a/rootfs/etc/s6-overlay/scripts/readsb b/rootfs/etc/s6-overlay/scripts/readsb index 95eef4e..d852b30 100755 --- a/rootfs/etc/s6-overlay/scripts/readsb +++ b/rootfs/etc/s6-overlay/scripts/readsb @@ -113,6 +113,10 @@ if chk_enabled "${READSB_ENABLE_HEATMAP:-true}"; then
READSB_CMD+=("--heatmap=${READSB_HEATMAP_INTERVAL:-15}")
fi

+if chk_enabled "${READSB_ENABLE_TRACES}"; then
+ READSB_CMD+=("--write-globe-history=/var/globe_history")
+fi
+
if ! chk_enabled "$READSB_NET_SBS_DISABLE_REDUCE"; then
READSB_CMD+=("--net-sbs-reduce")
fi
@@ -283,6 +287,9 @@ if chk_enabled "$TAR1090_ENABLE_AC_DB"; then
# fallback to container supplied not updated csv.gz
READSB_CMD+=("--db-file=$TAR1090_INSTALL_DIR/aircraft.csv.gz")
fi
+ if !
chk_disabled "$TAR1090_DB_LONGTYPE"; then + READSB_CMD+=("--db-file-lt") + fi fi # Handle "--device-type" @@ -364,6 +371,12 @@ if chk_enabled "$PROMETHEUS_ENABLE"; then READSB_CMD+=("--write-prom=/run/readsb-prometheus.prom") fi +# wait 15 seconds if this is not the first startup +if [[ $(s6-svdt /run/service/readsb | wc -l) != 0 ]]; then + "${s6wrap[@]}" --args echo "delaying restart by 15 seconds" + sleep 15 +fi + # shellcheck disable=SC2086 if [[ "${LOGLEVEL,,}" == "verbose" ]]; then exec "${s6wrap[@]}" --args "${READSB_BIN}" "${READSB_CMD[@]}" $READSB_AUTOMATION_ARGS $READSB_EXTRA_ARGS diff --git a/rootfs/etc/s6-overlay/startup.d/99-prometheus-conf b/rootfs/etc/s6-overlay/startup.d/99-prometheus-conf deleted file mode 100755 index cc0562e..0000000 --- a/rootfs/etc/s6-overlay/startup.d/99-prometheus-conf +++ /dev/null @@ -1,25 +0,0 @@ -#!/command/with-contenv bash -#shellcheck shell=bash disable=SC1091 - -source /scripts/common - -if ! chk_enabled "$PROMETHEUS_ENABLE"; then - exit 0 -fi - -# serve port 9274 always replying with stats.prom regardless of requested path - -cat > /etc/nginx/sites-enabled/prometheus_9274 <&1) +RV=$? +if [ $RV -ne 0 ]; then + echo "ERROR: Unable to write to $TMPFILE, aborting! ($T)" + exit 99 +fi + + +# load bash sleep builtin if available +[[ -f /usr/lib/bash/sleep ]] && enable -f /usr/lib/bash/sleep sleep || true + +# Do this a few times, in case we're still booting up (wait a bit between checks) +CHECK_LOOP=0 +while [ "x$JSON_DIR" = "x" ]; do + # Check the paths IN ORDER, preferring the first one we find + for i in ${!JSON_PATHS[@]}; do + CHECK=${JSON_PATHS[$i]} + + if [ -d $CHECK ]; then + JSON_DIR=$CHECK + break + fi + done + + # Couldn't find any of them... + if [ "x$JSON_DIR" = "x" ]; then + CHECK_LOOP=$(( CHECK_LOOP + 1 )) + + if [ $CHECK_LOOP -gt 4 ]; then + # Bad news. Complain and exit. + echo "ERROR: Tried multiple times, could not find any of the directories - ABORTING!" + exit 10 + fi + echo "No valid data source directory found, do you have the adsbexchange feed scripts installed? Tried each of: [${JSON_PATHS[@]}]" + sleep 20 + fi +done + +UUID=$ADSBX_UUID + +if ! [[ $UUID =~ ^\{?[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}\}?$ ]]; then + # Data in UUID file is invalid + echo "FATAL: Data in UUID file was invalid, exiting!" + exit 1 +fi + +##################### +# DNS cache setup # +##################### + +declare -A DNS_LOOKUP +declare -A DNS_EXPIRE + +# Let's FIRST make sure our version of curl will support what we need (--resolve arg) +CURL_VER=$( curl -V | head -1 | awk '{print $2}' ) +if [ "x$CURL_VER" = "x" ]; then + echo "FATAL - curl is malfunctioning, can't get version info." + exit 11 +fi + +# This routine assumes you do no santiy-checking. +# +# Checks for the host in $DNS_LOOKUP{}, and if the corresponding $DNS_EXPIRE{} is less than NOW, return success. +# Otherwise, try looking it up. Save value if lookup succeeded. +# +# Returns: +# On Success: returns 0, and host will be in DNS_LOOKUP assoc array. +# On Fail: Various return codes: +# - 10 = No Hostname Provided +# - 20 = Hostname Format Invalid +# - 30 = Lookup Failed even after $DNS_MAX_LOOPS tries +DNS_WAIT=5 +DNS_MAX_LOOPS=2 + +dns_lookup () { + local HOST=$1 + + local NOW=$( date +%s ) + + # You need to pass in a hostname :) + if [ "x$HOST" = "x" ]; then + echo "ERROR: dns_lookup called without a hostname" >&2 + return 10 + fi + + # (is it even a syntactically-valid hostname?) + if ! 
[[ $HOST =~ ^[a-zA-Z0-9\.-]+$ ]]; then + echo "ERROR: Invalid hostname passed into dns_lookup [$HOST]" >&2 + return 20 + fi + + # If the host is cached, and the TTL hasn't expired, return the cached data. + if [ ${DNS_LOOKUP[$HOST]} ]; then + if [ ${DNS_EXPIRE[$HOST]} -ge $NOW ]; then + return 0 + fi + fi + + # Try this several times + local LOOP=$DNS_MAX_LOOPS + + while [ $LOOP -ge 1 ]; do + # Ok, let's look this hostname up! Use the first IP returned. + # - XXX : WARNING: This assumed the output format of 'host -v' doesn't change drastically! XXX - + # - Because this uses the "Trying" line, it should work for non-FQDN lookups, too - + + sleep $DNS_WAIT & + HOST_IP=$( host -v -W $DNS_WAIT -t a "$HOST" | perl -ne 'if (/^Trying "(.*)"/){$h=$1; next;} if (/^$h\.\s+(\d+)\s+IN\s+A\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/) {$i=$2; last}; END {printf("%s", $i);}' ) + RV=$? + # If this is empty, something failed. Sleep some and try again... + if [ $RV -ne 0 ] || [ "x$HOST_IP" == "x" ]; then + if ping -c1 "$HOST" &>/dev/null && ! host -v -W $DNS_WAIT -t a "$HOST" &>/dev/null; then + echo "host not working but ping is, disabling DNS caching!" + DNS_CACHE=0 + return 1 + fi + echo "Failure resolving [$HOST], waiting and trying again..." >&2 + LOOP=$(( LOOP - 1 )) + wait + continue + fi + # If we get here, we successfully resolved it + break; + done + + # If LOOP is zero, Something Bad happened. + if [ $LOOP -le 0 ]; then + echo "FATAL: unable to resolve $HOST even after $DNS_MAX_LOOPS tries. Giving up." >&2 + return 30 + fi + + # Resolved ok! + NOW=$( date +%s ) + DNS_LOOKUP["$HOST"]=$HOST_IP + DNS_EXPIRE["$HOST"]=$(( NOW + DNS_TTL )) + return 0 +} + +# First, see if we have a localhost resolver... +# - Only look at the first 'nameserver' entry in resolv.conf +# - This will assume any 127.x.x.x resolver entry is "local" +LOCAL_RESOLVER=$( grep nameserver /etc/resolv.conf | head -1 | egrep -c '[[:space:]]127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' ) +if [ $LOCAL_RESOLVER -ne 0 ]; then + if [ $DNS_IGNORE_LOCAL -eq 1 ]; then + echo "Found local resolver in resolv.conf, but DNS_IGNORE_LOCAL is on, so ignoring" >&2 + else + echo "Found local resolver in resolv.conf, disabling DNS Cache" >&2 + DNS_CACHE=0 + fi +fi + +if ! command -v host &>/dev/null; then + echo "host command not available, disabling DNS Cache" >&2 + DNS_CACHE=0 +fi + + +# If we have a local resolver, just use the URL. If not, look up the host and use that IP (replace the URL appropriately) +# -- DNS Setup done + + +echo "Using UUID [${UUID}] for stats uploads" +echo "Using JSON directory [${JSON_DIR}] for source data" + + +if [ $DNS_CACHE -ne 0 ]; then + echo "Using script's DNS cache ($DNS_TTL seconds)" +else + echo "NOT using script's DNS cache" +fi + +JSON_FILE="${JSON_DIR}/aircraft.json" + +STAT_COUNT=0 +# Grab the current timestamp of the file. Try in a loop a few times, in case +while [ $STAT_COUNT -lt 5 ]; do + JSON_STAT=$(stat --printf="%Y" $JSON_FILE 2> /dev/null) + RV=$? + + if [ $RV -eq 0 ]; then + break + fi + STAT_COUNT=$(( STAT_COUNT + 1 )) + sleep 15 +done + +# Bad juju if we still don't have a stat... +if [ "x$JSON_STAT" = "x" ]; then + echo "ERROR: Can't seem to stat $JSON_FILE at startup, bailing out..." + exit 15 +fi + +# Complain if this file seems really old +NOW=$(date +%s) +DIFF=$(( NOW - JSON_STAT )) +if [ $DIFF -gt 60 ]; then + echo "WARNING: $JSON_FILE seems old, are you sure we're using the right path?" +fi + +# How long to wait before uploads, minimum (in seconds) +WAIT_TIME=5 + +# random sleep on startup ... 
reduce load spikes
+sleep "$(( RANDOM % WAIT_TIME )).$(( RANDOM % 100))"
+
+# How long curl will wait to send data (10 sec default)
+MAX_CURL_TIME=10
+
+# How much time (sec) has to pass since last JSON update before we say something
+# Initial value is "AGE_COMPLAIN", and then it complains every "AGE_INTERVAL" after that
+# Defaults are:
+# AGE_COMPLAIN = 30 sec
+# AGE_INTERVAL = 30 min (1800 sec)
+AGE_COMPLAIN=30
+AGE_INTERVAL=$(( 30 * 60 ))
+OLD_AGE=$AGE_COMPLAIN
+while true; do
+ wait
+ # make this loop from now to the next start last exactly $WAIT_TIME seconds
+ # sleep in the background then wait for it at the end of the loop
+ sleep $WAIT_TIME &
+
+ NOW=$(date +%s)
+
+ # Grab new stat. If it fails, wait longer (otherwise assign to the main var)
+ NEW_STAT=$(stat --printf="%Y" $JSON_FILE 2> /dev/null)
+ RV=$?
+ if [ $RV -ne 0 ]; then
+ sleep 10
+ else
+ JSON_STAT=$NEW_STAT
+ fi
+ DIFF=$(( NOW - JSON_STAT ))
+ if [ $DIFF -gt $OLD_AGE ]; then
+ echo "WARNING: JSON file $JSON_FILE has not been updated in $DIFF seconds. Did your decoder die?"
+ OLD_AGE=$(( OLD_AGE + AGE_INTERVAL ))
+ else
+ # Reset this here, in case it comes back ;)
+ OLD_AGE=$AGE_COMPLAIN
+ fi
+
+ # Move the JSON somewhere before operating on it...
+
+ rm -f $TMPFILE $NEWFILE
+ CP=$(cp $JSON_FILE $TMPFILE 2>&1)
+ RV=$?
+ if [ $RV -ne 0 ]; then
+ # cp failed (file changed during copy, usually), wait a few and loop again
+ sleep 2
+ continue
+ fi
+
+ if STATUS=$(vcgencmd get_throttled 2>/dev/null | tr -d '"'); then
+ STATUS="${STATUS#*=}"
+ else
+ STATUS=""
+ fi
+
+ if ! jq -c \
+ --arg STATUS "$STATUS" \
+ --arg UUID "$UUID" \
+ ' .
+ | ."uuid"=$UUID
+ | ."v"=$STATUS
+ | ."rssi"=(if (.aircraft | length <= 0) then 0 else ([.aircraft[].rssi] | select(. >=0) | add / length | floor) end)
+ | ."rssi-min"=(if (.aircraft | length <= 0) then 0 else ([.aircraft[].rssi] | select(. >=0) | min | floor) end)
+ | ."rssi-max"=(if (.aircraft | length <= 0) then 0 else ([.aircraft[].rssi] | select(. >=0) | max | floor) end)
+ ' < $TMPFILE > $NEWFILE
+ then
+ # this shouldn't happen, don't spam the syslog with the error quite as much
+ sleep 15
+ # we don't have a json output, let's try again from the start
+ continue
+ fi
+
+
+ CURL_EXTRA=""
+ # If DNS_CACHE is set, use the builtin cache (and correspondingly the additional curl arg)
+ if [ $DNS_CACHE -ne 0 ]; then
+ dns_lookup $REMOTE_HOST
+ RV=$?
+ if [ $RV -ne 0 ]; then
+ # Some sort of error... We'll fall back to normal curl usage, but sleep a little.
+ echo "DNS Error for ${REMOTE_HOST}, fallback ..."
+ else
+ REMOTE_IP=${DNS_LOOKUP[$REMOTE_HOST]}
+ CURL_EXTRA="--resolve ${REMOTE_HOST}:443:$REMOTE_IP"
+ fi
+ fi
+
+ sleep 0.314
+ gzip -c <$NEWFILE >$TEMP_DIR/upload.gz
+ sleep 0.314
+
+ # Push up the data. 'curl' will wait no more than $MAX_CURL_TIME seconds for upload to complete; capture its output for the warning below
+ CURL=$(curl -m $MAX_CURL_TIME $CURL_EXTRA -sS -X POST -H "adsbx-uuid: ${UUID}" -H "Content_Encoding: gzip" --data-binary @- $REMOTE_URL 2>&1 <$TEMP_DIR/upload.gz)
+ RV=$?
+
+ if [ $RV -ne 0 ]; then
+ echo "WARNING: curl process returned non-zero ($RV): [$CURL]; Sleeping a little extra."
+ sleep $(( 5 + RANDOM % 15 ))
+ fi
+done
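Below is a minimal sketch of how the new environment variables introduced in this diff (`MLATHUB_ENABLE`, `READSB_ENABLE_TRACES`, `TAR1090_DB_LONGTYPE`, `GRAPHS1090_WWW_TITLE`, `GRAPHS1090_WWW_HEADER`) could be combined in a `docker-compose.yml`. The service layout mirrors the example compose file above; the `MAX_GLOBE_HISTORY` value shown is an illustrative assumption, not part of this diff:

```yaml
services:
  ultrafeeder:
    image: ghcr.io/sdr-enthusiasts/docker-adsb-ultrafeeder:latest
    environment:
      # start the MLAT hub even when no local mlat-clients are running (new in this diff)
      - MLATHUB_ENABLE=true
      # write per-day, per-airframe globe history traces (new in this diff);
      # traces land in /var/globe_history, so map it to a volume (see below)
      - READSB_ENABLE_TRACES=true
      # assumed value: bound the retained history so inodes/disk don't fill up
      - MAX_GLOBE_HISTORY=14
      # with the aircraft database enabled, drop the long "desc"/"ownOp"/"year" fields
      - TAR1090_ENABLE_AC_DB=true
      - TAR1090_DB_LONGTYPE=false
      # graphs1090 page title and header (new in this diff)
      - GRAPHS1090_WWW_TITLE=graphs1090
      - GRAPHS1090_WWW_HEADER=Perf. Graphs
    volumes:
      - /opt/adsb/ultrafeeder/globe_history:/var/globe_history
```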