diff --git a/.gitignore b/.gitignore index 55887ccc..92ed2c6f 100644 --- a/.gitignore +++ b/.gitignore @@ -231,4 +231,7 @@ swag/* !swag/nginx/proxy-confs/dpi-device-manager.subfolder.conf *.pem -settings.json \ No newline at end of file +settings.json + +#303 back up database +backup-db/ diff --git a/compose/.env b/compose/.env index d2cf7f4f..617fde97 100755 --- a/compose/.env +++ b/compose/.env @@ -15,6 +15,15 @@ PGDATABASE=broker POSTGRES_PASSWORD=CHANGEME POSTGRES_DB=broker +#TSDB -- KEEP SYNCED WITH ./.tsdb_env +TSDB_USER=postgres +TSDB_PASSWORD=admin +TSDB_PORT=5432 +TSDB_HOST=tsdb +TSDB_DB=postgres +TSDB_TABLE=timeseries #set in init.sql +NAMING_UPDATE_INTERVAL=600 ##how often on new message to map will it check to update word_list and type_maps + # Set this to the root of the git repo as it is seen by the containers. PYTHONPATH=/home/broker/python diff --git a/compose/.tsdb_env b/compose/.tsdb_env new file mode 100644 index 00000000..add83b6a --- /dev/null +++ b/compose/.tsdb_env @@ -0,0 +1,3 @@ +POSTGRES_USER=postgres +POSTGRES_PASSWORD=admin +POSTGRES_PORT=5432 diff --git a/compose/docker-compose.yml b/compose/docker-compose.yml index 41cdaadd..cfa62d52 100644 --- a/compose/docker-compose.yml +++ b/compose/docker-compose.yml @@ -253,6 +253,7 @@ services: working_dir: "/home/broker/python" entrypoint: [ "python", "-m", "delivery.FRRED" ] + axistech: image: broker/python-base logging: @@ -266,10 +267,39 @@ services: - frred depends_on: db: + entrypoint: [ "python", "-m", "pollers.axistech" ] + + timescaledb: + build: ../timescale # Point to the directory containing the custom Dockerfile + hostname: "tsdb" + image: custom-timescaledb:latest + restart: "no" + + env_file: + - .tsdb_env + volumes: + - ../timescale/init.sql:/docker-entrypoint-initdb.d/init.sql + - ../timescale/pgbackrest/pgbackrest.conf:/home/postgres/pgdata/backup/pgbackrest.conf + - ../timescale/logs:/var/log/timescale + ports: + - "5433:5432" + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${TSDB_USER}"] + interval: 10s + timeout: 5s + retries: 3 + + iota_tsdb_decoder: + image: broker/python-base + restart: "no" + env_file: + - .env + depends_on: + timescaledb: condition: "service_healthy" mq: condition: "service_healthy" volumes: - ../src/python:/home/broker/python working_dir: "/home/broker/python" - entrypoint: [ "python", "-m", "pollers.axistech" ] + entrypoint: [ "python", "-m", "timescale.TS_LTSReader" ] diff --git a/compose/production/prod.yml b/compose/production/prod.yml index 15461ee9..c31d1eb4 100644 --- a/compose/production/prod.yml +++ b/compose/production/prod.yml @@ -31,9 +31,20 @@ services: ports: - "127.0.0.1:5000:5000" + timescaledb: + volumes: + - tsdb_db:/home/postgres/pgdata/data + - pgbackrest_data:/var/lib/pgbackrest + volumes: broker_db: external: true mq_data: external: true + + tsdb_db: + external: true + + pgbackrest_data: + external: false diff --git a/compose/test/test.yml b/compose/test/test.yml index be235d3d..62d8767c 100644 --- a/compose/test/test.yml +++ b/compose/test/test.yml @@ -15,3 +15,10 @@ services: - ..:/home/broker/broker working_dir: "/home/broker/broker" entrypoint: [ "./forever.sh" ] + mq: + ports: + - 15672:15672 + restapi: + ports: + - 5687:5687 + diff --git a/db/init.d/init_db.sql b/db/init.d/init_db.sql index 9ac12cab..0df6d030 100755 --- a/db/init.d/init_db.sql +++ b/db/init.d/init_db.sql @@ -111,3 +111,352 @@ create index if not exists pd_src_id_idx on physical_devices using GIN (source_i insert into sources values ('ttn'), ('greenbrain'), ('wombat'), ('ydoc'), 
('ict_eagleio'); insert into version values (2); + +create table if not exists data_name_map( + input_name text not null primary key, + std_name text not null +); + +create table if not exists type_name_map( + full_name text not null primary key, + short_name text not null +); + +create table if not exists word_list( + full_word text +); + +create table if not exists hash_table( + table_name text primary key, + data_hash text +); + +create or replace function update_hash_table() +returns trigger as $$ +begin + if TG_OP = 'INSERT' or TG_OP = 'UPDATE' OR TG_OP = 'DELETE' then + insert into hash_table (table_name, data_hash) + values (TG_TABLE_NAME, MD5(NEW.*::text)) + on conflict (table_name) + do update set data_hash = MD5(NEW.*::text); + return new; + end if; +end; +$$ language plpgsql; + +create trigger type_name_map_trigger +after insert or update or delete on type_name_map +for each row + execute function update_hash_table(); + +create trigger word_list_trigger +after insert or update or delete on word_list +for each row + execute function update_hash_table(); + +create index if not exists pd_src_id_idx on physical_devices using GIN (source_ids); + +insert into sources values ('ttn'), ('greenbrain'), ('wombat'), ('ydoc'), ('ict_eagleio'); + +insert into data_name_map (input_name, std_name) values + ('1_Temperature', '1_TEMPERATURE'), + ('1_VWC', '1_VWC'), + ('2_Temperature', '2_TEMPERATURE'), + ('2_VWC', '2_VWC'), + ('3_Temperature', '3_TEMPERATURE'), + ('3_VWC', '3_VWC'), + ('4_Temperature', '4_TEMPERATURE'), + ('4_VWC', '4_VWC'), + ('5_Temperature', '5_TEMPERATURE'), + ('5_VWC', '5_VWC'), + ('6_Temperature', '6_TEMPERATURE'), + ('6_VWC', '6_VWC'), + ('8_AirPressure', '8_AIR_PRESSURE'), + ('8_AirTemperature', '8_AIR_TEMPERATURE'), + ('8_HumiditySensorTemperature', '8_HUMIDITY_SENSOR_TEMPERATURE'), + ('8_Precipitation', '8_PRECIPITATION'), + ('8_RH', '8_RH'), + ('8_Solar', '8_SOLAR'), + ('8_Strikes', '8_STRIKES'), + ('8_VaporPressure', '8_VAPOR_PRESSURE'), + ('8_WindDirection', '8_WIND_DIRECTION'), + ('8_WindGustSpeed', '8_WIND_GUST_SPEED'), + ('8_WindSpeed', '8_WIND_SPEED'), + ('Access_technology', 'ACCESS_TECHNOLOGY'), + ('accMotion', 'ACC_MOTION'), + ('Actuator', 'ACTUATOR'), + ('adc_ch1', 'ADC_CH_1'), + ('adc_ch2', 'ADC_CH_2'), + ('adc_ch3', 'ADC_CH_3'), + ('adc_ch4', 'ADC_CH_4'), + ('airTemp', 'AIR_TEMPERATURE'), + ('airtemperature', 'AIR_TEMPERATURE'), + ('airTemperature', 'AIR_TEMPERATURE'), + ('altitude', 'ALTITUDE'), + ('Ana', 'ANA'), + ('atmosphericpressure', 'ATMOSPHERIC_PRESSURE'), + ('atmosphericPressure', 'ATMOSPHERIC_PRESSURE'), + ('Average_current', 'AVERAGE_CURRENT'), + ('average-flow-velocity0_0_m/s', 'AVERAGE_FLOW_VELOCITY_0_0_MS'), + ('Average_voltage', 'AVERAGE_V'), + ('Average_Voltage', 'AVERAGE_V'), + ('Average_Wind_Speed_', 'AVERAGE_WIND_SPEED'), + ('avgWindDegrees', 'AVERAGE_WIND_DEGREES'), + ('barometricPressure', 'BAROMETRIC_PRESSURE'), + ('batmv', 'BATMV'), + ('battery', 'BATTERY'), + ('Battery (A)', 'BATTERY_A'), + ('battery (v)', 'BATTERY_V'), + ('Battery (V)', 'BATTERY_V'), + ('batteryVoltage', 'BATTERY_V'), + ('battery-voltage_V', 'BATTERY_V'), + ('Battery (W)', 'BATTERY_W'), + ('Cable', 'CABLE'), + ('charging-state', 'CHARGING_STATE'), + ('Class', 'CLASS'), + ('command', 'COMMAND'), + ('conductivity', 'CONDUCTIVITY'), + ('counterValue', 'COUNTER_VALUE'), + ('current-flow-velocity0_0_m/s', 'CURRENT_FLOW_VELOCITY_0_0_MS'), + ('depth', 'DEPTH'), + ('Device', 'DEVICE'), + ('DI0', 'DI_0'), + ('DI1', 'DI_1'), + ('direction', 'DIRECTION'), + ('distance', 
'DISTANCE'), + ('down630', 'DOWN_630'), + ('down800', 'DOWN_800'), + ('EC', 'EC'), + ('externalTemperature', 'EXTERNAL_TEMPERATURE'), + ('fault', 'FAULT'), + ('Fraud', 'FRAUD'), + ('gnss', 'GNSS'), + ('gustspeed', 'GUST_SPEED'), + ('gustSpeed', 'GUST_SPEED'), + ('header', 'HEADER'), + ('Humi', 'HUMI'), + ('humidity', 'HUMIDITY'), + ('Hygro', 'HYGRO'), + ('Leak', 'LEAK'), + ('linpar', 'LINPAR'), + ('Max_current', 'MAX_CURRENT'), + ('Maximum_Wind_Speed_', 'MAX_WIND_SPEED'), + ('Max_voltage', 'MAX_V'), + ('Min_current', 'MIN_CURRENT'), + ('Minimum_Wind_Speed_', 'MIN_WIND_SPEED'), + ('Min_voltage', 'MIN_V'), + ('moisture1', 'MOISTURE_1'), + ('moisture2', 'MOISTURE_2'), + ('moisture3', 'MOISTURE_3'), + ('moisture4', 'MOISTURE_4'), + ('ndvi', 'NDVI'), + ('O06 / DPI-144', 'O_06_DPI_144'), + ('Operating_cycle', 'OPERATING_CYCLE'), + ('packet-type', 'PACKET_TYPE'), + ('period', 'PERIOD'), + ('Power', 'POWER'), + ('precipitation', 'PRECIPITATION'), + ('pressure', 'PRESSURE'), + ('Processor_temperature', 'PROCESSOR_TEMPERATURE'), + ('pulse_count', 'PULSE_COUNT'), + ('Radio_channel_code', 'RADIO_CHANNEL_CODE'), + ('Rainfall', 'RAINFALL'), + ('rain_per_interval', 'RAIN_PER_INTERVAL'), + ('Rain_per_interval', 'RAIN_PER_INTERVAL'), + ('raw_depth', 'RAW_DEPTH'), + ('rawSpeedCount', 'RAW_SPEED_COUNT'), + ('relativehumidity', 'RELATIVE_HUMIDITY'), + ('relativeHumidity', 'RELATIVE_HUMIDITY'), + ('Rest_capacity', 'REST_CAPACITY'), + ('Rest_power', 'REST_POWER'), + ('rssi', 'RSSI'), + ('rtc', 'RTC'), + ('RTC', 'RTC'), + ('S1_EC', 'S_1_EC'), + ('S1_Temp', 'S_1_TEMPERATURE'), + ('S1_Temp_10cm', 'S_1_TEMPERATURE_10_CM'), + ('S1_Temp_20cm', 'S_1_TEMPERATURE_20_CM'), + ('S1_Temp_30cm', 'S_1_TEMPERATURE_30_CM'), + ('S1_Temp_40cm', 'S_1_TEMPERATURE_40_CM'), + ('S1_Temp_50cm', 'S_1_TEMPERATURE_50_CM'), + ('S1_Temp_60cm', 'S_1_TEMPERATURE_60_CM'), + ('S1_Temp_70cm', 'S_1_TEMPERATURE_70_CM'), + ('S1_Temp_80cm', 'S_1_TEMPERATURE_80_CM'), + ('S1_Temp_90cm', 'S_1_TEMPERATURE_90_CM'), + ('S1_VWC', 'S_1_VWC'), + ('s4solarRadiation', 'S_4_SOLAR_RADIATION'), + ('salinity', 'SALINITY'), + ('salinity1', 'SALINITY_1'), + ('salinity2', 'SALINITY_2'), + ('salinity3', 'SALINITY_3'), + ('salinity4', 'SALINITY_4'), + ('sensorReading', 'SENSOR_READING'), + ('shortest_pulse', 'SHORTEST_PULSE'), + ('Signal', 'SIGNAL'), + ('Signal_indication', 'SIGNAL_INDICATION'), + ('Signal_strength', 'SIGNAL_STRENGTH'), + ('snr', 'SNR'), + ('soilmoist', 'SOIL_MOISTURE'), + ('soiltemp', 'SOIL_TEMPERATURE'), + ('solar', 'SOLAR'), + ('Solar (A)', 'SOLAR_A'), + ('solarpanel', 'SOLAR_PANEL'), + ('solarPanel', 'SOLAR_PANEL'), + ('solar (v)', 'SOLAR_V'), + ('Solar (V)', 'SOLAR_V'), + ('solar-voltage_V', 'SOLAR_V'), + ('Solar (W)', 'SOLAR_W'), + ('solmv', 'SOLMV'), + ('sq110_umol', 'SQ_110_UMOL'), + ('strikes', 'STRIKES'), + ('Tamper', 'TAMPER'), + ('tdskcl', 'TDSKCL'), + ('Temp', 'TEMPERATURE'), + ('temperature', 'TEMPERATURE'), + ('Temperature', 'TEMPERATURE'), + ('temperature1', 'TEMPERATURE_1'), + ('temperature2', 'TEMPERATURE_2'), + ('temperature3', 'TEMPERATURE_3'), + ('temperature4', 'TEMPERATURE_4'), + ('temperature5', 'TEMPERATURE_5'), + ('temperature6', 'TEMPERATURE_6'), + ('temperature7', 'TEMPERATURE_7'), + ('temperature8', 'TEMPERATURE_8'), + ('temperatureReading', 'TEMPERATURE_READING'), + ('tilt-anlge0_0_Degrees', 'TILT_ANLGE_0_0_DEGREES'), + ('UNIX_time', 'UNIX_TIME'), + ('up630', 'UP_630'), + ('up800', 'UP_800'), + ('uptime_s', 'UPTIME_S'), + ('vapourpressure', 'VAPOUR_PRESSURE'), + ('vapourPressure', 'VAPOUR_PRESSURE'), + ('vdd', 'VDD'), + 
('Volt', 'V'), + ('vt', 'VT'), + ('VWC', 'VWC'), + ('VWC1', 'VWC_1'), + ('winddirection', 'WIND_DIRECTION'), + ('windDirection', 'WIND_DIRECTION'), + ('windKph', 'WIND_KPH'), + ('windspeed', 'WIND_SPEED'), + ('windSpeed', 'WIND_SPEED'), + ('windStdDevDegrees', 'WIND_STD_DEV_DEGREES'); + + +insert into type_name_map (full_name, short_name) values + ('AMP', 'A'), + ('AMPERAGE', 'A'), + ('AMPS', 'A'), + ('VOLT', 'V'), + ('VOLTAGE', 'V'), + ('VOLTS', 'V'), + ('MAXIMUM', 'MAX'), + ('MINIMUM', 'MIN'), + ('CENTIMETER', 'CM'), + ('CENTIMETRE', 'CM'), + ('CENTIMETERS', 'CM'), + ('CENTIMETRES', 'CM'), + ('TEMP', 'TEMPERATURE'), + ('AVG', 'AVERAGE'), + ('MOIST', 'MOISTURE'); + +insert into word_list values + ('ACCESS'), + ('ACTUATOR'), + ('AIR'), + ('ALTITUDE'), + ('AMP'), + ('AMPERAGE'), + ('AMPS'), + ('ATMOSPHERIC'), + ('AVERAGE'), + ('AVG'), + ('BAROMETRIC'), + ('BATTERY'), + ('CABLE'), + ('CAPACITY'), + ('CHANNEL'), + ('CHARGING'), + ('CLASS'), + ('CODE'), + ('COMMAND'), + ('CONDUCTIVITY'), + ('COUNT'), + ('COUNTER'), + ('CURRENT'), + ('CYCLE'), + ('DEGREES'), + ('DEPTH'), + ('DEV'), + ('DEVICE'), + ('DISTANCE'), + ('DIRECTION'), + ('DOWN'), + ('EXTERNAL'), + ('FLOW'), + ('FRAUD'), + ('GUST'), + ('HEADER'), + ('HUMIDITY'), + ('HYGRO'), + ('INDICATION'), + ('INTERVAL'), + ('KPH'), + ('LEAK'), + ('MAX'), + ('MAXIMUM'), + ('MIN'), + ('MINIMUM'), + ('MOIST'), + ('MOISTURE'), + ('MOTION'), + ('OPERATING'), + ('PACKET'), + ('PANEL'), + ('PER'), + ('PERIOD'), + ('POWER'), + ('PRECIPITATION'), + ('PRESSURE'), + ('PROCESSOR'), + ('PULSE'), + ('RADIO'), + ('RAINFALL'), + ('RAIN'), + ('READING'), + ('RELATIVE'), + ('REST'), + ('SALINITY'), + ('SIGNAL'), + ('SOLAR'), + ('SOIL'), + ('SPEED'), + ('STRENGTH'), + ('STRIKE'), + ('STRIKES'), + ('STD'), + ('TECHNOLOGY'), + ('TILT'), + ('TIME'), + ('UNIX'), + ('UP'), + ('UPTIME'), + ('VALUE'), + ('VAPOR'), + ('VELOCITY'), + ('VOLT'), + ('VOLTS'), + ('VOLTAGE'), + ('READING'), + ('SHORTEST'), + ('SNR'), + ('SOIL'), + ('TAMPER'), + ('TILT'), + ('TIME'), + ('TEMPERATURE'), + ('TEMP'), + ('UNIX'), + ('UP'), + ('VAPOUR'), + ('WIND'); diff --git a/doc/nginx.md b/doc/nginx.md index 483b7d60..449f403f 100644 --- a/doc/nginx.md +++ b/doc/nginx.md @@ -30,6 +30,11 @@ To connect to the RabbitMQ monitor web page, use `https://hostname/rabbitmq` # Use the hostname 'restapi' if nginx is running in a container. proxy_pass http://localhost:5687/broker/; } + + location /query/ { + #use the hostname 'restapi' if nginx is running in a container. + proxy_pass http://localhost:5687/query/; + } location /rabbitmq/ { # Use the hostname 'mq' if nginx is running in a container. @@ -93,4 +98,4 @@ stream { proxy_pass 127.0.0.1:1884; } } -``` \ No newline at end of file +``` diff --git a/doc/tsdb/Programmer Documentation.md b/doc/tsdb/Programmer Documentation.md new file mode 100644 index 00000000..ba5c7ef1 --- /dev/null +++ b/doc/tsdb/Programmer Documentation.md @@ -0,0 +1,369 @@ +# Programmer Documentation +### Purpose: +` `The aim of this document is to act as a guide on how the system actually works, why it is implemented the way it is, and how to modify or maintain the system. 
+## ***Business Aims*** +` `The TSDB implementation aims to address to business requirements of: + +- Storing incoming sensor data in an efficient and optimal way, that can be easily retrieved or backed up +- Compatibility with existing implementation, including allowing access and use to IoTa databases +- Local hosting of database, not cloud hosting +- Graphical representation of data with a web app +## ***Change List - Initial Merge into IoTa*** +` `Here is a complete list of the files we have changed or added, their reason for change and some notes on what might happen if it is changed. + + + +|File|Change List|Reasons/Notes| +| :- | :- | :- | +|[compose/.env](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/compose/.env)|

Several new environment variables:

- TSDB\_USER

 - TSDB username

- TSDB\_PASSWORD

 - TSDB password

- TSDB\_PORT

 - TSDB port

- TSDB\_HOST

 - TSDB host

- TSDB\_DB

 - TSDB database name

- TSDB\_TABLE

 - TSDB table name

- NAMING\_UPDATE\_INTERVAL

 - interval (seconds) between checks by the naming system for mapping changes

|

- Changed existing file to keep project in one place.

- Required extra environment variables for TSDB implementation

- The TSDB\_XXX needs to match the equivalent in [compose/.tsdb_env](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/compose/.tsdb_env).

| +|[compose/.tsdb_env](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/compose/.tsdb_env)|

Several new environment variables:

- POSTGRES\_USER

 - TSDB username

- POSTGRES\_PASSWORD

 - TSDB password

- POSTGRES\_PORT

 - TSDB port

|- Since Timescale is built on Postgres, there was a conflict with both databases using the same .env file to set credentials, so the credentials were split into a second file.| +|[compose/docker-compose.yml](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/compose/docker-compose.yml)|

Several new services:

- iota\_tsdb\_decoder

 - Message handling and inserting into TSDB

- timescaledb

 - TSDB

|

- The decoder handles incoming MQ messages, and inserts them into TSDB.

- timescaledb is the actual time series database.

| +|[db/init.d/init_db.sql](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/db/init.d/init_db.sql)|

New Tables:

- data\_name\_map

 - Stores mappings to map incoming messages

- type\_name\_map

 - Stores types to dynamically process unmapped messages

- `word\_list`

 - Stores words to dynamically process unmapped messages

- hash\_table

 - Stores hash of tables to enable quicker syncing

Functions:

- update\_hash\_table

 - Create or update hash for table\_name

Triggers:

- type\_name\_map\_trigger

- word\_list\_trigger

 - Both of the above fire on any change to their respective table and call the update function

Insertions:

- Inserts default values into data\_name\_map

|

- Efficient implementation of standardising the names for time series data

- Name maps are accessible to anything that has access to the DAO or the database.

- word\_list and type\_name\_map can be updated while the containers are running and will automatically sync within the set interval, without much overhead.

| +|[timescale/init.sql](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/timescale/init.sql)|Creates the time series database schema|Requirement to set up the time series database.| +|[timescale/Dockerfile](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/Dockerfile)|Used for the custom TimescaleDB image.|Main purpose is to install pgBackRest for physical backup into the timescale image.| +|[timescale/pgbr_init.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/pgbr_init.sh)|Added file.|Main purpose is to establish the stanza for pgBackRest so that physical backup can be performed.| +|[timescale/postgres/postgresql.conf](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/postgres/custom_postgresql.conf)|Added file.|Required for configuration of postgres in use with pgBackRest.| +|[timescale/pgbackrest/pgbackrest.conf](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/pgbackrest/pgbackrest.conf)|Added file.|Required for configuration of pgBackRest.| +|[src/python/broker-cli.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/broker-cli.py)|

Added logical device pretty output

Added several CRUD functions for:

- word\_list

- data\_name\_map

- type\_name\_map

|Physical devices had pretty output but logical devices did not| +|[src/python/api/client/DAO.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/api/client/DAO.py)|

New Functions:

- CRUD:

 - add\_name\_map

 - update\_name\_map

 - get\_std\_name

 - `\_get\_std\_name`

|Ability to access the new table correctly.| +|[src/python/pdmodels/Models.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/pdmodels/Models.py)|Added DataNameMap class|Following current structure for IoTa.| +|[src/python/restapi/TSDBAPI.py](https://github.com/ZakhaevK/itc303-team3-broker/tree/merge_dpi/src/python/restapi)|New file for implementing the time series API|

Uses same endpoint as existing REST API

Implements API requests for getting time series data from Timescale

| +|[src/python/restapi/requirements.txt](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/restapi/requirements.txt)|Added extra modules|

Some newer features needed extra modules to work.

Made setting up a local test environment slightly easier by installing the requirements from this file

| +|[src/python/timescale/TS_LTSReader.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/timescale/TS_LTSReader.py)|Added file|This is the rabbit MQ message listener that receives and handles the incoming messages| +|[src/python/timescale/Timescale.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/timescale/Timescale.py)|Added file|This parses incoming messages into the timescale instance| +|[src/python/util/NamingConstants.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/util/NamingConstants.py)|Added file|

If a message comes through that does not currently have a mapped name, this module will generate the mapped name.

Uses word\_list, type\_name\_map and hash\_table to keep synced and dynamically create mapped names.

| +|[src/www/app/utils/api.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/utils/api.py)|

New functions:

- get\_between\_dates\_ts

- get\_luid\_ts

- get\_puid\_ts

|New functions pull from REST API to fill the web graph and web table data.| +|[src/www/app/main.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/main.py)|

New functions:

- parse\_ts\_table\_data

 - parses data into the format used by the web table

- parse\_ts\_data

 - parses data into the format used by the graph

- get\_data

 - the actual Flask endpoint used to get data for the web table

Modified functions:

- logical\_device\_form

 - added ts data variable that passes to the form

- physical\_device\_form

 - added ts data variable that passes to the form

|Required functions for providing time series data to the web app| +|[src/www/app/static/ts_graph.js](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_graph.js)|Added file|

This is a shared template file; both the p\_uid and l\_uid pages use it.

Generates graph with time series data

| +|[src/www/app/templates/ts_graph.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/master/src/www/app/templates/ts_graph.html)|Added file|

This is a shared template file; both the p\_uid and l\_uid pages use it.

This largely just passes on the time series data from flask to the graph.js file and handles the html side of things.

This page also references the ts\_table.js and handles displaying the graph.

The file should be renamed, as it was created before the table view existed.

| +|[src/www/app/static/ts_table.css](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_table.css)|Added file|

Purely css for the time series template.

Almost all of it is for ts\_table.js; a small part styles the show graph/table button that is always shown.

| +|[src/www/app/static/ts_table.js](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_table.js)|Added file|

This handles the drawing and updating of the time series table.

It also uses js to insert the date pickers and buttons that the table uses.

| +|[src/www/app/templates/physical_device_form.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/templates/physical_device_form.html)|Added reference to ts\_graph.html template file|Added reference to the ts\_graph.js pages so time series can be used| +|[src/www/app/templates/logical_device_form.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/templates/logical_device_form.html)|

Added reference to ts\_graph.html template file

|Added reference to the ts\_graph.js pages so time series can be used| +|[/load-data.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/load-data.sh)|Added file|

Useful script for adding and mapping some devices to test.

Creates user login

By default will create 10:10 p\_uid:l\_uid and map them 1:1.

| +|[/ts_backup.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/ts_backup.sh)|Added file|Used for logical backup of the time series database| +|[/ts_restore.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/ts_restore.sh)|Added file|Used to restore from a logical backup file| +|[/pgbr_backup.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/pgbr_backup.sh)|Added file|Used for physical backup of the time series database| +|[/pgbr_restore.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/pgbr_restore.sh)|Added file|Used to restore from the physical backup files| +|[/pgbr_cleanup.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/pgbr_cleanup.sh)|Added file|

Used for wiping and recreating PGBR data.


Best used when following a logical restore.

| + +## ***System Architecture*** +### Design Philosophy: +- Keep as decoupled as possible, this will help: + - Add future changes, and maintain the system. + - Merging with IoTa and other ITC-303 projects without major breakages. +- Use of existing libraries - IoTa uses some existing libraries such as FastAPI to accomplish features, where possible we used the same libraries in order to keep IoTa maintainable and as simple as possible. +- Coding style - there is no stringent coding style used through IoTa, and so the TSDB changes mirror what is there, and follow PEP8 to some degree. +- The project should be able to be dropped/merged directly into current IoTa and not cause any issues, it only adds new features, it does not interfere or change existing mechanisms. + +### Component Breakdown: +Key: **Green** = New File | **Purple** = Modified File + +Timescale Database: + +` `Relevant Files: + +1. [timescale/init.sql](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/timescale/init.sql) +1. [timescale/Dockerfile](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/Dockerfile) +1. [compose/docker-compose.yml](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/compose/docker-compose.yml) + +Purpose: + +- (1) Defines the schema utilised by the Timescale database. +- (2) Specifies the base image, and the addition of pgBackRest for physical backup. +- (3) Modified to include Timescale database as part of the IoTa stack. + +IoTa Timeseries Database Decoder: + +Relevant Files: + +1. [src/python/timescale/TS_LTSReader.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/timescale/TS_LTSReader.py) +1. [src/python/timescale/Timescale.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/timescale/Timescale.py) +1. [db/init.d/init_db.sql](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/db/init.d/init_db.sql) +1. [src/python/util/NamingConstants.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/util/NamingConstants.py) +1. [src/python/api/client/DAO.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/python/api/client/DAO.py) + +Purpose: + +- (1) Accept JSON formatted messages from RabbitMQ containing time series data. +- (1) Confirm the data is in acceptable format, and contains no invalid data. +- (1) Convert data type names into standardised formats.. +- (2) Provide a method of translation into a consumable format for the Timescale database. +- (2) Add valid data to the Timescale database. +- (3) Provides the standardised names used in translation within an SQL table added to the existing postgres database. +- (4) Provides the functions for finding and replacing data names with standardised names. +- (5) Extra functions to handle new tables in the existing postgres database. + +REST API Addition: + +` `Relevant Files: + +1. [src/python/restapi/TSDBAPI.py](https://github.com/ZakhaevK/itc303-team3-broker/tree/merge_dpi/src/python/restapi) + +Purpose: + +- (1) Provides several endpoints for retrieval of data from the Timescale database. +- (1) Is used by the Webapp for both the graph and table views. + +Webapp Additions: + +` `Relevant Files: + +1. [src/www/app/utils/api.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/utils/api.py) +1. [src/www/app/main.py](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/main.py) +1. 
[src/www/app/static/ts_graph.js](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_graph.js) +1. [src/www/app/templates/ts_graph.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/master/src/www/app/templates/ts_graph.html) +1. [src/www/app/templates/physical_device_form.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/templates/physical_device_form.html) +1. [src/www/app/templates/logical_device_form.html](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/templates/logical_device_form.html) +1. [src/www/app/static/ts_table.js](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_table.js) +1. [src/www/app/static/ts_table.css](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/src/www/app/static/ts_table.css) + +Purpose: + +- (1) Provides functionality for pulling data that is used for the Webapp graph. +- (2) Integrates functionality from api.py for the Webapp graph. +- (3) Javascript for the Webapp graph, used in the templates of ID pages. +- (4) The HTML template file with the ts\_graph.js contained +- (5-6) Template device pages that import the graph HTML. +- (7) Javascript for the Webapp table view, used by both p\_uid and l\_uid pages. +- (8) Styling for the time series section. + +Timescale Backup: + +` `Relevant Files: + +1. [/ts_backup.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/ts_backup.sh) +1. [/ts_restore.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/ts_restore.sh) +1. [/pgbr_backup.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/pgbr_backup.sh) +1. [/pbgr_restore.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/pgbr_restore.sh) +1. [timescale/postgres/postgresql.conf](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/postgres/custom_postgresql.conf) +1. [timescale/pgbackrest/pgbackrest.conf](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/pgbackrest/pgbackrest.conf) +1. [timescale/pgbr_init.sh](https://github.com/ZakhaevK/itc303-team3-broker/blob/inc_backup/timescale/pgbr_init.sh) + +Purpose: + +- (1) Bash script that performs a logical backup and stores them in /backups/. +- (2) Bash script that performs a logical restore when given an argument containing the backup name contained in /backups/ +- (3) Bash script that performs a physical backup. +- (3) Needs an argument of either “full”, “incr” (incremental), or “diff” (differential). +- (4) Bash script for performing a physical restore, will provide options within the physical backup volume. +- (4) Contains a warning on limitations regarding postgres due to the timelines feature. +- (5) Config file for Timescale, only adjusted to account for stanza creation and PITR options. +- (6) Config file for pgBackRest, contains details regarding backup storage. +- (7) Initialises pgBackRest on container startup. +## ***Detailed Design*** +Component Diagram: +![](Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.001.png) + +Use Case Diagrams: + +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.001.png) +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.002.png) + +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.003.png) +## ***Operation*** +The additional features follow the design philosophy of the existing IoTa, such that any current user of the system should be able to just ‘use’ the new additions. 
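

As a quick illustration, the sketch below shows the new pieces being exercised from the command line once the stack is up. These are assumptions rather than documented behaviour: the container names come from the compose files in this diff, the REST API is assumed to be on its default port 5687, and device id 1 is only a placeholder such as one created by load-data.sh.

```bash
# check that the two new containers are up alongside the existing stack
docker ps --filter name=timescaledb --filter name=iota_tsdb_decoder

# pull the last two days of time series for logical device 1 via the new query API
curl "http://localhost:5687/query/l_uid/1/last/?days=2"

# take a full physical backup of the Timescale database
./pgbr_backup.sh full
```
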
+ + +Operations can be broken down to a few main use cases, message handling, broker cli usage, API, web app and back up. These are highlighted below, with a breakdown of each component. + +#### Message Handling +The message handling is almost entirely automated without any changes, or worries. A message is handled by interacting with the following systems: + +1. TS\_LTSReader.py listens to the main LTS exchange BrokerConstants.LOGICAL\_TIMESERIES\_EXCHANGE\_NAME +1. Once a message is received by the exchange, TS\_LTSReader.on\_message will double check that the physical device exists. + 1. Note: it does not check for a valid mapping, as the sender LogicalMapper does this already. +1. TS\_LTSReader.py then uses Timescale.py to attempt to parse and store the message. +1. Timescale.py handles talking to existing postgres db via DAO to check for valid mappings, and if required, uses NamingConstants.py to generate a new mapping. +1. Then, timescaledb will confirm success upon storing the message. +1. TS\_LTSReader.py will print results of the message handling. + +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.004.png) + +**Notes**: + +- Various messages are logged to console, such as if mappings are found or created, if a message passes or fails, if a message is received/rejected. +- NamingConstants.py will also communicate with DAO for access to word\_list, type\_name\_map and hash\_table in order to be able to sync with latest tables, and dynamically standardise the names using data from the tables. +- The files excluding NamingConstants.py are self contained, and should not be accessed outside of the sending messages through RabbitMQ, + + +**Breakages**: + +Doing these things will require further code changes otherwise aspects will break or not function as expected and at the very least will require you to check the code for potential breakages. + +- Modifying the message IoTa message format will cause messages to be rejected. +- Removing the added tables word\_list, hash\_table, type\_name\_map, name\_map will cause breakages on various sections. +- Changes to certain DAO functions that handle checking p\_uid, or the above tables. +- Renaming any of the expected environment variables defined in either .env or tsdb\_env.env or if these two files are not synced with the same variables. + + +**Modifications:** + +These changes may be considered in the future, and this is a rough idea on what requirements may be: + +- Removing NamingConstants.py : as it is a standalone module, it would be rather trivial to delete the reference and single function call from Timescale.py +- Changing environment variables would have no effect on the system, as long as they are synced between the two .env files. +- Changing exchange name should be done through BrokerConstants.py - otherwise code will break. + + + +#### BrokerCLI.py +Several additions have been added to broker-cli.py to aid in the usage of the new features. + +These are solely CRUD functions for the new tables in the standard postgres database, and follow the same expectation of the existing function. + +The command names used are shorthand, where:*.docx|*.doc|*.docm +nmap = data\_name\_map (name\_map) + +tmap = type\_name\_map (type\_map) + +wlist = word\_list (word\_list) + +These also have a list of default values, that are defined in sql.init file when the postgresdb gets created. 
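

To see those defaults once the stack is running, the list commands can be used as sketched below. This assumes the production container name prod-lm-1 used elsewhere in this document, and that tmap supports the same ls subcommand as nmap.

```bash
# list the default input name -> standard name mappings loaded by init_db.sql
docker exec prod-lm-1 python -m broker-cli nmap ls

# list the default type abbreviations (for example VOLTAGE -> V)
docker exec prod-lm-1 python -m broker-cli tmap ls
```
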
+ +name\_map is the mapped names, incoming name -> outgoing name: +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.005.png) + +word\_list is any word to be broken down and split up, ie Temperature which would result in TemperatureTemperature123 == temperature\_temperature\_123 + +Examples: + +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.006.png) + +type\_map is any type that can be broken down, it Voltage,Volts, Volt all could all equal V. + +Examples: + +![](./media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.007.png) + + +Like existing functions, use --help for full usage + + +Example usage: + +docker exec prod-lm-1 python -m broker-cli nmap add --in test --out TEST + +docker exec prod-lm-1 python -m broker-cli nmap ls -w 40 + +docker exec prod-lm-1 python -m broker-cli tmap rm --in test + +**Breakages**: + +These changes can not be added / removed without any issues with running the IoTa app. They are not technically needed, as one can log into the database and add/remove things, however for convenience and some safety, using the cli is recommended. + +There is a fix for print pretty device output, if this is removed then our ./load\_data.sh test script will fail. + + +**Modifications:** + +- Changes to the DAO.py functions might affect the broker-cli.py functions, especially for ls commands. + +#### Accessing Timescale Database +For direct access to the Timescale Database within its container (timescaledb), the only way is to make use of psql, a command line tool for interacting with PostgreSQL databases. + +An example to gain access to the database within production mode would be as follows: +docker exec -it prod-timescaledb-1 psql -U -d + +For querying the database, it’s recommended to use the API, but access in this way will allow for more specific queries than possible with the API. +#### Using API +The query API makes use of the same FastAPI endpoint as the existing RestAPI, but with a different URL Path. All query API calls are made through to the URL localhost:5687/query. The details of the API implementation can be found in the following files; + +- src/python/restapi/TSDBAPI.py contains the API’s function implementation and respective endpoints +- src/python/restapi/RestAPI.py for a single line that links TSDBAPI to the main FastAPI instance. + - This line can be changed to change the query endpoint, or to include authentication etc. + +The functions that have been implemented in the API include: + +- Query, available at /{query}: enables users to submit a query in PostgreSQL syntax, and results are returned on the home page. This endpoint is to be used for more specific queries, which cannot be handled by any other functions. +- Get l\_uid records, available at /l\_uid/{l\_uid}: enables users to gather all the records corresponding to a particular l\_uid. Optional query parameters for additional filtering include p\_uid, fromdate and todate, where dates should use the format DD/MM/YYYY hh:mm:ss. +- Get p\_uid records, available at /p\_uid/{p\_uid}: enables users to gather all the records corresponding to a particular p\_uid. Optional query parameters for additional filtering include l\_uid, fromdate and todate, where dates should use the format DD/MM/YYYY hh:mm:ss. 
+- Get l\_uid records for the last x (time), available at /l\_uid/{l\_uid}/last: enables users to gather all the records corresponding to a particular l\_uid for the last x period of time expressed through dedicated query parameters for years, months, days, hours, minutes and seconds. All arguments are optional, and if no value is provided, then default to 0. An example, to get all records for l\_uid = 9 for the last 2 days, one can submit the following queries: + - /l\_uid/9/last/?days=2 + - /l\_uid/9/last/?hours=48 + - /l\_uid/9/last/?minutes=2880 + - /l\_uid/9/last/?seconds=172800 +- Get p\_uid records for the last x (time), available at /p\_uid/{p\_uid}/last: enables users to gather all the records corresponding to a particular p\_uid for the last x period of time expressed through dedicated query parameters for years, months, days, hours, minutes and seconds. All arguments are optional, and if no value is provided, then default to 0. +- Get l\_uid records by function, available at /l\_uid/{l\_uid}/{func}: enables users to gather all the records corresponding to a particular l\_uid and apply a relevant SQL function on the values received. Functions include sum, avg, max, min etc. +- Get p\_uid records by function, available at /p\_uid/{p\_uid}/{func}: enables users to gather all the records corresponding to a particular p\_uid and apply a relevant SQL function on the values received. Functions include sum, avg, max, min etc. +#### Using Web App +The web app changes consist of three five main parts: + +- api.py to handle the calls to the REST API. +- main.py to handle communication between api.py and the html sections. +- ts\_graph.js & ts\_table.js that handle the logic of the time series graph or table. +- ts\_graph.html that handles the html layout of the graph and table, this is largely a template file that is added into the existing pages. +- ts\_table.css handles styling for the table, and related buttons. + +The XXXX\_form.html pages in the existing web app have had a few lines added, which essentially is a few divs, and inside those, sources the ts\_graph.html template file. + +Breakdown of graph: + +- The graph uses chart.js for the library. +- On each form page, whether logical or physical, the app will query the last 30 days of time series for the respective p\_uid or l\_uid and pass it onto the html page, which in turn passes it onto the javascript code. +- As it auto loads, it will only ever draw 30 days max. +- The functions of the graph are relatively straight forward, where the main issue is making sure the data passed from the web app stays in the same format, as chart.js requires the data formatted a specific way. + + +Breakages of graph: + +- If the data format must remain the same, otherwise it will need to be parsed into the correct format. +- toggle\_state is used to keep track of selecting all / none, so if you manually deselect every single node, then you have bypassed changing the state and must click the toggle button twice. + +Breakdown of table: + +- Table is more complex than graph, and therefore changes are considered more complex. +- Uses grid.js +- Loaded from ts\_graph.html template file +- Lazily loads today’s data once toggle to be visible +- When the table is created, it also creates the date pickers and buttons via javascript, these buttons are destroyed and re-created on table updates due to the way grid.js handles them. 
+- As the table uses the single endpoint on the Flask side, and a single API call, it requires the p\_uid or l\_uid and the device type to be passed through. It gets the device type through an empty div in the form page, i.e physical\_form has a div pd\_page, and it uses the existing uid passed from Flask for the existing app features. +- The handle\_csv\_btn() will convert and download the **displayed** table into CSV. This function can be simplified a little if you simply wanted to display all table data, not just the filtered results. +- It relies on the main.py parsing function to send the data in the correct format, which is a json object { “columns”: [], “data”: [] }. +- The fetch command to get the data from the javascript code is as follows; /get\_between\_dates\_ts?dev\_type=${dev\_type}&uid=${uid}&from\_date=${from\_date}&to\_date=${to\_date} +- Where dev\_type is the type of page, uid is the p\_uid or l\_uid for the device, and from/to\_dates are the picker values. + +Breakages of table: + +- Changing the parsing function in main.py called parse\_ts\_table\_data will break the table results. +- Changing ts\_table.css will cause the table to display in unexpected ways. + + +Modifying Web app: + +- The main change would be to extend the newer table functions such as being able to query between dates to the graph. At the moment the graph only pulls 30 days on load, then it can be filtered down to 1 day, this was to be efficient and seamless. +- Adding in a query box to the table section would not be too much modification, the only part that would need attention would be ensuring that the query results are always in the same format, either by limiting the structure of the query, using drop downs or increasing complexity of the parser. +#### Backing Up the Database +For logical backup, the only relevant files and directories are: + +- ts\_backup.sh which performs a logical backup using pg\_dump and saves it to the /backup/ directory. +- ts\_restore.sh takes an argument of a backup file that exists within the backup directory, then restores from it. +- /backup/ the directory where logical backups are stored. + +Physical backup is similar in its usage, but required a custom timescaledb image for installation of pgBackRest: + +- pgbr\_backup.sh performs a backup using pgBackRest. Can be given an argument of full, incr, or diff to determine the type of physical backup. +- pgbr\_restore.sh provides all existing physical backups within the pgbackrest\_data volume, and restores from the chosen backup. +- pgbackrest\_data is a volume that is mapped to the /var/lib/pgbackrest/ directory of the timescaledb image. This is required for the purpose of having a temp container. +- pgbr\_cleanup.sh wipes all data in the pgbackrest\_data volume and re-creates necessary files. Helpful when you have performed a logical restore, as the data will be invalid due to pg\_dump not considering pgBackRest and timelines. + +Files related to config and initialisation of pgBackRest: + +- pgbr\_init.sh initialises the stanza of pgBackRest to allow for archiving of timescale data. +- Dockerfile sets up a custom timescale image with pgBackRest installed. + - Both pgbr\_init.sh and custom\_postgresql.conf are moved to required locations within the image. 
+- pgbackrest.conf provides configuration for pgBackRest, such as: + - Logging (log-level-file/log-level-console) + - Full backup retention (“repo1-retention-full=4” means 4 full backups are stored max) + - pgBackRest storage (“repo1-path=/var/lib/pgbackrest”) + - Path to timescale data (“pg1-path=/home/postgres/pgdata/data”) +- custom\_postgresql.conf provides configuration for timescaledb regarding specific backup archiving options. + - “recovery\_target = immediate” disables PITR. + +There are currently some limitations regarding physical backup that may not be avoidable: + +- Upon performing a logical backup restore, all stored physical backup data is no longer valid. + - This is due to a postgres feature called timelines, where logical backup does not care for this, and essentially starts a full new timeline, despite technically being valid at some point. + - In the current state It is recommended to wipe the pgbackrest\_data volume following a logical restore using pgbr\_cleanup.sh, or delete the container and volume and restart the timescale container to allow for it to initialise again. +- With multiple backups, if you restore to an older one, backups taken after that restore point will no longer be valid, as a new timeline has been created. +- Any attempt to restore to an invalid timeline point will result in the database failing to startup, as it will detect this invalid config and will refuse to startup. diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559641.png b/doc/tsdb/media/2023-10-29_17-10-1698559641.png new file mode 100644 index 00000000..efcca9fe Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559641.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559816.png b/doc/tsdb/media/2023-10-29_17-10-1698559816.png new file mode 100644 index 00000000..5b18dec7 Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559816.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559841.png b/doc/tsdb/media/2023-10-29_17-10-1698559841.png new file mode 100644 index 00000000..4ee2972e Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559841.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559851.png b/doc/tsdb/media/2023-10-29_17-10-1698559851.png new file mode 100644 index 00000000..cd10ec9d Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559851.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559932.png b/doc/tsdb/media/2023-10-29_17-10-1698559932.png new file mode 100644 index 00000000..1495cb78 Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559932.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698559983.png b/doc/tsdb/media/2023-10-29_17-10-1698559983.png new file mode 100644 index 00000000..e32c308c Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698559983.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698560040.png b/doc/tsdb/media/2023-10-29_17-10-1698560040.png new file mode 100644 index 00000000..0d7dac92 Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698560040.png differ diff --git a/doc/tsdb/media/2023-10-29_17-10-1698560140.png b/doc/tsdb/media/2023-10-29_17-10-1698560140.png new file mode 100644 index 00000000..546ebbdb Binary files /dev/null and b/doc/tsdb/media/2023-10-29_17-10-1698560140.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.001.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.001.png new file mode 100644 index 00000000..b352513e 
Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.001.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.002.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.002.png new file mode 100644 index 00000000..249ca920 Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.002.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.003.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.003.png new file mode 100644 index 00000000..478fed59 Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.003.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.004.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.004.png new file mode 100644 index 00000000..71e8fd8b Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.004.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.005.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.005.png new file mode 100644 index 00000000..2177cb72 Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.005.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.006.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.006.png new file mode 100644 index 00000000..66d25d27 Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.006.png differ diff --git a/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.007.png b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.007.png new file mode 100644 index 00000000..4fdce89d Binary files /dev/null and b/doc/tsdb/media/Aspose.Words.42e69971-a9a6-42e9-8a81-580d7a5f5d36.007.png differ diff --git a/doc/tsdb/media/api-docs.png b/doc/tsdb/media/api-docs.png new file mode 100644 index 00000000..5b7f40fb Binary files /dev/null and b/doc/tsdb/media/api-docs.png differ diff --git a/doc/tsdb/media/api_test.png b/doc/tsdb/media/api_test.png new file mode 100644 index 00000000..5a40edf4 Binary files /dev/null and b/doc/tsdb/media/api_test.png differ diff --git a/doc/tsdb/media/db-direct-query.png b/doc/tsdb/media/db-direct-query.png new file mode 100644 index 00000000..a208a771 Binary files /dev/null and b/doc/tsdb/media/db-direct-query.png differ diff --git a/doc/tsdb/media/docker-ps.png b/doc/tsdb/media/docker-ps.png new file mode 100644 index 00000000..f03b2d72 Binary files /dev/null and b/doc/tsdb/media/docker-ps.png differ diff --git a/doc/tsdb/media/logical_30_days.png b/doc/tsdb/media/logical_30_days.png new file mode 100644 index 00000000..7b6968df Binary files /dev/null and b/doc/tsdb/media/logical_30_days.png differ diff --git a/doc/tsdb/media/logical_7_days.png b/doc/tsdb/media/logical_7_days.png new file mode 100644 index 00000000..b35c0e1b Binary files /dev/null and b/doc/tsdb/media/logical_7_days.png differ diff --git a/doc/tsdb/media/logical_7_days_2.png b/doc/tsdb/media/logical_7_days_2.png new file mode 100644 index 00000000..744dd96e Binary files /dev/null and b/doc/tsdb/media/logical_7_days_2.png differ diff --git a/doc/tsdb/media/physical_empty.png b/doc/tsdb/media/physical_empty.png new file mode 100644 index 00000000..80660a6a Binary files /dev/null and b/doc/tsdb/media/physical_empty.png 
differ diff --git a/doc/tsdb/media/std_name_tests.png b/doc/tsdb/media/std_name_tests.png new file mode 100644 index 00000000..ff680b20 Binary files /dev/null and b/doc/tsdb/media/std_name_tests.png differ diff --git a/doc/tsdb/media/store_msgs.png b/doc/tsdb/media/store_msgs.png new file mode 100644 index 00000000..52cedd82 Binary files /dev/null and b/doc/tsdb/media/store_msgs.png differ diff --git a/doc/tsdb/testing document.md b/doc/tsdb/testing document.md new file mode 100644 index 00000000..a5fa15e3 --- /dev/null +++ b/doc/tsdb/testing document.md @@ -0,0 +1,171 @@ +### IoTa: Time Series Database Testing Document +--- + +#### Purpose +- This document will highlight the requirements set forth in the project vision document and show the relevent test scripts, how to run them. It is possible that some of the requirements cannot clearly be shown through testing, however we can show through documentation or code that this requirement has been met. + +#### Test Scripts +- Most scripts will require IoTa to be up and running. +- Ideally only run the tests in a test environment as data may be added or removed from the tsdb +- Test scripts may need to be updated if hostnames, or other similar settings change. +- Test scripts are added into [test/python](https://github.com/ZakhaevK/itc303-team3-broker/tree/merge_dpi/test/python) +- all tests assume you're starting from the base directory ie: `/home/cal/303/itc303-team3-broker` + +Test|Requires Running Instance|run commands|notes +|--|--|--|--| +[Integration Tests](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestIntegrationTSDB.py)|Yes|`cd test/python`
`python -m pytest -v TestIntegrationTSDB.py`|It will add some test data to the database. [Retrieval/API Tests](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestTSDBAPI.py)|Yes|`./load_data.sh`
`cd test/python`
`python -m pytest -v TestTSDBAPI.py`|Needs at least one device with p_uid and l_uid #1;
inserts into database. +[Webapp time series graph](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/test_web_app.sh)|Yes|`./load_data.sh`
`cd test/python`
`./test_web_app.sh`|Requires devices to exist with id 1. After running the script, head to the IoTa web app and check the physical or logical pages for time series data.
- Uses hard-coded dates (~9/10/23), so once those dates are more than 30 days old the graph will not show the data. [Standardised Naming](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestStdNaming.py)|No|`cd test/python`
`python -m pytest -v TestStdNaming.py`|you need to have to have python 3.10, and ideally most requirements so installing them from `../../src/python/restapi/requirements.txt` is easiest +--- +#### Requirements Breakdown + +##### Main Requirements: +Requirement|Test Script|Supported Document +|--|--|--| +Storage of time series data|[link](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestIntegrationTSDB.py)|[link](#storage-of-time-series-data) +Retrieval of time series data|[link](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestTSDBAPI.py)|[link](#retrieval-of-time-series-data) +Runs parallel with existing databases|No|[link](#runs-parallel) +No cloud hosting|No|[link](#cloud-hosting) +Backup and restore scripts|No|[link](#backup-and-restore) +Webapp additional web graph to visualise time series |[link](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/test_web_app.sh)|[link](#webapp-time-series-graph) +Compatibilty with existing IoTa implementation|No|[link](#iota-compatibility) +API to query database|[link](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestTSDBAPI.py)|[link](#api) + +##### Other Requirements: +Requirement|Test Script|Supported Document +|--|--|--| +Access restricted to authorised users|No|[link](#security-practices) +Robust implementation|No|[link](#robust-implementation) +Accurately store and retrieve data|[link](https://github.com/ZakhaevK/itc303-team3-broker/blob/merge_dpi/test/python/TestIntegrationTSDB.py)|[link](#accurate-data) +System is at least as easy as existing implementation|No|[link](#easy-to-use) + +--- +#### IoTa Compatibility +- Existing systems have not been modified in any way that would affect their dependants. +- Some changes to existing sections have been added to, to ensure strong compaibility with IoTa. +- We have mirrored existing designs to try ensure consistency. +- See below table for a list of some of the changes to existing IoTa files and directories. + +--- +#### Storage of time series data +- Listener (TS_LTSReader.py) performs message handling in conjunction with Timescale.py for insertion. +- Messages that are not of IoTa message format, or contain invalid data will be dropped. +- Messages containing an ID pairing that does not exist within Device Mapper will be dropped. +- Time series data names are standardised prior to storage in the Timescale database as per [link](#robust-implementation). +- The TestIntegrationTSDB.py file tests this functionality, and passes as seen in the image below: + +![LINKED IMAGE](./media/store_msgs.png) +![LINKED IMAGE](./media/2023-10-29_17-10-1698559983.png) +--- +#### Retrieval of time series data +- The main method of retrieving the time series data is via API (covered in API section) +- Secondary way would be to query the database directly. +- The `../../compose/.tsdb_env` sets the database credentials +- The `../../compose/.env` also has the credentials, however these are for pulling the them rather than setting them to help maintain consistency. +- With the container running, see querying: +- `docker exec -it test-timescaledb-1 psql -U postgres -d postgres` + +![image](./media/db-direct-query.png) + +--- +#### API +- the RestAPI uses same end points as existing api +- The TSDBAPI.py file provides the /query/ route for retrieval from TimescaleDB. +- The TestTSDBAPI.py file runs an automated test to confirm API functionality is working as expected. 
+---
+#### API
+- The REST API uses the same endpoints as the existing API.
+- The TSDBAPI.py file provides the /query/ route for retrieval from TimescaleDB.
+- The TestTSDBAPI.py file runs an automated test to confirm API functionality is working as expected.
+- Typically, browse to `0.0.0.0:5687/docs` to get a full view of the implemented features.
+- The main options are: query the database, get records by luid or puid, get by function, and get by time.
+
+![image](./media/api_test.png)
+![image](./media/api-docs.png)
+
+---
+#### Webapp Time Series Graph
+- The time series graph uses chart.js.
+- It adds a time series graph at the bottom of each logical or physical device page.
+- It allows 30 day, 7 day and 1 day selections, and the ability to enable or disable certain time series.
+- To run the associated test script to check that this works as intended, head to `../../test/python/`
+- and run `./test_web_app.sh` - it requires having a physical and logical device with uid 1 in the system (you can easily do this by using the `./load-data.sh` script)
+- At this point you can go to the webapp, click on puid or luid 1, and check the bottom of the page.
+
+![images](../../doc/tsdb/media/physical_empty.png)
+![images](../../doc/tsdb/media/logical_30_days.png)
+![images](../../doc/tsdb/media/logical_7_days.png)
+![images](../../doc/tsdb/media/logical_7_days_2.png)
+![images](./media/2023-10-29_17-10-1698559816.png)
+![images](./media/2023-10-29_17-10-1698559841.png)
+![images](./media/2023-10-29_17-10-1698560040.png)
+![images](./media/2023-10-29_17-10-1698560140.png)
+
+---
+#### Logical Backup and Restore
+- Only full backup is implemented.
+- There are two scripts to handle this: `../../ts_backup.sh` and `../../ts_restore.sh`.
+- They are straightforward and quite verbose, to ensure the user knows the scripts have run correctly without error.
+- Backup data is in the form of chunks, due to the TimescaleDB hypertables used for optimisation.
+- Further information on functionality is within the [user manual](https://docs.google.com/document/d/1Y9wej463ze6CFD0ZhA6pwCcQbC8DD6kX/edit?usp=drive_link&ouid=105542707453657000248&rtpof=true&sd=true).
+
+---
+#### Physical Backup and Restore
+- Supports full, incremental, and differential backup options (see the example invocations below).
+- There are two scripts to handle this: `../../pgbr_backup.sh` and `../../pgbr_restore.sh`.
+- The pgbr_backup.sh script can be given a command-line argument: full (full backup), incr (incremental), or diff (differential).
+- Backups take more space than the logical approach, but may be more suitable for frequent backups, and faster for larger DBs due to the incremental option.
+- Backup data is stored in a volume called prod_pgbackrest_data.
+- Care must be taken when restoring, as postgres timelines can incur errors if you restore to a backup that is not within the same timeline.
+- It is best to restore to the most recent backup, to prevent any timeline issues.
+- Logical backup should be used to preserve important historical backups; if a logical restore is performed, timelines are destroyed, so it is recommended to delete all physical backups following a logical restore.
+- By default PITR is set off, but it can be re-enabled within the timescale/postgres/custom_postgresql.conf file by removing, or changing, the "recovery_target = immediate" line.
+- Further information on functionality is within the [user manual](https://docs.google.com/document/d/1Y9wej463ze6CFD0ZhA6pwCcQbC8DD6kX/edit?usp=drive_link&ouid=105542707453657000248&rtpof=true&sd=true).
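+
+As a quick reference for the two approaches above, typical invocations from the repository root look like the following (a sketch only; the scripts are intentionally verbose, so check their output to confirm each step succeeded):
+
+```bash
+# logical (full) backup and restore
+./ts_backup.sh
+./ts_restore.sh
+
+# physical pgBackRest backups: full, incremental, differential
+./pgbr_backup.sh full
+./pgbr_backup.sh incr
+./pgbr_backup.sh diff
+
+# interactive physical restore (lists available backups, then asks for a label)
+./pgbr_restore.sh
+```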
+
+---
+#### Cloud hosting
+- All data is self-hosted within the docker compose stack via Timescale and the existing Postgres database.
+- It has been done in the same manner as the existing postgresql db.
+
+```
+services:
+  ...
+
+  timescaledb:
+    volumes:
+      - tsdb_db:/var/lib/postgresql/ts_data
+
+volumes:
+  ...
+
+  tsdb_db:
+    external: true
+```
+
+---
+#### Robust implementation
+- Throughout the message handling process, things are generally wrapped in try/except blocks, so that if one segment fails it will not block any new incoming messages.
+- We have standardised the naming of the time series data to ensure that the data is consistent.
+
+![LINKED IMAGE](./media/std_name_tests.png)
+
+---
+#### Runs Parallel
+- The screenshot below shows all running containers when IoTa is running: all the existing containers are running, plus a few extra ones for the time series features.
+
+![picture](./media/docker-ps.png)
+
+---
+#### Easy To Use
+- As we have followed existing designs, using the new features should feel the same as using existing features.
+- Running and configuring the environment has not changed, and there are no extra steps to follow.
+- The time series features should work automatically, and Timescale has solid documentation, making understanding and extending features less of a hassle.
+
+---
+#### Accurate Data
+- Throughout the implementation, we have been testing with a variety of automatically generated data derived from existing `iota.sql` real data.
+- We have not been able to identify any instances of the database changing values.
+- The database stores values with a high degree of precision, e.g. `28.521567509813398`, so truncation of sensor values should not cause issues.
+
+---
+#### Security Practices
+- The time series database uses the same authentication as the existing postgres database; this is because, at its core, it is also a postgres database.
+- All python scripts that require access to the database get access by reading the environment variables set in the `../compose/.env` file.
diff --git a/docker_volumes.sh b/docker_volumes.sh
new file mode 100755
index 00000000..19c05b75
--- /dev/null
+++ b/docker_volumes.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+echo "removing production volumes"
+docker volume remove mq_data
+docker volume remove tsdb_db
+docker volume remove broker_db
+
+echo "creating docker volumes"
+docker volume create mq_data
+docker volume create tsdb_db
+docker volume create broker_db
diff --git a/images/restapi/requirements.txt b/images/restapi/requirements.txt
index 1c5ca2dc..9d580371 100644
--- a/images/restapi/requirements.txt
+++ b/images/restapi/requirements.txt
@@ -44,3 +44,8 @@ uvicorn==0.17.6
 uvloop==0.16.0
 watchgod==0.8.2
 websockets==10.3
+exceptiongroup==1.1.1
+iniconfig==2.0.0
+packaging==23.1
+pluggy==1.2.0
+tomli==2.0.1
diff --git a/load-data.sh b/load-data.sh
new file mode 100755
index 00000000..ea969db7
--- /dev/null
+++ b/load-data.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+user="user"
+pass="pass"
+num_devices=10
+container_name="test-lm-1"
+
+# Check if prod container is running
+if docker ps -a --format "{{.Names}}" | grep -q "^prod-lm-1$"; then
+    container_name="prod-lm-1"
+fi
+
+# get users
+users=$(docker exec "$container_name" python -m broker-cli users ls | tr -d "[]'")
+IFS=',' read -r -a array <<<"${users}"
+
+# check if user exists, then create it if it doesn't
+if echo "${array[@]}" | grep -q -w "$user"; then
+    echo "user already exists"
+else
+    echo 'adding user'
+    docker exec "$container_name" python -m broker-cli users add -u "${user}" -p "${pass}"
+    users=$(docker exec "$container_name" python -m broker-cli users ls)
+    echo "listed users: ${users}"
+    echo "login with ${user} && ${pass}"
+fi
+
+# generate 10 lots of devices, add them and map them
+counter=1
+puids=()
+echo "adding devices and mappings"
+while [ $counter -le $num_devices ]; do
+
dev_name="Test Sensor $counter" + longi=$(awk 'BEGIN{srand();printf"%.4f",501.0+rand()*10.0}') #somewhat nsw + lati=$(awk 'BEGIN{srand();printf"%.4f",29.0+rand()*7.0}') #somewhat nsw + app_id="ttn-app-id-$counter" + dev_id="ttn-device-id-$counter" + dev_eui="ttn-dev-eui-$counter" + device_template='{ + "source_name": "ttn", + "name": "'$dev_name'", + "location": { + "lat": "-'$lati'", + "long": "'$longi'" + }, + "source_ids": { + "app_id": "'$app_id'", + "dev_id": "'$dev_id'", + "dev_eui": "'$dev_eui'" + }, + "properties": { + "description": "Sample Test Device Properties" + } + }' + #echo "$device_template" + puid=$(docker exec "$container_name" python -m broker-cli pd create --json "$device_template" | grep "uid" | sed 's/[^0-9]*//g') + luid=$(docker exec "$container_name" python -m broker-cli ld create --json "$device_template" | grep -oP 'uid=\K\d+') + docker exec "$container_name" python -m broker-cli map start --puid "$puid" --luid "$luid" >/dev/null + puids+=("$puid") + ((counter++)) + echo -ne "." +done +echo + +# print devices +#echo 'PHYSICAL DEVICES:' +#devices=$(docker exec "$container_name" python -m broker-cli pd ls --plain) +#echo -e "listed devices:\n${devices}" +#echo 'LOGICAL DEVICES:' +#devices=$(docker exec "$container_name" python -m broker-cli ld ls --plain) +#echo -e "listed devices:\n${devices}" +#echo 'MAPPINGS:' +#for puid in "${puids[@]}"; do +# output=$(docker exec "$container_name" python -m broker-cli map ls --puid "$puid") +# echo "$output" | jq -r '"pd uid:\(.pd.uid) -> ld uid:\(.ld.uid)"' +#done diff --git a/master-test-plan.md b/master-test-plan.md new file mode 100644 index 00000000..41619bba --- /dev/null +++ b/master-test-plan.md @@ -0,0 +1,485 @@ +**IoTa Time-series Database** +**Master Test Plan** + +Version Information + + ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| **Version** | **Date** | **Remarks** | **Author** |
+|-------------|----------|-------------|------------|
+| 0.1 | 31/03/23 | Added to the Introduction, and documents section of the Master Test Plan. These are near completion. | Zak K |
+| 0.2 | 02/04/23 | First draft of the Test Strategy written. Executive summary began to be written. | Zak K |
+| 0.3 | 03/04/23 | First draft of Test Plan written. Lacking acceptance testing section at this time. | Zak K |
+| 0.5 | 04/04/23 | Full draft document completed. Risks may be added/changed, and tests expanded as the project matures. | Zak K |
+| 0.6 | 07/04/23 | Removed blue template text. | Zak K |
+| 0.9 | 09/04/23 | Made small adjustments and additions to Test objectives/levels. | Zak K |
+
+# Executive summary
+
+**Project objective**
+
+The goal of this project can be summarised with the following points:
+
+- Implement a new TSDB for storing data received from the existing RabbitMQ container.
+- Implement a service which will take the data and store it within the new TSDB.
+- Implement API features to allow for pulling of data from the TSDB.
+- Create scripts that allow for backup management, and also importing of data from the existing IoTa database tables.
+- Meet the performance and design standards expected by DPI.
+
+**Test approach**
+
+Unit testing will primarily be necessary to make sure the service can extract the desired data points from the messages provided by RabbitMQ. This could involve just an example message in the format to be used, to allow the program to parse it. It could also include the other database formats as well.
+
+Integration testing should focus on interaction with specific functionality between Docker containers; this will include API to service, service to TSDB, and web interface to API.
+
+System testing will monitor the full use cases and the data that flows between the different elements of the project. This includes message parsing, extraction of data points, and finally storage within the TSDB. It also includes the retrieval of data from the TSDB via an API request to the service from the web interface.
+
+Acceptance testing will be similar to system testing, but will be focused on getting feedback from the stakeholders to confirm that the functionality is as they expected, or whether there are changes/additions that could be made to better fit their needs.
+
+**Test objectives**
+
+- Confirm functionality of individual methods is correct.
+- Verify interaction between Docker containers is as expected.
+- Minimise errors in data storage and retrieval.
+- Confirm scripts perform the desired tasks, and offer all necessary options.
+- Test various TSDBs to make a decision on which fits best for this project.
+ +# Table of Contents + +[1 Introduction [4](#introduction)] + +[1.1 Project and project objective +[4](#project-and-project-objective)] + +[1.2 Objective of the master test plan +[4](#objective-of-the-master-test-plan)] + +[2 Documentation [5](#documentation)] + +[2.1 Basis for the master test plan [5](#basis-for-the-master-test-plan)] + +[2.2 Test basis [5](#test-basis)] + +[3 Test strategy [6](#test-strategy)] + +[3.1 Risk analyses [6](#risk-analyses)] + +[3.1.1 Product Risk Analysis +[6](#product-risk-analysis)] + +[3.1.2 Technical Risk Analysis +[7](#technical-risk-analysis)] + +[3.2 Test strategy [8](#test-strategy-1)] + +[4 Test Levels [9](#test-levels)] + +[4.1 The \ [9](#_Toc37168495)] + +[4.1.1 Entrance and Exit Criteria +[9](#entrance-and-exit-criteria)] + +[4.1.2 Test Environment [9](#test-environment)] + +[4.1.3 Test Objectives [9](#_Toc37168498)] + +# Introduction + +## Project and project objective + +The goal of the project is to identify the best Time-series Database +(TSDB) with consideration for DPI’s requirements, and implement it into +the existing IoTa Docker compose structure. Then we will develop the +service that will handle parsing of messages from the RabbitMQ +implementation into the format used by the TSDB and subsequently storing +the data points. + +This same service will also require an API for interaction that will be +usable to retrieve data from the TSDB, which will have the service +modify the TSDB message back into IoTa message format to be read by the +web interface currently utilised by IoTa. + +There will also need to be extra scripts that can be run via CLI, which +will be capable of converting current IoTa database message tables into +the format used by the TSDB for storage. + +Though not explicitly stated to be CLI based, this may also include +backing up the TSDB data, and restoring to a previous backup. + +Development of these features will be done within DPI’s constraints, in +which the developed service will need to handle a message roughly every +5 seconds, and store it for use later. The requirements state that we +must develop the service in Python, as DPI will provide us with a Docker +image at a later date for this purpose. For the TSDB, we must ensure +that it does not require a cloud implementation, and can be hosted +within a Docker image. + +## Objective of the master test plan + +The objective of the Master Test Plan (MTP) is to inform all who are +involved in the test process about the approach, the activities, +including the mutual relations and dependencies, and the (end) products +to be delivered. + +The master test plan describes the test approach, the activities and +(end) products. + +Specifically for this project, the objective will be to provide criteria +for testing against the TSDB implementation, in which once unit testing +is done with the service, we can develop tests to properly benchmark the +performance of the service. Each TSDB candidate will be tested and we +can make a decision on what would fit best with the dataset, and +integrate well with our service. + +# Documentation + +This chapter describes the documentation used in relation with the +master test plan. The described documentation concerns a first inventory +and will be elaborated, actualized and detailed at a later stage, during +the separate test levels. + +## Basis for the master test plan + +The following documents are used as basis for this master test plan. 
+ +| **Document name** | **Version** | **Date** | **Author** | +|------------------------|-------------|----------|------------| +| inception_vision.md | 1.0 | 27.03.23 | Sara | +| Inception_risk_list.md | 1.0 | 29.03.23 | Callum | + +## Test basis + +The test basis contains the documentation that serves as basis for the +tests that have to be executed. The overview below describes the +documentation that is the starting point for testing. + +| **Document name** | **Version** | **Date** | **Author** | +|--------------------------------------|-------------|----------|------------| +| inception_supporting_requirements.md | 1.0 | 27.03.23 | Zak | +| Architecture_notebook.md | 1.0 | 29.03.23 | Rishabh | + +Further documentation will likely appear here as full use cases and more +specific project focused documents are developed. More information is +currently set to be provided to us in the next few days. + +# Test strategy + +The time available for testing is limited; not everything can be tested +with equal thoroughness. This means that choices have to be made +regarding the depth of testing. Also, it is strived to divide test +capacity as effective and efficient as possible over the total test +project. This principle is the basis of the test strategy. + +The test strategy is based on risks: a system has to function in +practice to an extent that no unacceptable risks for the organization +arise from it. If the delivery of a system brings along many risks, +thorough testing needs to be put in place; the opposite of the spectrum +is also true: 'no risk, no test'. + +The first step in determining the test strategy is the execution of a +product risk analyses. This is elaborated in §3.1. + +The test strategy is subsequently based on the results of the risk +analyses. The test strategy lays down what, how and when (in which test +level) is being tested and is focused in finding the most important +defects as early as possible for the lowest costs. This can be +summarized as testing with an optimal use of the available capacity and +time. The test strategy is described in §3.3. + +## Risk analyses + +### Product Risk Analysis + +The product risks are determined in cooperation with the client and the +other parties involved. Product risks are those risks associated with +the final product failing to meet functional requirements and required +system quality characteristics (NFRs) This product risk analyses (PRA) +is comprised of two steps: + +| **Product Risk** | **Characteristic** | **Description** | **Risk Class** | +|------------------|--------------------|--------------------------------------------------------------------------------------------------------|----------------| +| 1 | Performance | TSDB design choices fail to meet performance standards set within the Docker compose environment. | B | +| 2 | Compatibility | TSDB suffers from integration issues within the Docker compose structure | A | +| 3 | Integrity | Service fails to extract all the desired details from the data, resulting in the loss of quality data. | C | +| 4 | Compatibility | API additions fail to be implemented correctly within the IoTa interface. | C | + +The extent of the risk (the risk class) is dependent on the chance of +failure (how big the chance is that it goes wrong?) and it depends on +the damage for the organization if it actually occurs. + +### Technical Risk Analysis + +Technical risks are determined in cooperation with the analyst/designers +and programmers involved. 
Technical risks are development risks +associated with failing to create a system that behaves according to +specifications derived from requirements. (I.E. those aspects of +development that pose particular challenges.) This technical risk +analyses (TRA) is comprised of two steps: + +| **Technical risk** | **Risk Area** | **Description** | **Risk Class** | +|--------------------|---------------|-------------------------------------------------------------------------------------------------|----------------| +| 1 | Parsing | A required message format is not properly recognized by the service. | B | +| 2 | Connectivity | Connection to related IoTa containers is inconsistent, resulting in lost data or functionality. | C | +| 3 | Interfacing | API requests fail to call the correct methods within the service. | B | +| 4 | Interfacing | CLI scripts fail to execute correctly. | C | + +## Test strategy + +For each risk from the product and technical risk analysis the risk +class determines the thoroughness of the test. Risk class A is the +highest risk class and C the lowest. The test strategy is subsequently +focused on covering the risks with the highest risk class as early as +possible in the test project. + +| Risk | Description | Risk Cat | Test Level | | | | | | +|---------------|----------------------------------------------------------------------------------------------------|----------|------------|------|--------|--------|------|------| +| | | | SR | Unit | Int | ST | FAT | UAT | +| Performance | TSDB design choices fail to meet performance standards set within the Docker compose environment. | B | \*\*\* | | \* | \*\* | \*\* | \*\* | +| Compatibility | TSDB suffers from integration issues within the Docker compose structure | A | \*\*\* | | \*\*\* | \*\* | | | +| Integrity | Service fails to extract all the desired details from the data, resulting in loss of quality data. | C | \*\* | \*\* | \*\*\* | \*\* | | | +| Compatibility | API additions fail to be implemented correctly within the IoTa interface. | C | \*\* | | \*\* | \* | \*\* | \*\* | +| Parsing | A required message format is not properly recognized by the service. | B | \*\* | \*\* | | | | | +| Connectivity | Connection to related IoTa containers is inconsistent, resulting in lost data or functionality. | C | \*\* | | \*\*\* | | \*\* | \* | +| Interfacing | API requests fail to call the correct methods within the service. | C | \* | \* | \*\*\* | | | | +| Interfacing | CLI scripts fail to execute correctly. | C | | \* | | \*\*\* | \*\* | \* | + +Legend for the table above: + +| RC | Risk class (from product and technical risk analysis, where A=high risk, B=average risk, C=low risk) | +|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| SR | Static Review of the various intermediary products (requirements, functional design, technical design). 
Checking and examining artefacts without executing the software | +| Unit | Unit test and Unit integration test | +| Integration | Integration tests (low level (L), high level(H)) | +| FAT | Functional acceptance test (alpha stage UAT) | +| UAT | User acceptance test (Beta stage UAT) | +| ST | System test (functional scenario testing (F), system quality scenario testing (S)) | +|  | Limited thoroughness of the test | +|  | Medium thoroughness of the test | +|  | High thoroughness of the test | +| \ | If a cell is blank, it means that the relevant test or evaluation level does not have to be concerned with the characteristic | + +# Test Levels + +For this MTP the following test levels are acknowledged: + +| **Test level** | **Goal** | +|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Unit testing: | The aim is to test each part of the software by separating it. It checks that component are fulfilling functionalities or not | +| Integration testing: | In this testing phase, different software modules are combined and tested as a group to make sure that integrated system is ready for system testing. Integrating testing checks the data flow from one module to other modules. | +| System testing: | System testing is performed on a complete, integrated system. It allows checking system's compliance as per the requirements. It tests the overall interaction of components. It involves load, performance, reliability and security testing. | +| Acceptance testing: | Acceptance testing is a test conducted to find if the requirements of a specification or contract are met as per its delivery. | + +## The Unit Testing Level + +The primary goal of unit testing is to confirm the service understands +the message types it is required to work with. + +### Entrance and Exit Criteria + +Entry criteria for this section is having the message formats that will +be converted into the TSDB data points and the functions related to +them. Possibly even for pulling the data from the TSDB, but it may be +unlikely until integration testing. + +Exit criteria is the messages are successful in being read by the +service, and the output is the data points that will be stored in later +stages of development. + +### Test Environment + +Simple testing of +individual processes such as reading from example messages of the +formats present in the live environment in listening to RabbitMQ. This +would also include message tables from within the current FDT and IoTa +databases that would only be called with specific scripts. + +### Test Objectives + +| **Risk** | **Test Goals** | **Risk Verification** | **Schedule** | +|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------| +| Integrity: Service fails to extract all the desired details from the data, resulting in the loss of quality data. | Confirm for each message type, that the data is correct as per the messages. 
| Check the resulting data for each message format is not lacking in details/data points. | During implementation of the functions related to handling messages. | +| Parsing: A required message format is not properly recognized by the service. | Ensure the desired message formats are handled correctly and are not rejected or extracted with invalid data. | Test each message type with examples either provided by DPI or created based on our understanding. | During implementation of the functions related to handling messages. | +| Interfacing: API requests fail to call the correct methods within the service. | Ensure the API is calling the correct functions when it receives a request. | Test each function of the API, and the different amounts of requested data to confirm the correct details are included in the function calls. | During implementation of the API, follows later than prior unit-testing. | + +## The Integration testing Level + +The primary goal of integration testing is to confirm the service +interacts correctly with individual components of IoTa, and also the new +TSDB. + +### Entrance and Exit Criteria + +Entry criteria is to have the other components of the Docker compose +running, and ready to receive/send requests to the service. + +Exit criteria is each individual component can properly interact with +the service in isolation between two of the components. + +### Test Environment + +Testing of functions will begin after the service has connected to the +related component to be tested. For example the API and the service will +be tested to confirm the requests reach the service and work as +expected. + +The TSDB (possibly multiple) should likely be tested in conjunction with +the message parsing functionality. + +### Test Objectives + +| **Risk** | **Test Goals** | **Risk Verification** | **Schedule** | +|----------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| Performance: TSDB design choices fail to meet performance standards set within the Docker compose environment. | Ensure the service can handle multiple messages and save them to the TSDB. | Setup the service with multiple messages to be saved to the TSDB. Check that all messages were saved to the TSDB and no data is missing. | During TSDB testing, as it will aid in decision making. | +| Compatibility: TSDB suffers from integration issues within the Docker compose structure | Ensure TSDB works within the Docker compose environment. Help identify best TSDB to be used. | Setup the TSDB within the Docker compose image, and have it interact with the service via a service request. | During TSDB testing. | +| Integrity: Service fails to extract all the desired details from the data, resulting in loss of quality data. | Ensure the desired details are retrieved from messages sent to the API and also IoTa DB CLI prompts. | Check that correct data points are extracted from the data of each message type. | Can be tested alongside other RabbitMQ related message handling tests, and also after for CLI requests. 
| +| Compatibility: API additions fail to be implemented correctly within the IoTa interface. | Ensure the web interface is providing the API with sufficient detail for different methods. | Check the output functions of the API based on what is done within the web interface. | Late stage testing, as it involves the IoTa interface and not key functionality. | +| Connectivity: Connection to related IoTa containers is inconsistent, resulting in lost data or functionality. | Ensure there is no issue in the service that may result in dropped connections between container elements. | Perform an extended run for both streams of messages from the API, and also separately to the TSDB. | Can be tested alongside other RabbitMQ related message handling tests. | +| Interfacing: API requests fail to call the correct methods within the service. | Test communication from the API to the service, to ensure that all method calls perform the right tasks and result in proper results. | Check data resulting from the method calls of the API, and see if the output is accurate to the data. | Can be tested alongside other RabbitMQ related message handling tests. | + +## The System Testing Level + +The primary goal of system testing is to confirm the service interacts +correctly with all elements of IoTa working together. This is +essentially full use cases ran with the full architecture in place. + +### Entrance and Exit Criteria + +Entry criteria is to have the full set of Docker containers running, and +ready to receive/send requests to the service. + +Exit criteria is each full use case results in the correct data being +stored and displayed, as well as scripts performing the correct tasks on +multiple systems. + +### Test Environment + +Testing of the service and TSDB in conjunction with the Docker +containers will be done by started at the beginning of each use case, +and following the full process to test each method within a close to +live environment. + +The TSDB will likely be monitored further for performance issues, and if +there’s any issues with our design choices, or its capabilities within +this implementation. + +### Test Objectives + +| **Risk** | **Test Goals** | **Risk Verification** | **Schedule** | +|----------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Performance: TSDB design choices fail to meet performance standards set within the Docker compose environment. | Check the performance of the TSDB schema, and if both retrieving and storing data is efficient. | Check response times for each use case involving the TSDB. Determine if this is a result of the TSDB itself, or if another part of the system architecture is responsible. | Monitor during each run of a method within the full system, while other tasks are being performed. | +| Compatibility: TSDB suffers from integration issues within the Docker compose structure | Identify if there is any issue in the TSDB compatibility within the full Docker compose structure. 
| Check if data is lacking any required details, or if the TSDB is simply not working as intended during specific operations. | Monitor during each portion of a method test that interacts with the TSDB. | +| Integrity: Service fails to extract all the desired details from the data, resulting in loss of quality data. | Check the output of the service within the TSDB within the full test environment. | Following message processing, check for invalid data within the TSDB, may also be worth checking output within the web interface after retrieval of data. | First part of the system test, a follow-up to population of database using RabbitMQ messages sent to the service. | +| Compatibility: API fails to be implemented correctly within the IoTa interface. | Confirmation of the API functionality during the live environment. | Check the output of queries from the user interface to the API, and confirm data, and ranges of data is accurate to the request. | Second part of the test, relies on the database storing the correct data. | +| Interfacing: CLI scripts fail to execute correctly. | Check functionality and reliability of scripts during the service running. | Confirm function of IoTa database table import into TSDB. Confirm backup scripts perform relevant functions, and work without issue. | Can be done at any point after beginning of System testing. Backup restore should likely be done after testing of live environment, unless it is intended to work that way and import lost data later. | + +## The Acceptance Testing Level + +The primary goal of acceptance testing is to confirm the service meets +the specific requirements set by DPI through testing with the purpose of +receiving their feedback. + +### Entrance and Exit Criteria + +Entry criteria is the system testing is successful, and is ready to be +presented to the stakeholders to determine if there is any issue, or +missing functionality. + +Exit criteria is confirmation by DPI that the service, TSDB, and API +implementation meets the requirements that they have set, and the work +is of a high standard. + +### Test Environment + +Either presenting the test to stakeholders via a stream, or providing +them with our repo to test on their own system, and we can advise on how +to interact with the system either live or using a guide. + +### Test Objectives + +| **Risk** | **Test Goals** | **Risk Verification** | **Schedule** | +|----------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------| +| Performance: TSDB design choices fail to meet performance standards set within the Docker compose environment. | To verify if that design is acceptable by the stakeholder’s standards, or clash with an existing part of the architecture. | Check with stakeholders if there is any concern regarding a specific design element, or performance metric that is related to the TSDB. | Following some demoing of the use cases. | +| Compatibility: API additions fail to be implemented correctly within the IoTa interface. | To verify by the stakeholders standards that no issue is occurring between the API and expected functionality. 
| Have the stakeholders try multiple inputs that would be often used during production, and check results. | Following the database being populated with messages. | +| Connectivity: Connection to related IoTa containers is inconsistent, resulting in lost data or functionality. | To verify connection between the service and related elements experiences no issues with DPI’s setup. | Verify all data flows remain uninterrupted, and nothing is lost during stakeholder test. | Should be checked throughout acceptance test to verify actions as they’re performed. | +| Interfacing: CLI scripts fail to execute correctly. | Confirm functionality of each script is sufficient for the needs of DPI. | Have the stakeholders run through the scripts and confirm options are sufficient, and results are as expected. | Performed at the end of acceptance test. | + diff --git a/pgbr_backup.sh b/pgbr_backup.sh new file mode 100755 index 00000000..63652a19 --- /dev/null +++ b/pgbr_backup.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Load environment variables +source compose/.env 2>/dev/null + +# Find the container name containing "timescale-1" +DB_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "timescaledb-1") + +if [ -z "$DB_CONTAINER_NAME" ]; then + echo "Error: Container containing 'timescale-1' not found." + exit 1 +fi + +# Determine backup type from the command line argument +BACKUP_TYPE=$1 + +if [[ "$BACKUP_TYPE" != "full" && "$BACKUP_TYPE" != "diff" && "$BACKUP_TYPE" != "incr" ]]; then + echo "Error: Invalid backup type. Please specify 'full', 'diff', or 'incr'." + exit 1 +fi + +# Perform the backup +docker exec -t $DB_CONTAINER_NAME pgbackrest --config=/home/postgres/pgdata/backup/pgbackrest.conf --stanza=demo --type=$BACKUP_TYPE backup + + +echo "Backup of type $BACKUP_TYPE completed." + diff --git a/pgbr_cleanup.sh b/pgbr_cleanup.sh new file mode 100755 index 00000000..18a18ded --- /dev/null +++ b/pgbr_cleanup.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +echo "WARNING! Before continuing, please read the following:" +echo "This file will wipe all pgBackRest files and create start fresh." +echo "This is best done in the case of corruption, or following a logical restore to reset timeline." +read -p "Do you wish to continue? (yes/no): " response + +if [[ "$response" != "yes" ]]; then + echo "Aborting." + exit 1 +fi + +# Configuration +PG_BACKREST_VOLUME="prod_pgbackrest_data" + +# Find the container name containing "timescaledb-1" +DB_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "timescaledb-1") + +# Find the container name containing "iota_tsdb_decoder-1" +DECODER_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "iota_tsdb_decoder-1") + +# Stop the TimescaleDB and decoder containers +echo "Stopping decoder container..." +docker stop "$DECODER_CONTAINER_NAME" +echo "Stopping TimescaleDB container..." +docker stop "$DB_CONTAINER_NAME" + +# Clear the data within the volume using a temporary container +echo "Clearing data inside pgbackrest_data volume..." +docker run --rm -v "${PG_BACKREST_VOLUME}:/data" busybox sh -c 'rm -rf /data/*' + +sleep 5 +# Start the TimescaleDB and Decoder containers +echo "Starting TimescaleDB container..." +docker start "$DB_CONTAINER_NAME" +docker exec -t "$DB_CONTAINER_NAME" pgbackrest --stanza=demo --config=/home/postgres/pgdata/backup/pgbackrest.conf stanza-create +sleep 1 +echo "Starting decoder container..." +docker start "$DECODER_CONTAINER_NAME" +echo "Process completed." 
+ diff --git a/pgbr_restore.sh b/pgbr_restore.sh new file mode 100755 index 00000000..7d48ebcf --- /dev/null +++ b/pgbr_restore.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +# Load environment variables from .env file +source compose/.env 2>/dev/null + +# Warning prompt about timelines +echo "WARNING! Before continuing, please read the following:" + +echo "Postgres makes use of timelines, which keep logs of changes from a point in time, allowing for backups to be restored to a certain point in its history." +echo +echo "Because of this to this, you should only attempt restore to a point prior to your last restore as part of that timeline, otherwise the restore will fail to complete." +echo "For example: If you take two backups, A (the first one) and B (the more recent one), if you restore to A, B will now be incompatible with the current timeline." +echo +echo "If you have recently performed a logical backup restore (ts_restore.sh), the timeline history will be erased," +echo "resulting in all physical backups prior to restore becoming incompatible with the current database." +echo +read -p "Do you wish to continue? (yes/no): " choice + +if [[ "$choice" != "yes" ]]; then + echo "Exiting restore process." + exit 1 +fi + +# Find the container name containing "timescaledb-1" +DB_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "timescaledb-1") +if [ -z "$DB_CONTAINER_NAME" ]; then + echo "Error: Container containing 'timescaledb-1' not found." + exit 1 +fi + +# Find the container name containing "iota_tsdb_decoder-1" +DECODER_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "iota_tsdb_decoder-1") +if [ -z "$DECODER_CONTAINER_NAME" ]; then + echo "Error: Container containing 'iota_tsdb_decoder-1' not found." + exit 1 +fi + +# Check if environment variables are set +if [ -z "$TSDB_USER" ] || [ -z "$TSDB_DB" ] || [ -z "$TSDB_PASSWORD" ]; then + echo "Error: Required environment variables are not set." + exit 1 +fi + +# List the backups without starting the temp container +echo "Available backups:" +docker exec -t $DB_CONTAINER_NAME pgbackrest info --stanza=demo + +# Ask the user for the backup label +read -p "Enter the backup label to restore (or press Enter for the latest): " BACKUP_LABEL + +# Stop the original container and decoder to stop message consumption +echo "Stopping the original container..." +docker stop $DECODER_CONTAINER_NAME +docker stop $DB_CONTAINER_NAME + +# Start a new temporary container using the same image but with a different entry point +TEMP_CONTAINER_NAME="temp_postgres_restore" +echo "Starting a temporary container without PostgreSQL..." +docker run -d \ + --name $TEMP_CONTAINER_NAME \ + -v tsdb_db:/home/postgres/pgdata/data \ + -v prod_pgbackrest_data:/var/lib/pgbackrest \ + -v $(pwd)/timescale/pgbackrest/pgbackrest.conf:/home/postgres/pgdata/backup/pgbackrest.conf \ + custom-timescaledb:latest /bin/sh -c "tail -f /dev/null & wait" + +# Ensure the PostgreSQL data directory is empty +echo "Ensuring the PostgreSQL data directory is empty..." +docker exec -t $TEMP_CONTAINER_NAME sh -c "rm -rf /home/postgres/pgdata/data/* && rm -rf /home/postgres/pgdata/data/.*" + +# Restore the database on the temporary container +echo "Restoring the database..." +if [ -z "$BACKUP_LABEL" ]; then + docker exec -t $TEMP_CONTAINER_NAME pgbackrest restore --stanza=demo +else + docker exec -t $TEMP_CONTAINER_NAME pgbackrest restore --stanza=demo --set=$BACKUP_LABEL +fi + +# Stop the temporary container +echo "Stopping the temporary container..." 
+docker stop $TEMP_CONTAINER_NAME +docker rm $TEMP_CONTAINER_NAME + +# Start the original containers +echo "Starting the original containers..." +docker start $DB_CONTAINER_NAME +sleep 5 +docker exec -it $DB_CONTAINER_NAME psql -U $TSDB_USER -d $TSDB_DB -c "SELECT pg_wal_replay_resume();" +sleep 1 +docker start $DECODER_CONTAINER_NAME + +echo "Database restore completed." + diff --git a/run.sh b/run.sh index 9df61f30..295f48af 100755 --- a/run.sh +++ b/run.sh @@ -1,21 +1,24 @@ #!/usr/bin/env bash set -euo pipefail -BROKER_ROOT=$(cd $(dirname $0); pwd) +BROKER_ROOT=$( + cd $(dirname $0) + pwd +) MODE=${1:-test} if [ ! -f $BROKER_ROOT/compose/.env ]; then - echo The file $BROKER_ROOT/compose/.env is missing. Copy $BROKER_ROOT/config/broker.env.template to $BROKER_ROOT/compose/.env and set the values. - exit 1 + echo The file $BROKER_ROOT/compose/.env is missing. Copy $BROKER_ROOT/config/broker.env.template to $BROKER_ROOT/compose/.env and set the values. + exit 1 fi cd $BROKER_ROOT cd compose/$MODE ./dc.sh down cd $BROKER_ROOT -docker build -q -t broker/python-base -f images/restapi/Dockerfile . -docker build -q -t broker/ttn_decoder -f images/ttn_decoder/Dockerfile . -docker build -q -t broker/mgmt-app -f src/www/Dockerfile . +docker build -t broker/python-base -f images/restapi/Dockerfile . +docker build -t broker/ttn_decoder -f images/ttn_decoder/Dockerfile . +docker build -t broker/mgmt-app -f src/www/Dockerfile . cd compose/$MODE ./dc.sh up -d ./dc.sh logs -f diff --git a/run2.sh b/run2.sh new file mode 100755 index 00000000..4be20147 --- /dev/null +++ b/run2.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +CWD="$(pwd)" + +#add our nginx container stuff +NGINX_ROOT=$( + cd 303/nginx + pwd +) +cd $NGINX_ROOT +docker ps | grep -q "nginx-t" && echo "Stopping nginx-t" && docker stop nginx-t >/dev/null +docker ps -a | grep -q "nginx-t" && docker rm nginx-t >/dev/null +docker images -a | grep -q "nginx_img" && docker rmi nginx_img >/dev/null +docker images -a | grep -q "nginx" || echo "pulling nginx image" && docker pull nginx:latest +docker build -q -t nginx_img . 
+docker run --name nginx-t -p 80:80 -d nginx_img:latest >/dev/null +docker start nginx-t >/dev/null +docker ps | grep -q 'nginx-t' && echo 'nginx-t started' + +#run the actual run +cd $CWD +echo "starting broker containers" +./run.sh "$@" diff --git a/sendmessages b/sendmessages new file mode 100755 index 00000000..b156268b --- /dev/null +++ b/sendmessages @@ -0,0 +1,24 @@ +#!/bin/bash + +for ((i = 1; i <= 10; i++)); do + timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") + battery_voltage=$((RANDOM % 10)) + + payload='{ + "broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", + "p_uid": 999, + "l_uid": 666, + "timestamp": "'"$timestamp"'", + "timeseries": [ + { + "name": "battery (v)", + "value": '"$battery_voltage"' + } + ] + }' + + docker exec test-mq-1 rabbitmqadmin publish -u broker -p CHANGEME exchange="lts_exchange" routing_key="ltsreader_logical_msg_queue" payload="$payload" properties="{}" + + echo "$payload" + sleep 1 +done diff --git a/src/python/api/client/DAO.py b/src/python/api/client/DAO.py index 28b330a9..320c3039 100644 --- a/src/python/api/client/DAO.py +++ b/src/python/api/client/DAO.py @@ -4,6 +4,7 @@ import dateutil.parser import psycopg2 from psycopg2 import pool +from typing import List, Tuple # Import Tuple import psycopg2.errors from psycopg2.extensions import AsIs from psycopg2.extras import Json, register_uuid @@ -1320,4 +1321,265 @@ def token_enable(uname)-> None: finally: if conn is not None: free_conn(conn) - + +""" +DATA_NAME_MAP : links incoming data names to a standardised version, so that timeseries data can be more coherent +""" +def _get_std_name(conn, input_name: str) -> str: + """ + Gets standard name given an input name + + This method allows the query to be more lightweight in those circumstances. + + conn: a database connection + name: input_name + """ + std_name = None + with conn.cursor() as cursor: + sql = 'select std_name from data_name_map where input_name ilike %s' + cursor.execute(sql, (input_name, )) + row = cursor.fetchone() + if row is not None: + std_name = row[0] + + return std_name + + +def get_std_name(input_name: str) -> str: + """ + CASE INSENSITIVE + """ + conn = None + try: + with _get_connection() as conn: + return _get_std_name(conn, input_name) + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('get_std_name failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def update_name_map(input_name: str, std_name:str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("update data_name_map set std_name=%s where input_name=%s", (std_name, input_name)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('update_name_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def add_name_map(input_name: str, std_name:str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("insert into data_name_map (input_name, std_name) values (%s, %s)", (input_name, std_name)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('add_name_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def remove_name_map(input_name: str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("DELETE FROM data_name_map WHERE input_name = %s", (input_name,)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) 
else DAOException('remove_name_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + +def list_name_map() -> List[Tuple[str, str]]: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("SELECT input_name, std_name FROM data_name_map") + results = cursor.fetchall() + return results + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('list_name_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + +def add_word_list(full_word: str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("INSERT INTO word_list (full_word) VALUES (%s)", (full_word,)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('add_word_list failed.', err) + finally: + if conn is not None: + free_conn(conn) + +def remove_word_list(full_word: str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("DELETE FROM word_list WHERE full_word = %s", (full_word,)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('remove_word_list failed.', err) + finally: + if conn is not None: + free_conn(conn) + +""" +TYPE_NAME_MAP : handles types so they can be updated. +""" +def _get_type_map(conn): + """ + Gets standard name given an input name + This method allows the query to be more lightweight in those circumstances. + conn: a database connection + name: input_name + """ + type_map = [] + with conn.cursor() as cursor: + sql = 'select * from type_name_map' + cursor.execute(sql) + row = cursor.fetchall() + if row is not None: + return row + return type_map + +def get_type_map(): + """ + CASE INSENSITIVE + """ + conn = None + try: + with _get_connection() as conn: + return _get_type_map(conn) + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('get_type_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + + + +def list_word_list() -> List[str]: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("SELECT full_word FROM word_list") + results = [row[0] for row in cursor.fetchall()] + return results + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('list_word_list failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def update_type_map(input_name: str, std_name:str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("update type_name_map set short_name=%s where full_name=%s", (std_name, input_name)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('update_type_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def add_type_map(input_name: str, std_name:str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("insert into type_name_map (full_name, short_name) values (%s, %s)", (input_name, std_name)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('add_type_map failed.', err) + + +""" +TYPE_NAME_MAP : handles types so they can be updated. 
+""" +def _get_word_list(conn): + """ + conn: a database connection + """ + word_list = [] + with conn.cursor() as cursor: + sql = 'select * from word_list' + cursor.execute(sql) + row = cursor.fetchall() + if row is not None: + return row + + return word_list + + +def get_word_list(): + """ + CASE INSENSITIVE + """ + conn = None + try: + with _get_connection() as conn: + return _get_word_list(conn) + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('get_word_list failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def remove_type_map(input_name: str) -> None: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("DELETE FROM type_name_map WHERE full_name=%s", (input_name,)) + conn.commit() + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('remove_type_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + + +def list_type_map() -> List[Tuple[str, str]]: + try: + with _get_connection() as conn, conn.cursor() as cursor: + cursor.execute("SELECT full_name, short_name FROM type_name_map") + results = cursor.fetchall() + return results + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('list_type_map failed.', err) + finally: + if conn is not None: + free_conn(conn) + +def _get_hash_table(conn): + """ + conn: a database connection + """ + hash_table = [] + with conn.cursor() as cursor: + sql = 'select * from hash_table' + cursor.execute(sql) + row = cursor.fetchall() + if row is not None: + return row + return hash_table + + +def get_hash_table(): + """ + CASE INSENSITIVE + """ + conn = None + try: + with _get_connection() as conn: + return _get_hash_table(conn) + except Exception as err: + raise err if isinstance(err, DAOException) else DAOException('get_hash_table failed.', err) + finally: + if conn is not None: + free_conn(conn) + diff --git a/src/python/broker-cli.py b/src/python/broker-cli.py index 5d4c9529..97d13c55 100755 --- a/src/python/broker-cli.py +++ b/src/python/broker-cli.py @@ -9,19 +9,15 @@ import os import hashlib - def str_to_physical_device(val) -> PhysicalDevice: return PhysicalDevice.parse_obj(json.loads(val)) - def str_to_logical_device(val) -> LogicalDevice: return LogicalDevice.parse_obj(json.loads(val)) - def str_to_dict(val) -> Dict: return json.loads(val) - main_parser = argparse.ArgumentParser() main_sub_parsers = main_parser.add_subparsers(dest='cmd1') @@ -71,11 +67,12 @@ def str_to_dict(val) -> Dict: ## List logical devices ld_ls_parser = ld_sub_parsers.add_parser('ls', help='list logical devices') ld_ls_parser.add_argument('--properties', action='store_true', help='Include the properties field in the output', dest='include_props', required=False) +ld_ls_parser.add_argument('--plain', action='store_true', help='Plain output, not JSON', dest='plain') ## Create logical devices ld_mk_parser = ld_sub_parsers.add_parser('create', help='create logical device') group = ld_mk_parser.add_mutually_exclusive_group(required=True) -group.add_argument('--json', type=str_to_dict, help='Logical device JSON', dest='ld') +group.add_argument('--json', type=str_to_dict, help='Logical device JSON', dest='pd') #pd is not a typo, it uses same function group.add_argument('--file', help='Read json from file, - for stdin', dest='in_filename') ## Get logical device @@ -121,33 +118,24 @@ def str_to_dict(val) -> Dict: group.add_argument('--puid', type=int, help='Physical device uid', dest='p_uid') 
group.add_argument('--luid', type=int, help='Logical device uid', dest='l_uid') -# Toggle mapping -map_pause_parser = map_sub_parsers.add_parser('toggle', help='toggle device mapping') -group = map_pause_parser.add_mutually_exclusive_group(required=True) -group.add_argument('--luid', type=int, help="Logical deivce uid", dest='l_uid') -group.add_argument('--puid', type=int, help="Physical deivce uid", dest='p_uid') -group = map_pause_parser.add_mutually_exclusive_group(required=True) -group.add_argument('--enable', action='store_true', help="re-enable mapping", dest='enable') -group.add_argument('--disable', action='store_false', help="temporarily disable mapping", dest='enable') - -# User commands -user_parser = main_sub_parsers.add_parser('users', help="manage users") -user_sub_parsers = user_parser.add_subparsers(dest='cmd2') +#User commands +user_parser=main_sub_parsers.add_parser('users', help="manage users") +user_sub_parsers=user_parser.add_subparsers(dest='cmd2') -# Add user -user_add_parser = user_sub_parsers.add_parser('add', help="Add a user") +#Add user +user_add_parser=user_sub_parsers.add_parser('add', help="Add a user") user_add_parser.add_argument('-u', help="Username of user", dest='uname', required=True) user_add_parser.add_argument('-p', help="Password for user", dest='passwd', required=True) user_add_parser.add_argument('-d', help="Account is disable upon creation", action='store_true', dest='disabled') user_add_parser.add_argument('-a', help="Create an admin account, user is not read-only", action='store_true', dest='admin') -# Remove user -user_rm_parser = user_sub_parsers.add_parser('rm', help="Remove a user") +#Remove user +user_rm_parser=user_sub_parsers.add_parser('rm', help="Remove a user") user_rm_parser.add_argument('-u', help="Username of user to be removed", dest='uname', required=True) -# Manage users token -user_token_parser = user_sub_parsers.add_parser('token', help="Manage a user's token") +#Manage users token +user_token_parser=user_sub_parsers.add_parser('token', help="Manage a user's token") user_token_parser.add_argument('-u', help="Username", dest='uname', required=True) user_token_parser.add_argument('--refresh', help="Refresh a users token", action='store_true') @@ -155,14 +143,77 @@ def str_to_dict(val) -> Dict: group.add_argument('--disable', help="Disable a users token", action="store_true") group.add_argument('--enable', help="Enable a users token", action='store_true') -# Change users password -user_pw_change_passer = user_sub_parsers.add_parser('chng', help="Change a user's password") +#Change users password +user_pw_change_passer=user_sub_parsers.add_parser('chng', help="Change a user's password") user_pw_change_passer.add_argument('-u', help="Username", dest='uname', required=True) user_pw_change_passer.add_argument('-p', help="New password for user", dest='passwd') -# List users +#List users user_sub_parsers.add_parser('ls', help="List all users") + +##TSDB -- related + +#name_map +nmap_parser=main_sub_parsers.add_parser('nmap', help="manage name_map") +nmap_sub_parsers=nmap_parser.add_subparsers(dest='cmd2') + +#add name_map +nmap_add_parser=nmap_sub_parsers.add_parser('add', help="Add name_map") +nmap_add_parser.add_argument('--in', help="Incoming name", dest='inname', required=True) +nmap_add_parser.add_argument('--out', help="Out name", dest='outname', required=True) + +#remove name_map +nmap_rm_parser=nmap_sub_parsers.add_parser('rm', help="Remove name_map") +nmap_rm_parser.add_argument('--in', help="Incoming name", dest='inname', 
required=True) + +#update name_map +nmap_update_parser=nmap_sub_parsers.add_parser('update', help="Update name_map") +nmap_update_parser.add_argument('--in', help="Incoming name", dest='inname', required=True) +nmap_update_parser.add_argument('--out', help="Out name", dest='outname', required=True) + +#list name_map +nmap_ls_parser=nmap_sub_parsers.add_parser('ls', help="List name_map") +nmap_ls_parser.add_argument('-w', help="column_width", dest='width', required=False) + + +#word_list +wlist_parser=main_sub_parsers.add_parser('wlist', help="manage word_list") +wlist_sub_parsers=wlist_parser.add_subparsers(dest='cmd2') + +#add word_list +wlist_add_parser=wlist_sub_parsers.add_parser('add', help="Add word_list") +wlist_add_parser.add_argument('-w', help="word to add", dest='word', required=True) + +#remove word_list +wlist_rm_parser=wlist_sub_parsers.add_parser('rm', help="Remove word_list") +wlist_rm_parser.add_argument('-w', help="word to remove", dest='word', required=True) + +#list word_list +wlist_ls_parser=wlist_sub_parsers.add_parser('ls', help="List word_list") + +#type_map +tmap_parser=main_sub_parsers.add_parser('tmap', help="manage type_map") +tmap_sub_parsers=tmap_parser.add_subparsers(dest='cmd2') + +#add type_map +tmap_add_parser=tmap_sub_parsers.add_parser('add', help="Add type_map") +tmap_add_parser.add_argument('--in', help="Incoming name", dest='inname', required=True) +tmap_add_parser.add_argument('--out', help="Out name", dest='outname', required=True) + +#remove type_map +tmap_rm_parser=tmap_sub_parsers.add_parser('rm', help="Remove type_map") +tmap_rm_parser.add_argument('--in', help="Incoming name", dest='inname', required=True) + +#update type_map +tmap_update_parser=tmap_sub_parsers.add_parser('update', help="Update type_map") +tmap_update_parser.add_argument('--in', help="Incoming name", dest='inname', required=True) +tmap_update_parser.add_argument('--out', help="Out name", dest='outname', required=True) + +#list type_map +tmap_ls_parser=tmap_sub_parsers.add_parser('ls', help="List type_map") +tmap_ls_parser.add_argument('-w', help="column_width", dest='width', required=False) + args = main_parser.parse_args() def serialise_datetime(obj): @@ -295,11 +346,19 @@ def main() -> None: elif args.cmd1 == 'ld': if args.cmd2 == 'ls': devs = dao.get_logical_devices() + if args.include_props: tmp_list = list(map(lambda dev: dev.dict(), devs)) else: tmp_list = list(map(lambda dev: dev.dict(exclude={'properties'}), devs)) print(pretty_print_json(tmp_list)) + + tmp_list = list(map(lambda dev: dev.dict(exclude={'properties'}), devs)) + if not args.plain: + print(pretty_print_json(tmp_list)) + else: + plain_pd_list(devs) + elif args.cmd2 == 'create': dev = LogicalDevice.parse_obj(dict_from_file_or_string()) print(dao.create_logical_device(dev)) @@ -322,13 +381,7 @@ def main() -> None: dev = LogicalDevice.parse_obj(dev_dict) print(pretty_print_json(dao.update_logical_device(dev))) elif args.cmd2 == 'rm': - # Delete all physical_logical mappings to avoid foreign key violation - mappings = dao.get_logical_device_mappings(ld=args.l_uid) - for mapping in mappings: - dao.delete_mapping(mapping) - - print(pretty_print_json(dao.delete_logical_device(args.l_uid))) - + print(dao.delete_logical_device(args.l_uid)) elif args.cmd2 == 'cpd': pdev = dao.get_physical_device(args.p_uid) if pdev is None: @@ -360,47 +413,86 @@ def main() -> None: dao.end_mapping(ld=args.l_uid) elif args.cmd2 == 'ls': if args.p_uid is not None: - mappings: PhysicalToLogicalMapping = 
dao.get_current_device_mapping(pd=args.p_uid, only_current_mapping=False) - new_list = [m.dict() for m in mappings] - print(pretty_print_json(new_list)) - + mapping = dao.get_current_device_mapping(pd=args.p_uid) + print(pretty_print_json(mapping)) elif args.l_uid is not None: map_list = dao.get_logical_device_mappings(args.l_uid) new_list = [m.dict() for m in map_list] print(pretty_print_json(new_list)) - - elif args.cmd2 == 'toggle': - current_mapping = dao.get_current_device_mapping(pd=args.p_uid, ld=args.l_uid) - if current_mapping is None: - raise RuntimeError("No current mapping for the uid given") - - dao.toggle_device_mapping(args.enable, args.p_uid, args.l_uid) - elif args.cmd1 == 'users': - if args.cmd2 == 'add': + elif args.cmd1=='users': + if args.cmd2=='add': dao.user_add(uname=args.uname, passwd=args.passwd, disabled=args.disabled) if args.admin: dao.user_set_read_only(uname=args.uname, read_only=False) - elif args.cmd2 == 'rm': + elif args.cmd2=='rm': dao.user_rm(uname=args.uname) - elif args.cmd2 == 'token': - if args.disable == True: + elif args.cmd2=='token': + if args.disable==True: dao.token_disable(uname=args.uname) - elif args.enable == True: + elif args.enable==True: dao.token_enable(uname=args.uname) - if args.refresh == True: + if args.refresh==True: dao.token_refresh(uname=args.uname) - elif args.cmd2 == 'chng': + elif args.cmd2=='chng': dao.user_change_password(args.uname, args.passwd) - elif args.cmd2 == 'ls': + elif args.cmd2=='ls': print(dao.user_ls()) + elif args.cmd1=='nmap': + if args.cmd2=='add': + dao.add_name_map(input_name=args.inname, std_name=args.outname) + + elif args.cmd2=='rm': + dao.remove_name_map(input_name=args.inname) + + elif args.cmd2=='update': + dao.update_name_map(input_name=args.inname, std_name=args.outname) + + elif args.cmd2=='ls': + name_map = dao.list_name_map() + header = "input_name | std_name" + column_width = args.width if args.width else 30 + print("{:^{width}} | {:^{width}}".format(header.split('|')[0], header.split('|')[1], width=column_width)) + for input_name, mapped_name in name_map: + print("{:^{width}} | {:^{width}}".format(input_name, mapped_name, width=column_width)) + + elif args.cmd1=='wlist': + if args.cmd2=='add': + dao.add_word_list(full_word=args.word) + + elif args.cmd2=='rm': + dao.remove_word_list(full_word=args.word) + + elif args.cmd2=='ls': + words = dao.list_word_list() + header = "full_word" + for full_word in words: + print(full_word) + + elif args.cmd1=='tmap': + if args.cmd2=='add': + dao.add_type_map(input_name=args.inname, std_name=args.outname) + + elif args.cmd2=='rm': + dao.remove_type_map(input_name=args.inname) + + elif args.cmd2=='update': + dao.update_type_map(input_name=args.inname, std_name=args.outname) + + elif args.cmd2=='ls': + type_maps = dao.list_type_map() + header = "full_name | short_name" + column_width = args.width if args.width else 30 + print("{:^{width}} | {:^{width}}".format(header.split('|')[0], header.split('|')[1], width=column_width)) + for input_type, mapped_name in type_maps: + print("{:^{width}} | {:^{width}}".format(input_type, mapped_name, width=column_width)) if __name__ == '__main__': main() diff --git a/src/python/pdmodels/Models.py b/src/python/pdmodels/Models.py index 497ca8d2..a69dd7f1 100644 --- a/src/python/pdmodels/Models.py +++ b/src/python/pdmodels/Models.py @@ -62,4 +62,9 @@ class User(BaseModel): username: str auth_token: str valid: bool - read_only: bool \ No newline at end of file + read_only: bool + + +class DataNameMap(BaseModel): + input_name: str + 
std_name: str diff --git a/src/python/restapi/RestAPI.py b/src/python/restapi/RestAPI.py index 76b31e88..206b9f39 100644 --- a/src/python/restapi/RestAPI.py +++ b/src/python/restapi/RestAPI.py @@ -15,6 +15,8 @@ #from fastapi.responses import JSONResponse from typing import Annotated, List, Dict +import psycopg2, os, sys, TSDBAPI + from pdmodels.Models import DeviceNote, PhysicalDevice, LogicalDevice, PhysicalToLogicalMapping import api.client.DAO as dao @@ -569,32 +571,31 @@ async def change_password(password:str, request:Request) -> str: except dao.DAOException as err: raise HTTPException(status_code=500, detail=err.msg) - - + app = FastAPI(title='IoT Device Broker', version='1.0.0') app.include_router(router) +app.include_router(TSDBAPI.router) - -@app.middleware("http") -async def check_auth_header(request: Request, call_next): +# @app.middleware("http") +# async def check_auth_header(request: Request, call_next): - try: - if not request.url.path in ['/docs', '/openapi.json', '/broker/api/token']: - if not 'Authorization' in request.headers: - return Response(content="", status_code=401) - - token = request.headers['Authorization'].split(' ')[1] - is_valid=dao.token_is_valid(token) - - if not is_valid: - print(f'Authentication failed for url: {request.url}') - return Response(content="", status_code=401) - - if request.method != 'GET': - user=dao.get_user(auth_token=token) - if user is None or user.read_only is True: - return Response(content="", status_code=403) - except: - return Response(content="", status_code=401) - - return await call_next(request) +# try: +# if not request.url.path in ['/docs', '/openapi.json', '/broker/api/token']: +# if not 'Authorization' in request.headers: +# return Response(content="", status_code=401) + +# token = request.headers['Authorization'].split(' ')[1] +# is_valid=dao.token_is_valid(token) + +# if not is_valid: +# print(f'Authentication failed for url: {request.url}') +# return Response(content="", status_code=401) + +# if request.method != 'GET': +# user=dao.get_user(auth_token=token) +# if user is None or user.read_only is True: +# return Response(content="", status_code=403) +# except: +# return Response(content="", status_code=401) + +# return await call_next(request) diff --git a/src/python/restapi/TSDBAPI.py b/src/python/restapi/TSDBAPI.py new file mode 100644 index 00000000..574a633b --- /dev/null +++ b/src/python/restapi/TSDBAPI.py @@ -0,0 +1,246 @@ +from fastapi import APIRouter +import psycopg2, os, sys, datetime + +router = APIRouter(prefix="/query", tags=['querying db']) + +tsdb_user = os.environ.get("TSDB_USER") +tsdb_pass = os.environ.get("TSDB_PASSWORD") +tsdb_host = os.environ.get("TSDB_HOST") +tsdb_port = os.environ.get("TSDB_PORT") +tsdb_db = os.environ.get("TSDB_DB") +tsdb_table = os.environ.get("TSDB_TABLE") +CONNECTION = f"postgres://{tsdb_user}:{tsdb_pass}@{tsdb_host}:{tsdb_port}/{tsdb_db}" + +days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +def is_leap(year: int) -> bool: + if year % 400 == 0: + return True + elif year % 100 == 0: + return False + elif year % 4 == 0: + return True + else: + return False + + +@router.get("/") +async def query_tsdb(query: str = f"SELECT * FROM {tsdb_table};"): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + # query = f"SELECT * FROM {tsdb_table};" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + 
sys.stderr.write("error.") + + with open("test.txt", "w") as f: + f.write(str(result)) + + return result + +@router.get("/l_uid/{l_uid}") +async def get_luid_records(l_uid, fromdate = "", todate = "", p_uid = ""): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + query = f"SELECT * FROM {tsdb_table} WHERE l_uid = '{l_uid}'" + if fromdate != "": + query += f"AND timestamp >= '{fromdate}'" + if todate != "": + query += f"AND timestamp <= '{todate}'" + if p_uid != "": + query += f"AND p_uid = '{p_uid}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result + + +@router.get("/p_uid/{p_uid}") +async def get_puid_records(p_uid: str, fromdate = "", todate = "", l_uid = ""): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + query = f"SELECT * FROM {tsdb_table} WHERE p_uid = '{p_uid}'" + if fromdate != "": + query += f"AND timestamp >= '{fromdate}'" + if todate != "": + query += f"AND timestamp <= '{todate}'" + if l_uid != "": + query += f"AND l_uid = '{l_uid}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result + +@router.get("/l_uid/{l_uid}/last") +async def get_luid_for_last_x(l_uid: str, years = 0, months = 0, days = 0, hours = 0, minutes = 0, seconds = 0): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + date = datetime.datetime.utcnow() + current_date = f"{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:{date.second}" + target_year = date.year - int(years) + target_month = date.month - int(months) + target_day = date.day - int(days) + target_hour = date.hour - int(hours) + target_minute = date.minute - int(minutes) + target_second = date.second - float(seconds) + while target_second < 0: + target_second += 60 + target_minute -= 1 + while target_minute < 0: + target_minute += 60 + target_hour -= 1 + while target_hour < 0: + target_hour += 24 + target_day -= 1 + while target_day <= 0: + target_day += days_in_month[target_month % 12] + target_day += 1 if target_month % 12 == 2 and is_leap(target_year) else 0 + target_month -= 1 + while target_month <= 0: + target_month += 12 + target_year -= 1 + if target_month == 2 and target_day > 28: + target_month = 3 + target_day -= 28 + target_date = f"{target_year}-{target_month}-{target_day} {target_hour}:{target_minute}:{target_second}" + query = f"SELECT * FROM {tsdb_table} WHERE l_uid = '{l_uid}'" + query += f" AND timestamp <= '{current_date}'" + query += f" AND timestamp >= '{target_date}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result + +@router.get("/p_uid/{p_uid}/last") +async def get_puid_for_last_x(p_uid: str, years = 0, months = 0, days = 0, hours = 0, minutes = 0, seconds = 0): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + date = datetime.datetime.utcnow() + current_date = f"{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}:{date.second}" + target_year = date.year - int(years) + target_month = date.month - int(months) + target_day = date.day - int(days) + target_hour = date.hour 
- int(hours) + target_minute = date.minute - int(minutes) + target_second = date.second - float(seconds) + while target_second < 0: + target_second += 60 + target_minute -= 1 + while target_minute < 0: + target_minute += 60 + target_hour -= 1 + while target_hour < 0: + target_hour += 24 + target_day -= 1 + while target_day <= 0: + target_day += days_in_month[target_month % 12] + target_day += 1 if target_month % 12 == 2 and is_leap(target_year) else 0 + target_month -= 1 + while target_month <= 0: + target_month += 12 + target_year -= 1 + if target_month == 2 and target_day > 28: + target_month = 3 + target_day -= 28 + target_date = f"{target_year}-{target_month}-{target_day} {target_hour}:{target_minute}:{target_second}" + query = f"SELECT * FROM {tsdb_table} WHERE p_uid = '{p_uid}'" + query += f" AND timestamp <= '{current_date}'" + query += f" AND timestamp >= '{target_date}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result + +@router.get("/l_uid/{l_uid}/{func}") +async def get_luid_records_by_function(l_uid: str, func: str, fromdate = "", todate = "", p_uid = ""): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + query = f"SELECT {func}(value) FROM {tsdb_table} WHERE l_uid = '{l_uid}'" + if fromdate != "": + query += f"AND timestamp >= '{fromdate}'" + if todate != "": + query += f"AND timestamp <= '{todate}'" + if p_uid != "": + query += f"AND p_uid = '{p_uid}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result + +@router.get("/p_uid/{p_uid}/{func}") +async def get_puid_records_by_function(p_uid: str, func: str, fromdate = "", todate = "", l_uid = ""): + result = "" + try: + with psycopg2.connect(CONNECTION) as conn: + query = f"SELECT {func}(value) FROM {tsdb_table} WHERE p_uid = '{p_uid}'" + if fromdate != "": + query += f"AND timestamp >= '{fromdate}'" + if todate != "": + query += f"AND timestamp <= '{todate}'" + if l_uid != "": + query += f"AND l_uid = '{l_uid}'" + cursor = conn.cursor() + try: + cursor.execute(query) + conn.commit() + result = cursor.fetchall() + except psycopg2.errors as e: + sys.stderr.write(f'error: {e}\n') + cursor.close() + except: + sys.stderr.write("error.") + + return result \ No newline at end of file diff --git a/src/python/restapi/requirements.txt b/src/python/restapi/requirements.txt index 5fd0f659..d6a42e5d 100644 --- a/src/python/restapi/requirements.txt +++ b/src/python/restapi/requirements.txt @@ -19,6 +19,7 @@ Jinja2==3.0.3 MarkupSafe==2.0.1 psycopg2-binary==2.9.3 pydantic==1.9.0 +pytest==7.3.1 python-multipart==0.0.5 PyYAML==6.0 requests==2.27.1 @@ -28,3 +29,6 @@ typing_extensions==4.0.1 ujson==5.1.0 urllib3==1.26.8 uvicorn==0.17.0 +backoff==2.2.1 +python-dateutil==2.8.2 +pika==1.3.2 diff --git a/src/python/timescale/TS_LTSReader.py b/src/python/timescale/TS_LTSReader.py new file mode 100644 index 00000000..ab495038 --- /dev/null +++ b/src/python/timescale/TS_LTSReader.py @@ -0,0 +1,137 @@ +""" +This program receives logical device timeseries messages and logs +them as a test of having multiple queues attached to the logical_timeseries exchange. + +It can be used as a template for any program that wants to read from the logical +timeseries exchange. 
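Note on the TSDBAPI endpoints above: each handler interpolates the path and query parameters directly into the SQL f-string, and several handlers append the optional fragments without a leading space before AND. Letting psycopg2 bind the values sidesteps both issues, and the manual year/month/day rollover in the /last handlers can be expressed with dateutil.relativedelta (python-dateutil is added to the restapi requirements in this change). A minimal sketch under those assumptions, keeping the existing router, CONNECTION and tsdb_table names; window_start is a hypothetical helper, not part of the module, and the originals' error handling is omitted for brevity:

```python
import datetime
import psycopg2
from dateutil.relativedelta import relativedelta

@router.get("/l_uid/{l_uid}")
async def get_luid_records(l_uid, fromdate="", todate="", p_uid=""):
    # Build the WHERE clause with placeholders; psycopg2 quotes the bound values safely.
    query = f"SELECT * FROM {tsdb_table} WHERE l_uid = %s"
    params = [l_uid]
    if fromdate:
        query += " AND timestamp >= %s"
        params.append(fromdate)
    if todate:
        query += " AND timestamp <= %s"
        params.append(todate)
    if p_uid:
        query += " AND p_uid = %s"
        params.append(p_uid)
    with psycopg2.connect(CONNECTION) as conn, conn.cursor() as cursor:
        cursor.execute(query, params)
        return cursor.fetchall()


def window_start(years=0, months=0, days=0, hours=0, minutes=0, seconds=0) -> datetime.datetime:
    # Same "last X" window arithmetic as the /last handlers, without the manual rollover loops.
    return datetime.datetime.utcnow() - relativedelta(
        years=int(years), months=int(months), days=int(days),
        hours=int(hours), minutes=int(minutes), seconds=int(seconds))
```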
To do that, change the queue name to something unique. +""" + +import asyncio, json, logging, signal + +from pika.exchange_type import ExchangeType +import api.client.RabbitMQ as mq +import BrokerConstants +import util.LoggingUtil as lu +import timescale.Timescale as ts +import api.client.DAO as dao + +rx_channel = None +mq_client = None +finish = False + + +def sigterm_handler(sig_no, stack_frame) -> None: + """ + Handle SIGTERM from docker by closing the mq and db connections and setting a + flag to tell the main loop to exit. + """ + global finish, mq_client + + logging.info(f'Caught signal {signal.strsignal(sig_no)}, setting finish to True') + finish = True + mq_client.stop() + + +async def main(): + """ + Initiate the connection to RabbitMQ and then idle until asked to stop. + + Because the messages from RabbitMQ arrive via async processing this function + has nothing to do after starting connection. + """ + global mq_client, rx_channel, finish + + logging.info('===============================================================') + logging.info(' STARTING LOGICAL_TIMESERIES TEST READER') + logging.info('===============================================================') + + # The routing key is ignored by fanout exchanges so it does not need to be a constant. + # Change the queue name. This code should change to use a server generated queue name. + rx_channel = mq.RxChannel(BrokerConstants.LOGICAL_TIMESERIES_EXCHANGE_NAME, exchange_type=ExchangeType.fanout, queue_name='ltsreader_logical_msg_queue', on_message=on_message, routing_key='logical_timeseries') + mq_client = mq.RabbitMQConnection(channels=[rx_channel]) + asyncio.create_task(mq_client.connect()) + + while not rx_channel.is_open: + await asyncio.sleep(0) + + while not finish: + await asyncio.sleep(2) + + while not mq_client.stopped: + await asyncio.sleep(1) + + +def on_message(channel, method, properties, body): + """ + This function is called when a message arrives from RabbitMQ. + + + checks pd and ld much like logical mapper, possibly redundant as logical + mapper passes the message here, but maybe not always, so checks stay info + until someone deletes them + """ + + global rx_channel, finish + + delivery_tag = method.delivery_tag + + # If the finish flag is set, reject the message so RabbitMQ will re-queue it + # and return early. + if finish: + rx_channel._channel.basic_reject(delivery_tag) + return + + try: + msg = json.loads(body) + + p_uid = msg[BrokerConstants.PHYSICAL_DEVICE_UID_KEY] + pd = dao.get_physical_device(p_uid) + mapping = dao.get_current_device_mapping(p_uid) + + #TODO: implement extra checks for mapping if required or remove commented checks + # pretty much all use cases currently will cover these checks bein done already by + # the logical_mapper + #if pd or mapping is None: + if pd is None: + # Ack the message, even though we cannot process it. We don't want it redelivered. + # We can change this to a Nack if that would provide extra context somewhere. + lu.cid_logger.error(f'Physical device not found, cannot continue. Dropping message.', extra=msg) + #else: + # lu.cid_logger.error(f'No device mapping found for {pd.source_ids}, cannot continue. 
Dropping message.', extra=msg) + rx_channel._channel.basic_ack(delivery_tag) + return + + #accept message for processing + lu.cid_logger.info(f'Accepted message {msg}', extra=msg) + + #parse message into useable format for timeseries db + parsed_msg = ts.parse_json(msg) + + #insert into timeseries and confirm + inserted = ts.insert_lines(parsed_msg) + + if(inserted != 0): + lu.cid_logger.error('Message not stored in time series database. Rejecting Message.', extra=msg) + rx_channel._channel.basic_reject(delivery_tag) + return + + # This tells RabbitMQ the message is handled and can be deleted from the queue. + logging.info('Message successfully stored in time series database.') + rx_channel._channel.basic_ack(delivery_tag) + + except BaseException: + logging.exception('Error while processing message.') + rx_channel._channel.basic_reject(delivery_tag, requeue=False) + + +if __name__ == '__main__': + # Docker sends SIGTERM to tell the process the container is stopping so set + # a handler to catch the signal and initiate an orderly shutdown. + signal.signal(signal.SIGTERM, sigterm_handler) + + # Ctrl-C sends SIGINT, handle it the same way. + signal.signal(signal.SIGINT, sigterm_handler) + + # Does not return until SIGTERM is received. + asyncio.run(main()) + logging.info('Exiting.') diff --git a/src/python/timescale/Timescale.py b/src/python/timescale/Timescale.py new file mode 100644 index 00000000..32cd1c62 --- /dev/null +++ b/src/python/timescale/Timescale.py @@ -0,0 +1,94 @@ +import sys, json, re, os, logging, psycopg2 +import BrokerConstants +import api.client.DAO as dao +import util.LoggingUtil as lu +from dateutil import parser +from util import NamingConstants + + +#these are read from compose/.env file +#however tsdb sets them from compose/.tsdb_env +tsdb_user = os.environ.get("TSDB_USER") +tsdb_pass = os.environ.get("TSDB_PASSWORD") +tsdb_host = os.environ.get("TSDB_HOST") +tsdb_port = os.environ.get("TSDB_PORT") +tsdb_db = os.environ.get("TSDB_DB") +tsdb_table = os.environ.get("TSDB_TABLE") +CONNECTION = f"postgres://{tsdb_user}:{tsdb_pass}@{tsdb_host}:{tsdb_port}/{tsdb_db}" + + +def get_standardised_name(msg: str) -> str: + """ + check if a name is already mapped and use that format instead, + otherwise lets create a new mapping and use it + """ + std_name = dao.get_std_name(msg) + if std_name is None: + std_name = NamingConstants.clean_name(msg) + dao.add_name_map(msg, std_name) + logging.info(f'Creating New Name Mapping: {msg}:{std_name}') + else: + logging.info(f'Found Name Mapping: {msg}:{std_name}') + + return std_name + + +def parse_json(json_obj: dict) -> list: + """ + Main parser used at this time, takes a json object and parses into format ready for insertion into tsdb + """ + parsed_data = [] + + try: + broker_id = json_obj[BrokerConstants.CORRELATION_ID_KEY] + l_uid = json_obj[BrokerConstants.LOGICAL_DEVICE_UID_KEY] + p_uid = json_obj[BrokerConstants.PHYSICAL_DEVICE_UID_KEY] + timestamp = parser.parse(json_obj[BrokerConstants.TIMESTAMP_KEY]) + timeseries = json_obj[BrokerConstants.TIMESERIES_KEY] + + for tsd in timeseries: + name = get_standardised_name(tsd['name']) + value = tsd['value'] + parsed_data.append((broker_id, l_uid, p_uid, timestamp, name, value)) + + except KeyError as e: + logging.error(f"An error occurred: {str(e)}") + + return parsed_data + + +def parse_json_string(json_string: str) -> list: + """ + Alternative to above, includes json.loads prior. 
+ """ + parsed_data = [] + try: + parsed_data.append(parse_json(json.loads(json_string))) + except json.JSONDecodeError as e: + logging.error(f"An error occurred: {str(e)}") + + return parsed_data + + +def insert_lines(parsed_data: list, connection: str = CONNECTION, table: str = tsdb_table) -> int: + """ + Insert our parsed data into tsdb + + returns 1 if an error occurred + returns 0 if sucessful + """ + conn = psycopg2.connect(connection) + try: + with conn: + with conn.cursor() as cursor: + for entry in parsed_data: + broker_id, l_uid, p_uid, timestamp, name, value = entry + cursor.execute( + f"INSERT INTO {table} (broker_id,{BrokerConstants.LOGICAL_DEVICE_UID_KEY}, {BrokerConstants.PHYSICAL_DEVICE_UID_KEY}, {BrokerConstants.TIMESTAMP_KEY}, name, value) VALUES (%s, %s, %s, %s, %s, %s);", + (broker_id, l_uid, p_uid, timestamp, name, value)) + conn.close() + return 0 + except (Exception, psycopg2.Error) as error: + logging.error(f"Error inserting: {error}") + conn.close() + return 0 diff --git a/src/python/timescale/__init__.py b/src/python/timescale/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/python/util/NamingConstants.py b/src/python/util/NamingConstants.py new file mode 100644 index 00000000..be373e6b --- /dev/null +++ b/src/python/util/NamingConstants.py @@ -0,0 +1,162 @@ +# +# +#implements an automated way to standardise incoming timeseries names to make data cleaner +#aims to eliminate the possibility of timeseries names changing over time if devices change +# +# + +import re, os, time, logging +import api.client.DAO as dao + + +def update_data_structs(): + try: + type_maps = dict(dao.get_type_map()) + word_list = dao.get_word_list() + hash_table = dict(dao.get_hash_table()) + return type_maps, [item[0] for item in word_list], hash_table + except Exception as e: + logging.info("Error while updating word_list/type_map structures:", e) + return {}, [], {"word_list":"", "type_name_map":""} + + +PULL_INTERVAL = int(os.environ.get('NAMING_UPDATE_INTERVAL', 600)) +TYPE_MAPS, WORD_LIST, HASH_TABLE = update_data_structs() +last_data_pull_time = 0 + + +def check_and_update_structs(): + """ + atm the hash_table only has two rows, so pulling both at once and checking is probably + better, + if more hashes go in, then maybe redoing this to query database for just those two hashes + """ + global last_data_pull_time + global HASH_TABLE, WORD_LIST, TYPE_MAPS + + current_time = time.time() + + logging.info('in updates') + + if current_time - last_data_pull_time >= PULL_INTERVAL: + logging.info('checking for updates') + last_data_pull_time = time.time() + try: + new_hash_table = dict(dao.get_hash_table()) + if HASH_TABLE['word_list'] != new_hash_table['word_list']: + word_list = dao.get_word_list() + WORD_LIST = [item[0] for item in word_list] + HASH_TABLE['word_list'] = new_hash_table['word_list'] + if HASH_TABLE['type_name_map'] != new_hash_table['type_name_map']: + TYPE_MAPS = dict(dao.get_type_map()) + HASH_TABLE['type_name_map'] = new_hash_table['type_name_map'] + except: + logging.error("unable to update naming structs") + + +def clean_name(msg: str) -> str: + """ + strip special chars from beginning and end + make upper case --> ie aBcd => ABCD + replace and '-' with '_' --> ie 1-2 3_4 => 1_2_3_4 + remove special characters except '_' --> ie !2>_3 => 2_3 + remove duplicated '_' --> ie 1__2 => 1_2 + separete all known words --> ie UPWINDVAPOUR => UP_WIND_VAPOUR + remove duplicate words --> ie BATTERY_VOLTAGE_V => BATTERY_V + normalise words --> ie VOLTAGE => V 
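Note on insert_lines() above: the docstring says it returns 1 when an error occurs ("sucessful" should read "successful"), but both the success path and the except branch return 0, so the `if inserted != 0` check in TS_LTSReader.on_message() can never detect a failed insert and the message is acked regardless. A minimal corrected sketch, reusing the module's CONNECTION, tsdb_table and BrokerConstants names:

```python
def insert_lines(parsed_data: list, connection: str = CONNECTION, table: str = tsdb_table) -> int:
    """
    Insert parsed rows into the timeseries table.

    Returns 1 if an error occurred, 0 if successful.
    """
    conn = psycopg2.connect(connection)
    try:
        with conn, conn.cursor() as cursor:
            for broker_id, l_uid, p_uid, timestamp, name, value in parsed_data:
                cursor.execute(
                    f"INSERT INTO {table} (broker_id, {BrokerConstants.LOGICAL_DEVICE_UID_KEY}, "
                    f"{BrokerConstants.PHYSICAL_DEVICE_UID_KEY}, {BrokerConstants.TIMESTAMP_KEY}, "
                    "name, value) VALUES (%s, %s, %s, %s, %s, %s);",
                    (broker_id, l_uid, p_uid, timestamp, name, value))
        return 0
    except (Exception, psycopg2.Error) as error:
        logging.error(f"Error inserting: {error}")
        return 1  # non-zero so the reader rejects the message instead of acking it
    finally:
        conn.close()
```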
+ + Additionally, table name must not start or end with the . character. + Column name must not contain . - + """ + + check_and_update_structs() + + special_characters = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ' + cleaned_name = separate_and_normalise_words(msg.upper().replace(" ", "_").replace("-","_")) + cleaned_name = cleaned_name.lstrip(special_characters).rstrip(special_characters) + cleaned_name = split_numbers_by_underscore(cleaned_name) + cleaned_name = re.sub(r'[^\w\s]', '', cleaned_name) + cleaned_name = re.sub(r'_+', '_', cleaned_name) + + return cleaned_name + + +def normalise_word(word: str) -> str: + """ + USES THE TYPE MAPS TO ALTER THE WORD TO A STANDARD FORMAT + + IE VOLTAGE, VOLT => V + OR TEMP => TEMPERATURE + """ + for type_word, symbol in TYPE_MAPS.items(): + pattern = r'\b' + re.escape(type_word) + r'\b' + word = re.sub(pattern, symbol, word) + return word + + +def remove_duplicates(words: list) -> list: + """ + REMOVES ANY DUPLICATE WORDS IN LIST, NUMBERS ARE IGNORED + + prevents BATTERY_VOLTAGE_V from being BATTERY_V_V + """ + processed_words = [] + for word in words: + if word in processed_words and not word.isnumeric(): + continue + processed_words.append(word) + return processed_words + + + +def separate_and_normalise_words(msg: str) -> str: + """ + separates words (largest) by underscores based off ./naming_constants.py + ie 1temperaturevoltagegggcurrent => 1_temperature_voltage_ggg_current + + uses naming_constants.WORD_LIST + + also removes duplicates if the same words ie BATTERY_VOLTAGE_V => BATTERY_V_V => BATTERY_V + """ + words = [] + i = 0 + start_index = 0 + while i < len(msg): + found_word = "" + for word in WORD_LIST: + if msg.startswith(word, i) and len(word) > len(found_word): + found_word = word + if found_word: + if i > start_index: + words.extend(msg[start_index:i].split("_")) + words.append(normalise_word(found_word)) + i += len(found_word) + start_index = i + else: + i += 1 + if i > start_index: + words.extend(msg[start_index:i].split("_")) + + removed_duplicates = remove_duplicates(words) + + return "_".join(removed_duplicates) + + +def split_numbers_by_underscore(msg: str) -> str: + """ + splits numbers by underscores + + a123b ==> 1_123_b + """ + result = [] + prev_char = None + + for char in msg: + if prev_char and char.isdigit() != prev_char.isdigit(): + result.append('_') + result.append(char) + prev_char = char + + return ''.join(result) + + diff --git a/src/www/app/main.py b/src/www/app/main.py index 83a47580..a9ae00ae 100644 --- a/src/www/app/main.py +++ b/src/www/app/main.py @@ -1,9 +1,10 @@ import atexit import logging import time +from sys import stderr from typing import Tuple - -from flask import Flask, render_template, request, redirect, url_for, session, send_from_directory +from requests.auth import parse_dict_header +from flask import Flask, render_template, request, make_response, redirect, url_for, session, send_from_directory, jsonify import folium import paho.mqtt.client as mqtt @@ -38,11 +39,6 @@ app = Flask(__name__, static_url_path='/static') -app.wsgi_app = DispatcherMiddleware( - Response('Not Found', status=404), - {'/iota': app.wsgi_app} -) - _location_re = re.compile(r'([+-]?\d+\.?\d*)\s*,\s*([+-]?\d+\.?\d*)') _mqtt_host = os.getenv('RABBITMQ_HOST') @@ -357,6 +353,8 @@ def physical_device_table(): return render_template('error_page.html', reason=e), e.response.status_code + + @app.route('/physical-device/', methods=['GET']) def physical_device_form(uid): @@ -386,6 +384,11 @@ def physical_device_form(uid): 
currentDeviceMapping.append(m) + #TS data + ts_data = get_puid_ts(uid) + parsed_ts = parse_ts_data(ts_data) + + title = 'Physical Device ' + str(uid) + ' - ' + str(device.name) return render_template('physical_device_form.html', title=title, @@ -395,7 +398,8 @@ def physical_device_form(uid): properties=properties_formatted, ttn_link=ttn_link, currentMappings=currentDeviceMapping, - deviceNotes=notes) + deviceNotes=notes, + ts_data=parsed_ts) except requests.exceptions.HTTPError as e: return render_template('error_page.html', reason=e), e.response.status_code @@ -454,6 +458,12 @@ def logical_device_form(uid): # The physical_devices list is used in the dialog shown when mapping a logical device. physical_devices = get_physical_devices(session.get('token')) + #TS data + ts_data = get_luid_ts(uid) + parsed_ts = parse_ts_data(ts_data) + + + return render_template('logical_device_form.html', title=title, ld_data=device, @@ -462,7 +472,8 @@ def logical_device_form(uid): deviceLastSeen=device_last_seen, ubidots_link=ubidots_link, properties=properties_formatted, - deviceMappings=mappings) + deviceMappings=mappings, + ts_data=parsed_ts) except requests.exceptions.HTTPError as e: return render_template('error_page.html', reason=e), e.response.status_code @@ -677,6 +688,37 @@ def format_location_string(location: Location) -> str: return formatted_location +def add_cors_headers(response): + response.headers['Access-Control-Allow-Origin'] = '*' + response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS' + response.headers['Access-Control-Allow-Headers'] = 'Content-Type' + return response + +@app.route('/get_between_dates_ts', methods=['GET']) +def get_data(): + """ + dev_type (string): p_uid or l_uid, must match what's in the database + uid (int): uid of either p_uid or l_uid ie query could be ... 
where p_uid='2' + from_date(string): picker.value + to_date(string): picker.value + """ + try: + dev_type = request.args.get('dev_type') + uid = request.args.get('uid') + from_date = request.args.get('from_date') + to_date = request.args.get('to_date') + + ts_data = get_between_dates_ts(dev_type, uid, from_date, to_date) + parsed = parse_ts_table_data(ts_data) + + response = jsonify(parsed) + response = add_cors_headers(response) + + return response + + except Exception as e: + return jsonify({"error": str(e)}) + def generate_link(data): link = '' @@ -706,6 +748,88 @@ def exit_handler(): _mqtt_client.loop_stop() app.logger.info('Done') +""" +parse the timeseries data received by api into what is expected by graph +""" +def parse_ts_data(ts_data): + try: + parsed_ts = {} + for entry in ts_data: + timestamp = entry[0] + label = entry[1] + value = entry[2] + if label not in parsed_ts: + parsed_ts[label] = [] + parsed_ts[label].append((timestamp, value)) + + return parsed_ts + + except: + print("Error parsed_ts", file=sys.stderr) + return {} + +#leaving this here incase of future changes ie broker correlation id +#input: +#[ +# [1, 1, '2023-10-17T14:00:00+00:00', '5_TEMPERATURE', 0.2999783007627932], +# [1, 1, '2023-10-17T14:00:00+00:00', 'BATTERY_V', 11.313449], +# [2, 1, '2023-10-18T14:00:00+00:00', '5_TEMPERATURE', 2.2999783007627932], +# [2, 1, '2023-10-18T14:00:00+00:00', 'BATTERY_V', 12.313449], +# [1, 1, '2023-10-17T14:00:00+00:00', 'TEST', 666] +#] +# +#output: +#{ +# columns:["p_uid", "l_uid", "timestamp", "5_TEMPERATURE", "BATTERY_V", "TEST"], +# data:[ +# [1,1,'2023-10-17T14:00:00+00:00', 0.2999783007627933, 11.313449, 666], +# [2,1, '2023-10-18T14:00:00+00:00', 2.2999783007627932, 12.313449, null] +# ] +#} + +#data is the rows of table, +#p_uid + l_uid + timeseries forms the unique identifier for the row. 
+def parse_ts_table_data(raw_data): + if not raw_data: + return {"columns": [], "data": []} + + data_dict = {} + + for entry in raw_data: + p_uid, l_uid, timestamp, label, value = entry + if p_uid not in data_dict: + data_dict[p_uid] = {} + if l_uid not in data_dict[p_uid]: + data_dict[p_uid][l_uid] = {} + if timestamp not in data_dict[p_uid][l_uid]: + data_dict[p_uid][l_uid][timestamp] = {} + data_dict[p_uid][l_uid][timestamp][label] = value + + columns = ["p_uid", "l_uid", "timestamp"] + + for entry in raw_data: + p_uid, l_uid, timestamp, label, _ = entry + + if label not in columns: + columns.append(label) + + data = [] + + for p_uid, p_uid_data in data_dict.items(): + for l_uid, l_uid_data in p_uid_data.items(): + for timestamp, timestamp_data in l_uid_data.items(): + row = [p_uid, l_uid, timestamp] + for label in columns[3:]: + if label in timestamp_data: + row.append(timestamp_data[label]) + else: + row.append(None) + data.append(row) + + result = {"columns": columns, "data": data} + return result + + if __name__ == '__main__': app.logger.info('Starting') diff --git a/src/www/app/static/ts_graph.js b/src/www/app/static/ts_graph.js new file mode 100644 index 00000000..00825516 --- /dev/null +++ b/src/www/app/static/ts_graph.js @@ -0,0 +1,87 @@ +var chart; +var parsed_data = {} //ugly but things need to access it at varied times +var toggle_state = true; +//hopefully 20 different colours should be enough +var GRAPH_COLOURS = [ + "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", + "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", + "#aec7e8", "#ffbb78", "#98df8a", "#ff9896", "#c5b0d5", + "#c49c94", "#f7b6d2", "#c7c7c7", "#dbdb8d", "#9edae5" +]; + + +//this generates dataset for the chart +//input data only has label, value, timestamp +function generate_datasets(parsed_data) { + var datasets = []; + var cindex = 0; + + for (var label in parsed_data) { + var data = parsed_data[label]; + var colour = GRAPH_COLOURS[cindex++ % GRAPH_COLOURS.length]; + + datasets.push({ + label: label, + data: data.map(entry => ({ + x: entry[0], + y: entry[1] + })), + borderColor: colour, + backgroundColor: 'rgba(0, 0, 0, 0)', // Transparent fill + fill: false + }); + } + return datasets; +} + + +//create the chart via chartjs +function create_chart(parsed_data) { + parsed_data = parsed_data; + var ctx = document.getElementById('chart').getContext('2d'); + chart = new Chart(ctx, { + type: 'line', + data: { + datasets: generate_datasets(parsed_data) + }, + options: { + responsive: true, + plugins: { + legend: { + onClick: function(e, legend_item, legend) { + chart.data.datasets[legend_item.datasetIndex].hidden = !chart.data.datasets[legend_item.datasetIndex].hidden; + chart.update(); + } + } + }, + scales: { + x: { + type: 'time', + //distribution: 'linear', + min: luxon.DateTime.now().plus({ days: -30 }).toISODate(), + max: new Date() + } + } + } + }); +} + + +function filter_chart(days) { + chart.options.scales.x.min = luxon.DateTime.now().plus({ days: -days.value }).toISODate(); + chart.options.scales.x.max = new Date(); + chart.update(); +} + + +function toggle_selection(btn) { + chart.data.datasets.forEach((data_set) => { + data_set.hidden = toggle_state; + }); + + toggle_state = !toggle_state; + + chart.update(); + + btn.textContent = toggle_state ? 
'Select: None' : 'Select: All'; +} diff --git a/src/www/app/static/ts_table.css b/src/www/app/static/ts_table.css new file mode 100644 index 00000000..8851139b --- /dev/null +++ b/src/www/app/static/ts_table.css @@ -0,0 +1,174 @@ +h1 { + text-align: center; +} + +#ts-table-div { + margin: auto; +} + +.table-body { + font-family: monospace; +} + +.header-container { + display: flex; + align-items: center; +} + +#ts-heading { + margin-right: 10px; +} + +#ts-toggle-view { + background-color: #17a2b8; + border: none; + color: white; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 5px; + cursor: pointer; + padding-left: 5px; + padding-right: 5px; +} + +#ts-toggle-view:hover { + background-color: #fd7e14; +} + +.gridjs-wrapper { + width: 100%; + overflow-x: auto; +} + +.gridjs-table { + border-collapse: collapse; + width: 100%; +} + +.gridjs-th { + border: 1px solid #ccc; + padding: 10px; + text-align: left; + background-color: #E8E8E8; +} + + +.gridjs-td { + border: 1px solid #ccc; + padding: 10px; +} + + +.gridjs-tbody .gridjs-tr:nth-child(even) { + background-color: #f9f9f9; +} + + +.gridjs-summary { + font-weight: bold; +} + + +.gridjs-search-input { + padding: 5px; + border: 1px solid #ccc; + float: right; +} + + +.gridjs-th { + border: 1px solid #ccc; + border-left: none; + padding: 10px; + text-align: left; + background-color: #f0f0f0; + position: relative; + font-size: 16px; + font-weight: bold; +} + + +.gridjs-th:first-child { + border-left: 1px solid #ccc; +} + + +.gridjs-table .gridjs-tbody .gridjs-tr:hover { + background-color: #e0e0e0 !important; +} + + + +.gridjs-td { + border: 1px solid #ccc; + padding: 10px; +} + + +.gridjs-sort { + width: 20px; + height: 20px; + background-color: transparent; + border: none; + position: absolute; + top: 50%; + right: 0; + transform: translateY(-50%); + cursor: pointer; + outline: none; + margin-bottom:5px; + padding-bottom:5px; +} + + +.gridjs-sort:active, +.gridjs-sort:focus { + outline: none; +} + + +.gridjs-sort.gridjs-sort-asc::before { + content: ''; + position: absolute; + width: 0; + height: 0; + border-style: solid; + border-width: 0 5px 8px 5px; + border-color: transparent transparent #000 transparent; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); +} + + +.gridjs-sort.gridjs-sort-desc::before { + content: ''; + position: absolute; + width: 0; + height: 0; + border-style: solid; + border-width: 8px 5px 0 5px; + border-color: #000 transparent transparent transparent; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); +} + +.ts-date-picker{ + float: left; +} + +#timeseries-div{ + padding-bottom:100px; + margin-bottom:100px; +} + +.csv-btn{ + float:right; + margin-right:10px; + text-align: center; + text-decoration: none; + cursor: pointer; +} diff --git a/src/www/app/static/ts_table.js b/src/www/app/static/ts_table.js new file mode 100644 index 00000000..dc246923 --- /dev/null +++ b/src/www/app/static/ts_table.js @@ -0,0 +1,166 @@ +//this just inits some required variables pulled from html/used between some functions. 
+let grid, dev_type = '', uid = ''; +let to_picker_value = new Date(); +let from_picker_value = new Date(); + + +function init(devType, uId) { + dev_type = devType; + uid = uId; +} + + +//create initial table, this uses auto pulled last 30 days +function create_table(columns, data) { + if (grid) return; + grid = new gridjs.Grid({ + columns: columns, + data: data, + sort: true, + search: true, + className: { + table: 'table-body' + }, + width: '100%' + }).render(document.getElementById("ts-table-div")); + + insert_date_pickers(); +} + + +//update the table, by removing date pickers, +//force re-rendering the table,and re-add the pickers +//this uses newly pulled data +function update_table(columns, rows) { + if (grid) { + remove_date_pickers(); + + //hacky solution, get rid of search to avoid an error, error doesn't do anything + grid.plugin.remove('search'); + + grid.updateConfig({ + columns: columns, + data: rows, + sort: true, + search: true, + className: { + table: 'table-body' + }, + width: '100%' + }).forceRender(); + insert_date_pickers(); + } else { + create_table(columns, rows); + } +} + + +//in order to get the date pickers in correctly it seeems to need to be done through JS +//we add them in and set them up here +function insert_date_pickers() { + const from_picker = document.createElement('input'); + from_picker.type = 'date'; + from_picker.id = 'datepicker-from'; + from_picker.name = 'datepicker-from'; + from_picker.className = 'date-picker ts-date-picker'; + from_picker.valueAsDate = from_picker_value; + + const to_picker = document.createElement('input'); + to_picker.type = 'date'; + to_picker.id = 'datepicker-to'; + to_picker.name = 'datepicker-to'; + to_picker.className = 'date-picker ts-date-picker'; + to_picker.valueAsDate = to_picker_value; + + const go_btn = document.createElement('button'); + go_btn.textContent = 'Go'; + go_btn.className = 'ts-btn'; + + const csv_export_btn = document.createElement('button'); + csv_export_btn.textContent = 'Export CSV'; + csv_export_btn.className = 'csv-btn'; + + //button listener, handles the form + go_btn.addEventListener('click', function() { + handle_go_btn(from_picker, to_picker); + }); + + + csv_export_btn.addEventListener('click', function() { + handle_csv_btn(); + }); + + const grid_js_head = document.querySelector('.gridjs-head'); + grid_js_head.appendChild(from_picker); + grid_js_head.appendChild(to_picker); + grid_js_head.appendChild(go_btn); + grid_js_head.appendChild(csv_export_btn); +} + + +//we need to remove the date pickers when we update table, otherwise it no work for some unknown reason +function remove_date_pickers() { + const date_picker_from = document.getElementById('datepicker-from'); + const date_picker_to = document.getElementById('datepicker-to'); + const go_btn = document.querySelector('.ts-btn'); + const csv_export_btn = document.querySelector('.csv-btn') + + if (date_picker_from) { from_picker_value = date_picker_from.valueAsDate; date_picker_from.remove(); } + if (date_picker_to) { to_picker_value = date_picker_to.valueAsDate; date_picker_to.remove(); } + if (go_btn) go_btn.remove(); + if (csv_export_btn) csv_export_btn.remove(); +} + + +function handle_go_btn(from_picker, to_picker) { + const from_date = from_picker.value; + const to_date = to_picker.value; + fetch_and_update_table(from_date, to_date); + +} + +function fetch_and_update_table(from_date, to_date) { + if (!is_valid_date(from_date) || !is_valid_date(to_date) || dev_type == '' || uid == '') + return; + + 
fetch(`/get_between_dates_ts?dev_type=${dev_type}&uid=${uid}&from_date=${from_date}&to_date=${to_date}`) + .then(response => { + if (!response.ok) { + throw new Error('Network response was not ok'); + } + return response.json(); + }) + .then(data => { + update_table(data.columns, data.data); + }) + .catch(error => { + console.error('There was a problem with the fetch operation:', error); + }); +} + + +function handle_csv_btn() { + const columns = grid.config.columns; + + const last_entry = Array.from(grid.config.pipeline.cache.values()).pop(); + if (!last_entry) { + return; + } + + const last_visible_rows = last_entry._rows.map(row => row._cells.map(cell => cell.data)); + const csv_content = [columns.join(','), ...last_visible_rows.map(row => row.join(','))].join('\n'); + const encoded_uri = encodeURI(`data:text/csv;charset=utf-8,${csv_content}`); + + const link = document.createElement('a'); + link.setAttribute('href', encoded_uri); + link.setAttribute('download', 'table.csv'); + link.click(); +} + + + +//helper function to make sure dates chosen are valid +function is_valid_date(dateString) { + const regex = /^\d{4}-\d{2}-\d{2}$/; + return regex.test(dateString); +} diff --git a/src/www/app/templates/logical_device_form.html b/src/www/app/templates/logical_device_form.html index 7a1ab0cb..bd5e80ad 100644 --- a/src/www/app/templates/logical_device_form.html +++ b/src/www/app/templates/logical_device_form.html @@ -4,12 +4,12 @@
    -
  • Save
  • +
  • Save
  • Update Mapping
  • End Mapping
  • - {% if deviceMappings | length > 0 %} -
  • {{ "Pause Mapping" if deviceMappings[0].is_active == True else "Resume Mapping" }}
  • - {% endif %} + {% if deviceMappings | length > 0 %} +
  • {{ "Pause Mapping" if deviceMappings[0].is_active == True else "Resume Mapping" }}
  • + {% endif %}
@@ -20,7 +20,7 @@

{{ title }}

Form

-
+
@@ -80,7 +80,7 @@

Select Physical Device to Map

-
+

Mapping

{% if deviceMappings | length > 0 %} {% if deviceMappings[0].is_active == False %} @@ -88,29 +88,34 @@

Mapping

{% endif %} {% endif %}
+

Mappings

- + {% for mapping in deviceMappings %} - - + + - + {% endfor %}
ID Name Start EndIs ActiveIs Active
{{ mapping.pd.uid }}{{ mapping.pd.name }}{{ mapping.pd_uid }}{{ mapping.pd_name }} {{ mapping.start_time }} {{ mapping.end_time }}{{ mapping.is_active }}{{ mapping.is_active }}
+
+
+ {% include 'ts_graph.html' %} +
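For reference when reading the ts_graph.html include added above: the ts_data value handed to these templates is the dictionary built by parse_ts_data() in main.py, keyed by standardised series name with (timestamp, value) pairs, which ts_graph.html presumably serialises into the page for generate_datasets() in ts_graph.js to turn into Chart.js datasets. A small hypothetical example of that shape, using the sample values from the parse_ts_table_data comment:

```python
# Hypothetical ts_data as produced by parse_ts_data() and passed to the template.
ts_data = {
    "5_TEMPERATURE": [
        ("2023-10-17T14:00:00+00:00", 0.2999783007627932),
        ("2023-10-18T14:00:00+00:00", 2.2999783007627932),
    ],
    "BATTERY_V": [
        ("2023-10-17T14:00:00+00:00", 11.313449),
        ("2023-10-18T14:00:00+00:00", 12.313449),
    ],
}
```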
-{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/src/www/app/templates/physical_device_form.html b/src/www/app/templates/physical_device_form.html index 57dd6fca..4c68b1b8 100644 --- a/src/www/app/templates/physical_device_form.html +++ b/src/www/app/templates/physical_device_form.html @@ -4,13 +4,13 @@
    -
  • Save
  • +
  • Save
  • Update Mapping
  • Create Mapping
  • End Mapping
  • - {% if currentMappings | length > 0 %} -
  • {{ "Pause Mapping" if currentMappings[0].is_active == True else "Resume Mapping" }}
  • - {% endif %} + {% if currentMappings | length > 0 %} +
  • {{ "Pause Mapping" if currentMappings[0].is_active == True else "Resume Mapping" }}
  • + {% endif %}
@@ -21,7 +21,7 @@

{{ title }}

Form

-
+
@@ -98,16 +98,16 @@

Mapping

Device Name Start Time End Time - Is Active + Is Active {% for mapping in currentMappings %} - {{ mapping.ld.uid }} - {{ mapping.ld.name }} + {{ mapping.ld_uid }} + {{ mapping.ld_name }} {{ mapping.start_time }} {{ mapping.end_time }} - {{ mapping.is_active }} + {{ mapping.is_active }} {% endfor %} @@ -134,17 +134,21 @@

Notes

{% for note in deviceNotes %} - + {{ note.note }} {{ note.ts }} {% endfor%} - +
+
+
+ {% include 'ts_graph.html' %} +
-{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/src/www/app/templates/ts_graph.html b/src/www/app/templates/ts_graph.html new file mode 100644 index 00000000..18bc98eb --- /dev/null +++ b/src/www/app/templates/ts_graph.html @@ -0,0 +1,71 @@ + + + + + + + + + + +
+
+

Time Series Data

+
+ +
+
+
+
+ + + +
+ + +
+ +
+ + + diff --git a/src/www/app/utils/api.py b/src/www/app/utils/api.py index 9cfc983f..b3c97524 100644 --- a/src/www/app/utils/api.py +++ b/src/www/app/utils/api.py @@ -1,13 +1,15 @@ import json, os from typing import List +import sys +from typing import List import requests from datetime import datetime, timezone import base64 from pdmodels.Models import PhysicalDevice, LogicalDevice, PhysicalToLogicalMapping, DeviceNote, Location -end_point = os.getenv('IOTA_API_URL', 'http://restapi:5687') +end_point = os.getenv('IOTA_API_URL', 'http://restapi:5687') def get_sources(token: str) -> List[str]: """ @@ -372,3 +374,38 @@ def change_user_password(password: str, token: str) -> str: response.raise_for_status() return response.json() + +def get_puid_ts(puid: str): + try: + response = requests.get(f"{end_point}/query/?query=select timestamp,name,value from timeseries where p_uid={puid} and timestamp >= current_date - interval '30 days' order by timestamp asc") + response.raise_for_status() + #print("get_puid_ts ---returns---", file=sys.stderr) + #print(response.json(), file=sys.stderr) + return response.json() + except Exception as err: + print(f"webapp: unable to pull ts_luid data from api: {err}") + return {} + + +def get_luid_ts(luid: str): + try: + response = requests.get(f"{end_point}/query/?query=select timestamp,name,value from timeseries where l_uid={luid} and timestamp >= current_date - interval '30 days' order by timestamp asc") + response.raise_for_status() + #print("get_puid_ts ---returns---", file=sys.stderr) + #print(response.json(), file=sys.stderr) + return response.json() + except Exception as err: + print(f"webapp: unable to pull ts_luid data from api: {err}") + return {} + + +def get_between_dates_ts(dev_type: str, uid: str, from_date: str, to_date: str): + try: + response = requests.get(f"{end_point}/query/?query=select p_uid, l_uid, timestamp, name, value from timeseries where {dev_type}='{uid}' and timestamp BETWEEN '{from_date} 00:00:00' AND '{to_date} 23:59:59' order by timestamp asc") + response.raise_for_status() + #print("get_between dates ---returns---", file=sys.stderr) + #print(response.json(), file=sys.stderr) + return response.json() + except Exception as err: + print(f"webapp: unable to pull get_between_dates_ts data from api: {err}") + return {} diff --git a/test/python/TestIntegrationTSDB.py b/test/python/TestIntegrationTSDB.py new file mode 100644 index 00000000..3a66deeb --- /dev/null +++ b/test/python/TestIntegrationTSDB.py @@ -0,0 +1,104 @@ +#requires test environment running +#python -m pytest TestIntegrationTSDB.py + +import pytest,sys,os,json,dateutil,pika,time,subprocess,requests,psycopg2 +current_dir = os.path.dirname(os.path.abspath(__file__)) +module_path = os.path.abspath(os.path.join(current_dir, '../../src/python')) +sys.path.append(module_path) + + +exchange='lts_exchange' +queue='ltsreader_logical_msg_queue' +mq_user='broker' +mq_pass='CHANGEME' +end_point = 'http://0.0.0.0:5687' + +#helper, easier to just send via cmdline +def send_msg(msg: str): + command = [ + 'docker', 'exec', 'test-mq-1', 'rabbitmqadmin', + 'publish', '-u', mq_user, '-p', mq_pass, + f'exchange={exchange}', f'routing_key={queue}', + f'payload={msg}', 'properties={}' + ] + return subprocess.run(command, capture_output=True, text=True) + + +def check_insert(puid: str, luid: str): + response = requests.get( + f"{end_point}/query/?query=" + "SELECT name, value " + "FROM timeseries " + f"WHERE p_uid = {puid} " + f"AND l_uid = {luid} " + ) + response.raise_for_status() + return 
response.json() + + +def test_send_valid_msg(): + msg = """ + { + "broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", + "p_uid": 1, + "l_uid": 1, + "timestamp": "2023-01-30T06:21:56Z", + "timeseries": [ + { + "name": "battery (v)", + "value": 6.66 + } + ] + } + """ + result = send_msg(msg) + time.sleep(1) + insert = check_insert("1", "1") ## NAME CHANGES TO BATTERY_V + assert(result.stdout == "Message published\n") + assert(insert[-1] == ['BATTERY_V', 6.66]) + + +def test_send_invalid_msg(): + msg = """ + { + "l_uid": 777, + "timestamp": "2023-01-30T06:21:56Z", + "timeseries": [ + { + } + ] + } + """ + result = send_msg(msg) + time.sleep(1) + insert = check_insert("777", "777") ## NAME CHANGES TO BATTERY_V + assert(result.stdout == "Message published\n") + assert(insert == []) + + +#check we can still send a message after a bad one +def test_send_valid_msg2(): + msg = """ + { + "broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", + "p_uid": 2, + "l_uid": 2, + "timestamp": "2023-01-30T06:22:56Z", + "timeseries": [ + { + "name": "battery (v)", + "value": 9.99 + }, + { + "name": "teSt-name-voltage", + "value": 2.99 + } + ] + } + """ + result = send_msg(msg) + time.sleep(1) + insert = check_insert("2", "2") ## NAME CHANGES TO BATTERY_V + assert(result.stdout == "Message published\n") + assert(insert[-1] == ['TEST_NAME_V', 2.99]) + assert(insert[-2] == ['BATTERY_V', 9.99]) diff --git a/test/python/TestStdNaming.py b/test/python/TestStdNaming.py new file mode 100644 index 00000000..83baf660 --- /dev/null +++ b/test/python/TestStdNaming.py @@ -0,0 +1,199 @@ +import pytest,sys,os + +current_dir = os.path.dirname(os.path.abspath(__file__)) +module_path = os.path.abspath(os.path.join(current_dir, '../../src/python')) +sys.path.append(module_path) + +from util.NamingConstants import * + +@pytest.mark.parametrize("input_name, expected_output", [ + ('1_Temperature', '1_TEMPERATURE'), + ('1_VWC', '1_VWC'), + ('2_Temperature', '2_TEMPERATURE'), + ('2_VWC', '2_VWC'), + ('3_Temperature', '3_TEMPERATURE'), + ('3_VWC', '3_VWC'), + ('4_Temperature', '4_TEMPERATURE'), + ('4_VWC', '4_VWC'), + ('5_Temperature', '5_TEMPERATURE'), + ('5_VWC', '5_VWC'), + ('6_Temperature', '6_TEMPERATURE'), + ('6_VWC', '6_VWC'), + ('8_AirPressure', '8_AIR_PRESSURE'), + ('8_AirTemperature', '8_AIR_TEMPERATURE'), + ('8_HumiditySensorTemperature', '8_HUMIDITY_SENSOR_TEMPERATURE'), + ('8_Precipitation', '8_PRECIPITATION'), + ('8_RH', '8_RH'), + ('8_Solar', '8_SOLAR'), + ('8_Strikes', '8_STRIKES'), + ('8_VaporPressure', '8_VAPOR_PRESSURE'), + ('8_WindDirection', '8_WIND_DIRECTION'), + ('8_WindGustSpeed', '8_WIND_GUST_SPEED'), + ('8_WindSpeed', '8_WIND_SPEED'), + ('Access_technology', 'ACCESS_TECHNOLOGY'), + ('accMotion', 'ACC_MOTION'), + ('Actuator', 'ACTUATOR'), + ('adc_ch1', 'ADC_CH_1'), + ('adc_ch2', 'ADC_CH_2'), + ('adc_ch3', 'ADC_CH_3'), + ('adc_ch4', 'ADC_CH_4'), + ('airTemp', 'AIR_TEMPERATURE'), + ('airtemperature', 'AIR_TEMPERATURE'), + ('airTemperature', 'AIR_TEMPERATURE'), + ('altitude', 'ALTITUDE'), + ('Ana', 'ANA'), + ('atmosphericpressure', 'ATMOSPHERIC_PRESSURE'), + ('atmosphericPressure', 'ATMOSPHERIC_PRESSURE'), + ('Average_current', 'AVERAGE_CURRENT'), + ('average-flow-velocity0_0_m/s', 'AVERAGE_FLOW_VELOCITY_0_0_MS'), + ('Average_voltage', 'AVERAGE_V'), + ('Average_Voltage', 'AVERAGE_V'), + ('Average_Wind_Speed_', 'AVERAGE_WIND_SPEED'), + ('avgWindDegrees', 'AVERAGE_WIND_DEGREES'), + ('barometricPressure', 'BAROMETRIC_PRESSURE'), + ('batmv', 'BATMV'), + ('battery', 'BATTERY'), + ('Battery (A)', 
'BATTERY_A'), + ('battery (v)', 'BATTERY_V'), + ('Battery (V)', 'BATTERY_V'), + ('batteryVoltage', 'BATTERY_V'), + ('battery-voltage_V', 'BATTERY_V'), + ('Battery (W)', 'BATTERY_W'), + ('Cable', 'CABLE'), + ('charging-state', 'CHARGING_STATE'), + ('Class', 'CLASS'), + ('command', 'COMMAND'), + ('conductivity', 'CONDUCTIVITY'), + ('counterValue', 'COUNTER_VALUE'), + ('current-flow-velocity0_0_m/s', 'CURRENT_FLOW_VELOCITY_0_0_MS'), + ('depth', 'DEPTH'), + ('Device', 'DEVICE'), + ('DI0', 'DI_0'), + ('DI1', 'DI_1'), + ('direction', 'DIRECTION'), + ('distance', 'DISTANCE'), + ('down630', 'DOWN_630'), + ('down800', 'DOWN_800'), + ('EC', 'EC'), + ('externalTemperature', 'EXTERNAL_TEMPERATURE'), + ('fault', 'FAULT'), + ('Fraud', 'FRAUD'), + ('gnss', 'GNSS'), + ('gustspeed', 'GUST_SPEED'), + ('gustSpeed', 'GUST_SPEED'), + ('header', 'HEADER'), + ('Humi', 'HUMI'), + ('humidity', 'HUMIDITY'), + ('Hygro', 'HYGRO'), + ('Leak', 'LEAK'), + ('linpar', 'LINPAR'), + ('Max_current', 'MAX_CURRENT'), + ('Maximum_Wind_Speed_', 'MAX_WIND_SPEED'), + ('Max_voltage', 'MAX_V'), + ('Min_current', 'MIN_CURRENT'), + ('Minimum_Wind_Speed_', 'MIN_WIND_SPEED'), + ('Min_voltage', 'MIN_V'), + ('moisture1', 'MOISTURE_1'), + ('moisture2', 'MOISTURE_2'), + ('moisture3', 'MOISTURE_3'), + ('moisture4', 'MOISTURE_4'), + ('ndvi', 'NDVI'), + ('O06 / DPI-144', 'O_06_DPI_144'), + ('Operating_cycle', 'OPERATING_CYCLE'), + ('packet-type', 'PACKET_TYPE'), + ('period', 'PERIOD'), + ('Power', 'POWER'), + ('precipitation', 'PRECIPITATION'), + ('pressure', 'PRESSURE'), + ('Processor_temperature', 'PROCESSOR_TEMPERATURE'), + ('pulse_count', 'PULSE_COUNT'), + ('Radio_channel_code', 'RADIO_CHANNEL_CODE'), + ('Rainfall', 'RAINFALL'), + ('rain_per_interval', 'RAIN_PER_INTERVAL'), + ('Rain_per_interval', 'RAIN_PER_INTERVAL'), + ('raw_depth', 'RAW_DEPTH'), + ('rawSpeedCount', 'RAW_SPEED_COUNT'), + ('relativehumidity', 'RELATIVE_HUMIDITY'), + ('relativeHumidity', 'RELATIVE_HUMIDITY'), + ('Rest_capacity', 'REST_CAPACITY'), + ('Rest_power', 'REST_POWER'), + ('rssi', 'RSSI'), + ('rtc', 'RTC'), + ('RTC', 'RTC'), + ('S1_EC', 'S_1_EC'), + ('S1_Temp', 'S_1_TEMPERATURE'), + ('S1_Temp_10cm', 'S_1_TEMPERATURE_10_CM'), + ('S1_Temp_20cm', 'S_1_TEMPERATURE_20_CM'), + ('S1_Temp_30cm', 'S_1_TEMPERATURE_30_CM'), + ('S1_Temp_40cm', 'S_1_TEMPERATURE_40_CM'), + ('S1_Temp_50cm', 'S_1_TEMPERATURE_50_CM'), + ('S1_Temp_60cm', 'S_1_TEMPERATURE_60_CM'), + ('S1_Temp_70cm', 'S_1_TEMPERATURE_70_CM'), + ('S1_Temp_80cm', 'S_1_TEMPERATURE_80_CM'), + ('S1_Temp_90cm', 'S_1_TEMPERATURE_90_CM'), + ('S1_VWC', 'S_1_VWC'), + ('s4solarRadiation', 'S_4_SOLAR_RADIATION'), + ('salinity', 'SALINITY'), + ('salinity1', 'SALINITY_1'), + ('salinity2', 'SALINITY_2'), + ('salinity3', 'SALINITY_3'), + ('salinity4', 'SALINITY_4'), + ('sensorReading', 'SENSOR_READING'), + ('shortest_pulse', 'SHORTEST_PULSE'), + ('Signal', 'SIGNAL'), + ('Signal_indication', 'SIGNAL_INDICATION'), + ('Signal_strength', 'SIGNAL_STRENGTH'), + ('snr', 'SNR'), + ('soilmoist', 'SOIL_MOISTURE'), + ('soiltemp', 'SOIL_TEMPERATURE'), + ('solar', 'SOLAR'), + ('Solar (A)', 'SOLAR_A'), + ('solarpanel', 'SOLAR_PANEL'), + ('solarPanel', 'SOLAR_PANEL'), + ('solar (v)', 'SOLAR_V'), + ('Solar (V)', 'SOLAR_V'), + ('solar-voltage_V', 'SOLAR_V'), + ('Solar (W)', 'SOLAR_W'), + ('solmv', 'SOLMV'), + ('sq110_umol', 'SQ_110_UMOL'), + ('strikes', 'STRIKES'), + ('Tamper', 'TAMPER'), + ('tdskcl', 'TDSKCL'), + ('Temp', 'TEMPERATURE'), + ('temperature', 'TEMPERATURE'), + ('Temperature', 'TEMPERATURE'), + ('temperature1', 'TEMPERATURE_1'), + 
('temperature2', 'TEMPERATURE_2'), + ('temperature3', 'TEMPERATURE_3'), + ('temperature4', 'TEMPERATURE_4'), + ('temperature5', 'TEMPERATURE_5'), + ('temperature6', 'TEMPERATURE_6'), + ('temperature7', 'TEMPERATURE_7'), + ('temperature8', 'TEMPERATURE_8'), + ('temperatureReading', 'TEMPERATURE_READING'), + ('tilt-anlge0_0_Degrees', 'TILT_ANLGE_0_0_DEGREES'), + ('UNIX_time', 'UNIX_TIME'), + ('up630', 'UP_630'), + ('up800', 'UP_800'), + ('uptime_s', 'UPTIME_S'), + ('vapourpressure', 'VAPOUR_PRESSURE'), + ('vapourPressure', 'VAPOUR_PRESSURE'), + ('vdd', 'VDD'), + ('Volt', 'V'), + ('vt', 'VT'), + ('VWC', 'VWC'), + ('VWC1', 'VWC_1'), + ('winddirection', 'WIND_DIRECTION'), + ('windDirection', 'WIND_DIRECTION'), + ('windKph', 'WIND_KPH'), + ('windspeed', 'WIND_SPEED'), + ('windSpeed', 'WIND_SPEED'), + ('windStdDevDegrees', 'WIND_STD_DEV_DEGREES') +]) + + +def test_clean_names(input_name, expected_output): + result = clean_name(input_name) + assert result == expected_output + + diff --git a/test/python/TestTSDBAPI.py b/test/python/TestTSDBAPI.py new file mode 100644 index 00000000..c52753c3 --- /dev/null +++ b/test/python/TestTSDBAPI.py @@ -0,0 +1,149 @@ +#requires test environment running +#python -m pytest TestTSDBAPI.py + +import pytest,sys,os,json,dateutil,pika,time,subprocess,requests,psycopg2 + +current_dir = os.path.dirname(os.path.abspath(__file__)) +module_path = os.path.abspath(os.path.join(current_dir, '../../src/python')) +sys.path.append(module_path) + +tsdb_table = "timeseries" +exchange='lts_exchange' +queue='ltsreader_logical_msg_queue' +mq_user='broker' +mq_pass='CHANGEME' +end_point = 'http://0.0.0.0:5687' + +#helper, easier to just send via cmdline +def send_msg(msg: str): + command = [ + 'docker', 'exec', 'test-mq-1', 'rabbitmqadmin', + 'publish', '-u', mq_user, '-p', mq_pass, + f'exchange={exchange}', f'routing_key={queue}', + f'payload={msg}', 'properties={}' + ] + return subprocess.run(command, capture_output=True, text=True) + + +def check_insert(puid: str, luid: str): + response = requests.get( + f"{end_point}/query/?query=" + "SELECT name, value " + "FROM timeseries " + f"WHERE p_uid = {puid} " + f"AND l_uid = {luid} " + ) + response.raise_for_status() + return response.json() + +def get_current_time(): + current_time = time.strftime("%Y-%m-%dT%H:%M:%S+00:00", time.gmtime()) + return current_time + +def generate_and_send_message(correlation_id, p_uid, l_uid, timestamp, name, value): + msg = { + "broker_correlation_id": correlation_id, + "p_uid": p_uid, + "l_uid": l_uid, + "timestamp": timestamp, + "timeseries": [ + { + "name": name, + "value": value + } + ] + } + send_msg(json.dumps(msg)) + +# Test a single message retrieval +def test_send_and_query_message(): + current_time = get_current_time() + generate_and_send_message("test1", 1, 1, current_time, "battery (v)", 6.66) + + timeout = time.time() + 10 + while True: + insert = check_insert("1", "1") + if insert or time.time() > timeout: + break + time.sleep(0.2) + + assert insert[-1] == ['BATTERY_V', 6.66] + + query_params = {"timestamp": current_time} + response = requests.get(f"{end_point}/query/", params=query_params) + response.raise_for_status() + data = response.json() + + # assert 'title' in data + assert len(data) > 0 + + matching_rows = [row for row in data if row[3] == current_time] + assert len(matching_rows) > 0 + assert matching_rows[0][3] == current_time + + +def test_query_by_time_range(): + current_time1 = get_current_time() + generate_and_send_message("test1", 1, 1, current_time1, "battery (v)", 6.66) + 
time.sleep(2) + current_time2 = get_current_time() + generate_and_send_message("test2", 1, 1, current_time2, "battery (v)", 9.99) + time.sleep(1) + + query_params = {"timestamp1": current_time1, "timestamp2": current_time2} + response = requests.get(f"{end_point}/query/", params=query_params) + response.raise_for_status() + data = response.json() + + # assert 'title' in data + assert len(data) >= 2 + + timestamps = [row[3] for row in data] + assert current_time1 in timestamps + assert current_time2 in timestamps + +import time + +# Tests both luid, and last filtering +def test_get_last_records_for_luid(): + current_time1 = get_current_time() + generate_and_send_message("test1", 1, 1, current_time1, "battery (v)", 6.66) + time.sleep(2) + current_time2 = get_current_time() + generate_and_send_message("test2", 1, 1, current_time2, "battery (v)", 9.99) + time.sleep(2) + + response = requests.get(f"{end_point}/query/l_uid/1/last", params={"seconds": 5}) + response.raise_for_status() + data = response.json() + + assert len(data) >= 2 + + timestamps = [row[3] for row in data] + assert current_time1 in timestamps + assert current_time2 in timestamps + + +# Test the func endpoint for getting an average. +def test_get_average_for_luid(): + + current_time1 = get_current_time() + generate_and_send_message("test1", "1", "1", current_time1, "battery (v)", 1) + time.sleep(1) + + current_time2 = get_current_time() + generate_and_send_message("test2", "1", "1", current_time2, "battery (v)", 2) + time.sleep(1) + + current_time3 = get_current_time() + generate_and_send_message("test3", "1", "1", current_time3, "battery (v)", 3) + time.sleep(1) + + # Make a request to get the average value for l_uid=1 between the times. + response = requests.get(f"{end_point}/query/l_uid/1/avg", params={"fromdate": current_time1, "todate": current_time3}) + response.raise_for_status() + data = response.json() + + # Check if the average value is 2 + assert len(data) == 1 + assert data[0][0] == 2 diff --git a/test/python/test_web_app.sh b/test/python/test_web_app.sh new file mode 100755 index 00000000..eb808584 --- /dev/null +++ b/test/python/test_web_app.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +#MAKE SURE IOTA IS UP AND RUNNING +#REQUIRES RUNNING ../../load-data.sh OR AT LEAST HAVING SOME DEVICES WITH PUID AND LUID 1 IN THE SYSTEM +# +#RUN BY USING `./test_web_app.sh` +#CHECK RESULTS BY GOING TO IOTA WEB APP AND SELECTING ON EITHER PHYSICAL OR LOGICAL DEVICE #1 AND CHECK BOTTOM OF PAGE + +container_name="test-mq-1" + +# Check if prod container running +if docker ps -a --format "{{.Names}}" | grep -q "^prod-mq-1$"; then + container_name="prod-mq-1" +fi + +iota_msgs=( + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, "timestamp":"2023-09-01T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 
1, "timestamp":"2023-09-01T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-01T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-02T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-02T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-03T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-03T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-03T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-04T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 
13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-04T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-04T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-04T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-04T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-05T14:00:00.000000Z", "timeseries": 
[{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-06T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-06T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-06T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-06T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-07T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-07T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-07T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-08T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-08T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-09T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-10T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-10T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-11T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": 
"83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-12T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-13T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-14T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-15T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-16T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-09-17T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 
2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-01T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-02T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-02T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-03T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-03T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-03T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-04T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-04T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-04T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-04T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-04T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, 
"timestamp":"2023-10-05T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-05T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-06T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-06T06:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 18.59712346078412},{"name": "battery voltage", "value": 12.6313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-06T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-06T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "battery voltage", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-07T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "battery voltage", "value": 
13.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-07T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-07T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-08T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-08T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-09T05:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 5.17719879313449},{"name": "battery voltage", "value": 12.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 1, "timestamp":"2023-10-10T07:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 6.290074845534363},{"name": "battery voltage", "value": 12.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 3, "timestamp":"2023-10-10T08:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 28.521567509813398},{"name": "test v", "value": 12.7719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 3, "timestamp":"2023-10-11T09:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 13.335981939217794},{"name": "test-v", "value": 13.17719879313449},{"name": "11test22", "value": 123.17719879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 3, "timestamp":"2023-10-12T10:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.3252698879111664},{"name": "battery voltage", "value": 13.879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, "timestamp":"2023-10-13T11:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.417581303815799},{"name": "battery voltage", "value": 13.19879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, "timestamp":"2023-10-14T12:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 2.673679377416013},{"name": "battery voltage", "value": 12.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, "timestamp":"2023-10-15T13:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 7.300519576905629},{"name": "battery voltage", "value": 11.9879313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, "timestamp":"2023-10-16T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' + '{"broker_correlation_id": "83d04e6f-db16-4280-8337-53f11b2335c6", "l_uid": 1, "p_uid": 2, 
"timestamp":"2023-10-17T14:00:00.000000Z", "timeseries": [{"name": "5_Temperature", "value": 0.2999783007627932},{"name": "battery voltage", "value": 11.313449}]}' +) + +send_msg() { + local msg="$1" + local exchange="lts_exchange" + local queue="ltsreader_logical_msg_queue" + local mq_user="broker" + local mq_pass="CHANGEME" + + docker exec "$container_name" rabbitmqadmin publish -u "$mq_user" -p "$mq_pass" \ + "exchange=$exchange" "routing_key=$queue" "payload=$msg" properties={} +} + +for msg in "${iota_msgs[@]}"; do + send_msg "$msg" +done diff --git a/timescale/Dockerfile b/timescale/Dockerfile new file mode 100755 index 00000000..bbd31471 --- /dev/null +++ b/timescale/Dockerfile @@ -0,0 +1,24 @@ +FROM timescale/timescaledb-ha:pg15-latest + +# Switch to root user for installations +USER root + +# Install pgBackRest +RUN apt-get update && \ + apt-get install -y wget gnupg2 pgbackrest && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# Copy the pgbackrest initialization script into the container +COPY pgbr_init.sh /docker-entrypoint-initdb.d/pgbr_init.sh +RUN chmod +x /docker-entrypoint-initdb.d/pgbr_init.sh + + +# Append custom configurations to the existing postgresql.conf +COPY postgres/custom_postgresql.conf /tmp/custom_postgresql.conf +RUN echo "cat /tmp/custom_postgresql.conf >> /home/postgres/pgdata/data/postgresql.conf" > /docker-entrypoint-initdb.d/10-append-config.sh + + +# Switch back to the postgres user +USER postgres + diff --git a/timescale/init.sql b/timescale/init.sql new file mode 100644 index 00000000..8dba4d7b --- /dev/null +++ b/timescale/init.sql @@ -0,0 +1,10 @@ +CREATE TABLE timeseries ( + broker_id TEXT NOT NULL, + l_uid INTEGER NOT NULL, + p_uid INTEGER NOT NULL, + timestamp TIMESTAMPTZ NOT NULL, + name TEXT, + value NUMERIC + ); + +SELECT create_hypertable('timeseries', 'timestamp'); diff --git a/timescale/pgbackrest/pgbackrest.conf b/timescale/pgbackrest/pgbackrest.conf new file mode 100644 index 00000000..8c6a93be --- /dev/null +++ b/timescale/pgbackrest/pgbackrest.conf @@ -0,0 +1,10 @@ +[global] +repo1-path=/var/lib/pgbackrest +process-max=2 +log-level-console=info +log-level-file=debug +repo1-retention-full=4 + +[demo] +pg1-path=/home/postgres/pgdata/data + diff --git a/timescale/pgbr_init.sh b/timescale/pgbr_init.sh new file mode 100755 index 00000000..177f2c4e --- /dev/null +++ b/timescale/pgbr_init.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Function to wait for PostgreSQL to be ready +wait_for_postgres() { + until pg_isready -U postgres; do + echo "Waiting for PostgreSQL to start..." + sleep 2 + done +} + +# Wait for PostgreSQL +wait_for_postgres + + +if [ ! 
-f "/var/lib/pgbackrest/backup/demo/backup.info" ]; then + pgbackrest --stanza=demo --config=/home/postgres/pgdata/backup/pgbackrest.conf stanza-create +fi + + + +# Continue with the default TimescaleDB entrypoint +#exec /usr/local/bin/docker-entrypoint.sh postgres + diff --git a/timescale/postgres/custom_postgresql.conf b/timescale/postgres/custom_postgresql.conf new file mode 100644 index 00000000..d75d89c7 --- /dev/null +++ b/timescale/postgres/custom_postgresql.conf @@ -0,0 +1,3 @@ +archive_mode = on +archive_command = 'pgbackrest --stanza=demo --config=/home/postgres/pgdata/backup/pgbackrest.conf archive-push %p' +recovery_target = immediate diff --git a/ts_backup.sh b/ts_backup.sh new file mode 100755 index 00000000..4b18466d --- /dev/null +++ b/ts_backup.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +source compose/.env 2>/dev/null + +# Configuration +DB_NAME="${TSDB_DB}" +DB_USER="${TSDB_USER}" +DB_PASSWORD="${TSDB_PASSWORD}" +BACKUP_DIR="backup/" +BACKUP_FILENAME="backup_$(date +'%Y%m%d_%H%M%S').sql" + +if [ -z "$TSDB_USER" ] || [ -z "$TSDB_DB" ] || [ -z "$TSDB_PASSWORD" ]; then + echo "Error: Required environment variables are not set." + exit 1 +fi + +# Find the container name containing "timescale-1" +DB_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "timescaledb-1") + +if [ -z "$DB_CONTAINER_NAME" ]; then + echo "Error: Container containing 'timescale-1' not found." + exit 1 +fi + +# Check if backup directory exists, if not, create it (for error prevention) +[ -d "$BACKUP_DIR" ] || mkdir -p "$BACKUP_DIR" + +# Perform the backup +docker exec -t "$DB_CONTAINER_NAME" pg_dump -U "$DB_USER" -d "$DB_NAME" -F c -b -v -f "/tmp/$BACKUP_FILENAME" + +# Copy the backup from the container to the host +docker cp "$DB_CONTAINER_NAME:/tmp/$BACKUP_FILENAME" "$BACKUP_DIR/$BACKUP_FILENAME" + +# Remove the backup file from the container +docker exec -t "$DB_CONTAINER_NAME" rm "/tmp/$BACKUP_FILENAME" + +echo "Backup completed: $BACKUP_DIR/$BACKUP_FILENAME" + diff --git a/ts_restore.sh b/ts_restore.sh new file mode 100755 index 00000000..87c158c4 --- /dev/null +++ b/ts_restore.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Load environment variables from .env file +source compose/.env 2>/dev/null + +# Configuration +DB_NAME="${TSDB_DB}" +DB_USER="${TSDB_USER}" +DB_PASSWORD="${TSDB_PASSWORD}" +BACKUP_DIR="backup/" # Update this path if needed + +# Check if environment variables are set +if [ -z "$TSDB_USER" ] || [ -z "$TSDB_DB" ] || [ -z "$TSDB_PASSWORD" ]; then + echo "Error: Required environment variables are not set." + exit 1 +fi + +# Check if backup filename is provided +if [ "$#" -ne 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +BACKUP_FILENAME="$1" + +# Check if file exists +if [ ! -f "$BACKUP_DIR/$BACKUP_FILENAME" ]; then + echo "Error: File $BACKUP_DIR/$BACKUP_FILENAME does not exist." + exit 1 +fi + +# Find the container name containing "timescaledb-1" +DB_CONTAINER_NAME=$(docker ps --format '{{.Names}}' | grep "timescaledb-1") + +if [ -z "$DB_CONTAINER_NAME" ]; then + echo "Error: Container containing 'timescaledb-1' not found." 
+ exit 1 +fi + +# Copy the backup from the host to the container +docker cp "$BACKUP_DIR/$BACKUP_FILENAME" "$DB_CONTAINER_NAME:/tmp/$BACKUP_FILENAME" + +# Restore +docker exec -t "$DB_CONTAINER_NAME" dropdb -U "$DB_USER" "$DB_NAME" +docker exec -t "$DB_CONTAINER_NAME" createdb -U "$DB_USER" "$DB_NAME" +docker exec -t "$DB_CONTAINER_NAME" pg_restore -U "$DB_USER" -d "$DB_NAME" -v -1 "/tmp/$BACKUP_FILENAME" + +# Remove the backup file from the container +docker exec -t "$DB_CONTAINER_NAME" rm "/tmp/$BACKUP_FILENAME" + +echo "Restore completed from: $BACKUP_DIR/$BACKUP_FILENAME" +