-- DB upgrade scripts
-- (PanDA database wiki page; last edited by Fernando Barreiro, Sep 4, 2023; 53 revisions)
-- NOTE: upgrade sections below are ordered newest-first (patch 16, 15, 14, 13, ...)
-- Wait up to 30 s for DDL locks instead of failing immediately
ALTER SESSION SET DDL_LOCK_TIMEOUT = 30;

-- Update versions
-- Record the 0.0.16 upgrade for the JEDI and SERVER components
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major = 0, minor = 0, patch = 16 WHERE component = 'JEDI';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major = 0, minor = 0, patch = 16 WHERE component = 'SERVER';
COMMIT;
-- MODIFICATIONTIME TRIGGER
-- New column JEDI_TASKS.REALMODIFICATIONTIME
-- DATE column that always reflects the true last-modification time
-- (per the column comment below, MODIFICATIONTIME itself can carry
-- artificial/old timestamps).
ALTER TABLE "ATLAS_PANDA"."JEDI_TASKS" ADD ("REALMODIFICATIONTIME" DATE);
COMMENT ON COLUMN "ATLAS_PANDA"."JEDI_TASKS"."REALMODIFICATIONTIME" IS 'Set ALWAYS to last modification time, without any tricks like old timestamps';
-- Index to support efficient time-range scans on the real modification time
CREATE INDEX "ATLAS_PANDA"."JEDI_TASKS_REALMODTIME_IDX" ON "ATLAS_PANDA"."JEDI_TASKS" ("REALMODIFICATIONTIME");
-- Trigger to set JEDI_TASKS.REALMODIFICATIONTIME to current UTC timestamp
-- on every insert and on any update that touches MODIFICATIONTIME.
-- Fix: the DML event list must read "INSERT OR UPDATE OF <col>" — the
-- "OF <col>" restriction is only legal after UPDATE, so the previous
-- "UPDATE OR INSERT OF MODIFICATIONTIME" ordering is rejected by Oracle.
CREATE OR REPLACE TRIGGER "ATLAS_PANDA"."UPDATE_REALMODIFICATIONTIME"
BEFORE INSERT OR UPDATE OF MODIFICATIONTIME ON "ATLAS_PANDA"."JEDI_TASKS"
FOR EACH ROW
BEGIN
-- SYS_EXTRACT_UTC converts SYSTIMESTAMP to UTC; implicitly cast to DATE on assignment
:NEW."REALMODIFICATIONTIME" := SYS_EXTRACT_UTC(systimestamp);
END;
/
-- SQL_QUEUE TABLE
-- Lightweight database-backed message queue used to pass SQL between agents.
CREATE TABLE "ATLAS_PANDA"."SQL_QUEUE"
(
"TOPIC" VARCHAR2(50 BYTE),
"PANDAID" NUMBER(11,0),
"EXECUTION_ORDER" NUMBER(5,0),
"JEDITASKID" NUMBER(11,0),
"CREATIONTIME" DATE,
"DATA" VARCHAR2(4000 BYTE),
CONSTRAINT "SQL_QUEUE_PK" PRIMARY KEY ("TOPIC", "PANDAID", "EXECUTION_ORDER")
);
-- Indexes: consumer lookup by topic+task, and draining per topic in creation order
CREATE INDEX "ATLAS_PANDA"."SQL_QUEUE_TOPIC_TASK_IDX" ON "ATLAS_PANDA"."SQL_QUEUE" ("TOPIC", "JEDITASKID");
CREATE INDEX "ATLAS_PANDA"."SQL_QUEUE_TOPIC_CREATIONTIME_IDX" ON "ATLAS_PANDA"."SQL_QUEUE" ("TOPIC", "CREATIONTIME");
COMMENT ON TABLE "ATLAS_PANDA"."SQL_QUEUE" IS 'Queue to send messages between agents';
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."TOPIC" IS 'Topic of the message';
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."PANDAID" IS 'Job ID';
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."EXECUTION_ORDER" IS 'In case multiple SQLs need to be executed together';
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."JEDITASKID" IS 'JEDI Task ID in case the messages want to be batched';
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."CREATIONTIME" IS 'Timestamp when the message was created';
-- Fix: the DATA column is VARCHAR2(4000), not a CLOB — correct the stored comment
COMMENT ON COLUMN "ATLAS_PANDA"."SQL_QUEUE"."DATA" IS 'JSON document (VARCHAR2) containing the SQL query and variables';
-- AS ATLAS_PANDA
-- Version 0.0.15 upgrade
ALTER SESSION set DDL_LOCK_TIMEOUT = 30;
-- New free-text column on JEDI_EVENTS (name suggests error diagnostics)
ALTER TABLE "ATLAS_PANDA"."JEDI_EVENTS" ADD ("ERROR_DIAG" VARCHAR2(500 BYTE));
-- Drop the SCHEMA component row entirely; only JEDI and SERVER are bumped here.
-- NOTE(review): the patch-14 section below still updates component='SCHEMA' —
-- confirm the intended ordering of these sections (the page is newest-first).
DELETE FROM "ATLAS_PANDA"."PANDADB_VERSION" WHERE component='SCHEMA';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major=0, minor=0, patch=15 where component='JEDI';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major=0, minor=0, patch=15 where component='SERVER';
COMMIT;
-- AS ATLAS_PANDA
-- Wait up to 30 s for DDL locks instead of failing immediately
ALTER SESSION SET DDL_LOCK_TIMEOUT = 30;

-- Add the CPU micro-architecture level column to every live job table
ALTER TABLE "ATLAS_PANDA"."JOBSDEFINED4"  ADD ("CPU_ARCHITECTURE_LEVEL" VARCHAR2(20));
ALTER TABLE "ATLAS_PANDA"."JOBSWAITING4"  ADD ("CPU_ARCHITECTURE_LEVEL" VARCHAR2(20));
ALTER TABLE "ATLAS_PANDA"."JOBSACTIVE4"   ADD ("CPU_ARCHITECTURE_LEVEL" VARCHAR2(20));
ALTER TABLE "ATLAS_PANDA"."JOBSARCHIVED4" ADD ("CPU_ARCHITECTURE_LEVEL" VARCHAR2(20));

COMMENT ON COLUMN "ATLAS_PANDA"."JOBSDEFINED4"."CPU_ARCHITECTURE_LEVEL"  IS 'Micro architecture level, e.g. x86-64-v1';
COMMENT ON COLUMN "ATLAS_PANDA"."JOBSWAITING4"."CPU_ARCHITECTURE_LEVEL"  IS 'Micro architecture level, e.g. x86-64-v1';
COMMENT ON COLUMN "ATLAS_PANDA"."JOBSACTIVE4"."CPU_ARCHITECTURE_LEVEL"   IS 'Micro architecture level, e.g. x86-64-v1';
COMMENT ON COLUMN "ATLAS_PANDA"."JOBSARCHIVED4"."CPU_ARCHITECTURE_LEVEL" IS 'Micro architecture level, e.g. x86-64-v1';

-- ADD THE COLUMNS TO THE BULKCOPY_PANDA_PARTITIONS JOB
-- Record the 0.0.14 upgrade for all three components
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major = 0, minor = 0, patch = 14 WHERE component = 'SCHEMA';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major = 0, minor = 0, patch = 14 WHERE component = 'JEDI';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major = 0, minor = 0, patch = 14 WHERE component = 'SERVER';
COMMIT;
-- As ATLAS_PANDAARCH
-- Mirror the new CPU_ARCHITECTURE_LEVEL column on the long-term archive table
ALTER TABLE "ATLAS_PANDAARCH"."JOBSARCHIVED" ADD ("CPU_ARCHITECTURE_LEVEL" VARCHAR2(20));
-- AS ATLAS_PANDA
-- Index-organized table (IOT) keyed by (REGION, TIMESTAMP);
-- COMPRESS 1 prefix-compresses the leading REGION key column.
CREATE TABLE "ATLAS_PANDA"."CARBON_REGION_EMISSIONS"
(
"REGION" VARCHAR2(16 BYTE),
"TIMESTAMP" DATE,
"VALUE" NUMBER(10,0),
CONSTRAINT "CARBON_REGION_EMISSIONS_PK" PRIMARY KEY ("REGION", "TIMESTAMP") ENABLE
) ORGANIZATION INDEX COMPRESS 1;
COMMENT ON TABLE "ATLAS_PANDA"."CARBON_REGION_EMISSIONS" IS 'Table to store CO2 emission time series for a region or country';
COMMENT ON COLUMN "ATLAS_PANDA"."CARBON_REGION_EMISSIONS"."REGION" IS 'The name of the region. The region is usually a country, but can be more fine grained in large countries';
COMMENT ON COLUMN "ATLAS_PANDA"."CARBON_REGION_EMISSIONS"."TIMESTAMP" IS 'When the value was taken';
COMMENT ON COLUMN "ATLAS_PANDA"."CARBON_REGION_EMISSIONS"."VALUE" IS 'Emissions value in gCO2/kWh';
-- Wait up to 30 s for DDL locks instead of failing immediately
ALTER SESSION set DDL_LOCK_TIMEOUT = 30;
-- Per-job carbon accounting columns (regional and global gCO2 estimates).
-- Fix: the type keyword had been swallowed into the quoted identifier
-- ("GCO2_GLOBAL NUMBER"(10, 2)) leaving a dangling (10, 2) — invalid SQL.
-- Quote only the identifier, matching the unquoted archive statement below.
ALTER TABLE "ATLAS_PANDA"."JOBSDEFINED4" ADD ("GCO2_REGIONAL" NUMBER(10, 2), "GCO2_GLOBAL" NUMBER(10, 2));
ALTER TABLE "ATLAS_PANDA"."JOBSWAITING4" ADD ("GCO2_REGIONAL" NUMBER(10, 2), "GCO2_GLOBAL" NUMBER(10, 2));
ALTER TABLE "ATLAS_PANDA"."JOBSACTIVE4" ADD ("GCO2_REGIONAL" NUMBER(10, 2), "GCO2_GLOBAL" NUMBER(10, 2));
ALTER TABLE "ATLAS_PANDA"."JOBSARCHIVED4" ADD ("GCO2_REGIONAL" NUMBER(10, 2), "GCO2_GLOBAL" NUMBER(10, 2));
-- ALSO ADD THE COLUMNS TO BULKCOPY_PANDA_PARTITIONS
-- Record the 0.0.13 upgrade for all three components
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major=0, minor=0, patch=13 where component ='SCHEMA';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major=0, minor=0, patch=13 where component ='JEDI';
UPDATE "ATLAS_PANDA"."PANDADB_VERSION" SET major=0, minor=0, patch=13 where component ='SERVER';
COMMIT;
-- As ATLAS_PANDAARCH
-- Mirror the GCO2 columns on the long-term archive table
ALTER TABLE ATLAS_PANDAARCH.JOBSARCHIVED ADD (GCO2_REGIONAL NUMBER(10, 2), GCO2_GLOBAL NUMBER(10, 2));
-- Per-task metric store; one row per (task, metric), value kept as JSON.
CREATE TABLE "ATLAS_PANDA"."TASK_EVALUATION"
(
"JEDITASKID" NUMBER(11) NOT NULL,
"METRIC" VARCHAR2(128),
"VALUE_JSON" CLOB,
"TIMESTAMP" DATE,
-- enforce well-formed JSON (IS JSON condition, Oracle 12c+)
CONSTRAINT ensure_json_task_evaluation CHECK (value_json IS JSON),
CONSTRAINT TASK_EVALUATION_PK PRIMARY KEY(jeditaskid, metric)
);
COMMENT ON TABLE "ATLAS_PANDA"."TASK_EVALUATION" IS 'Evaluation values for active user tasks used to improve analysis job brokerage';
COMMENT ON COLUMN "ATLAS_PANDA"."TASK_EVALUATION"."JEDITASKID" IS 'Task id';
COMMENT ON COLUMN "ATLAS_PANDA"."TASK_EVALUATION"."METRIC" IS 'Metric being stored';
COMMENT ON COLUMN "ATLAS_PANDA"."TASK_EVALUATION"."VALUE_JSON" IS 'Value in JSON format';
COMMENT ON COLUMN "ATLAS_PANDA"."TASK_EVALUATION"."TIMESTAMP" IS 'Timestamp when metric was generated';
-- Allow DDL to wait up to 10 minutes for locks (longer than the 30 s used elsewhere)
ALTER SESSION SET DDL_LOCK_TIMEOUT=600;
ALTER TABLE ATLAS_PANDA.JEDI_DATASETS ADD NFILESMISSING NUMBER(10);
COMMENT ON COLUMN "ATLAS_PANDA"."JEDI_DATASETS"."NFILESMISSING" IS 'The number of missing files for datasets';
-- redundant after DDL (DDL auto-commits) but harmless
commit;
-- Retro-fit JSON validation on the existing SCHEDCONFIG_JSON.DATA column
ALTER TABLE ATLAS_PANDA.schedconfig_json
ADD CONSTRAINT schedconfig_data_validation check (data is JSON);
-- Task attempt history: one row per (task, attempt) with start/end time and status.
CREATE TABLE "ATLAS_PANDA"."TASK_ATTEMPTS"
(
jeditaskid NUMBER(11),
attemptnr NUMBER(6),
starttime DATE,
endtime DATE,
startstatus VARCHAR2(32),
endstatus VARCHAR2(32),
CONSTRAINT TASK_ATTEMPTS_PK PRIMARY KEY (jeditaskid, attemptnr)
)
-- Daily interval partitioning on STARTTIME: Oracle creates new partitions
-- automatically; rows before 2022-05-24 land in the single bootstrap partition.
PARTITION BY RANGE (starttime) INTERVAL (NUMTODSINTERVAL(1, 'DAY'))
(PARTITION "DATA_BEFORE_20220524" VALUES LESS THAN (TIMESTAMP '2022-05-24 00:00:00') )
;
-- Weekly scheduler job that keeps a 365-day sliding window on TASK_ATTEMPTS
-- by invoking ATLAS_PANDA.PANDA_TABLE_SL_WINDOW every Tuesday at 11:00.
-- Fix: the anonymous PL/SQL block was missing its "/" terminator, so
-- SQL*Plus/SQLcl would never execute it (compare the trigger earlier in
-- this file, which is correctly terminated).
BEGIN
    dbms_scheduler.create_job(
        job_name        => 'SL_WINDOW_TASK_ATTEMPTS_1YEAR',
        job_type        => 'PLSQL_BLOCK',
        job_action      => 'BEGIN ATLAS_PANDA.PANDA_TABLE_SL_WINDOW(''TASK_ATTEMPTS'',''STARTTIME'', 365); END; ',
        start_date      => SYSTIMESTAMP,
        repeat_interval => 'FREQ=WEEKLY; BYDAY=TUE; BYHOUR=11; BYMINUTE=0; BYSECOND=0;',
        auto_drop       => FALSE,
        enabled         => TRUE,
        comments        => 'Sustains 365 days of data sliding window on the TASK_ATTEMPTS table! The table is partitioned daily using the automatic INTERVAL approach');
END;
/
-- Widen the MEMORY_LEAK_X2 column to NUMBER(14,3) on every table that carries
-- it, in both the archive (ATLAS_PANDAARCH) and live (ATLAS_PANDA) schemas.
ALTER TABLE ATLAS_PANDAARCH.JOBSARCHIVED MODIFY memory_leak_x2 NUMBER(14, 3);
ALTER TABLE ATLAS_PANDA.JOBSDEFINED4     MODIFY memory_leak_x2 NUMBER(14, 3);
ALTER TABLE ATLAS_PANDA.JOBSWAITING4     MODIFY memory_leak_x2 NUMBER(14, 3);
ALTER TABLE ATLAS_PANDA.JOBSACTIVE4      MODIFY memory_leak_x2 NUMBER(14, 3);
ALTER TABLE ATLAS_PANDA.JOBSARCHIVED4    MODIFY memory_leak_x2 NUMBER(14, 3);
ALTER TABLE ATLAS_PANDA.JEDI_TASKS       MODIFY memory_leak_x2 NUMBER(14, 3);
-- Per-owner secrets store: one row per OWNER, payload kept as an opaque CLOB.
CREATE TABLE "ATLAS_PANDA"."SECRETS"
   ( "OWNER" VARCHAR2(60) NOT NULL ENABLE,       -- user the secret belongs to
     "UPDATED_AT" TIMESTAMP (6) NOT NULL ENABLE, -- last time DATA was written
     "DATA" CLOB,                                -- secret payload; format not defined here — confirm with the writers
     -- Improvement: name the PK explicitly (was system-generated) so errors
     -- and migrations are greppable, matching the file's other constraints.
     CONSTRAINT "SECRETS_PK" PRIMARY KEY ("OWNER")
   );
-- Pilot lifecycle tracking columns on HARVESTER_WORKERS.
-- Unquoted camelCase identifiers fold to upper case in Oracle
-- (PILOTSTARTTIME, PILOTENDTIME, PILOTSTATUS, PILOTSTATUSSYNCTIME).
ALTER TABLE ATLAS_PANDA.HARVESTER_WORKERS ADD (pilotStartTime DATE,
pilotEndTime DATE,
pilotStatus VARCHAR2(80 BYTE),
pilotStatusSyncTime DATE);
-- Metric store in the DOMA_PANDA schema, keyed by (site, gshare, metric).
CREATE TABLE "DOMA_PANDA"."METRICS"
(
computingsite VARCHAR2(128),
gshare VARCHAR2(32),
metric VARCHAR2(128),
value_json CLOB,
timestamp DATE,
-- enforce well-formed JSON (IS JSON condition, Oracle 12c+)
CONSTRAINT ensure_json_metrics CHECK (value_json IS JSON),
-- one row per (site, gshare, metric)
-- NOTE(review): the key columns are nullable — consider NOT NULL on them
CONSTRAINT METRICS_SITE_GSHARE_METRIC_UQ UNIQUE (computingsite, gshare, metric)
);