
Commit

feat: add audit log feature for IDIR administrators
hamed-valiollahi committed Nov 13, 2024
1 parent 2fd89f8 commit 9ec7770
Showing 26 changed files with 1,368 additions and 288 deletions.
121 changes: 110 additions & 11 deletions backend/lcfs/db/migrations/versions/2024-11-01-12-27_bf26425d2a14.py
@@ -1,5 +1,17 @@
 """Database-Level Audit Logging with JSON Delta
+
+Note:
+    As the table grows, automatic archiving (e.g., moving older logs to an archive table) and purging
+    (e.g., deleting very old logs) can be implemented in the future to maintain performance and manage
+    storage efficiently.
+
+Archiving:
+    - Create an `audit_log_archive` table with the same structure as `audit_log`.
+    - Use a scheduled job (e.g., with `pg_cron`) to move records older than a certain threshold (e.g., 1 month) from `audit_log` to `audit_log_archive`.
+    - Alternatively, consider creating date-based archive tables (e.g., audit_log_archive_2025_01) to organize logs by time periods.
+
+Purging:
+    - Use a scheduled job (e.g., with `pg_cron`) to delete records older than a defined retention period (e.g., 1 year) from `audit_log_archive`.
+
 Revision ID: bf26425d2a14
 Revises: 1b4d0dcf70a8
 Create Date: 2024-11-01 12:27:33.901648
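The archiving and purging the docstring describes could be wired up with pg_cron roughly as follows. This is a sketch only: it assumes the pg_cron extension is installed, and the job names, schedules, retention windows, and an already-created audit_log_archive table are illustrative, not part of this commit.

# Hypothetical pg_cron jobs; nothing below is created by this migration.
op.execute(
    """
    -- Nightly at 02:00: move audit_log rows older than 1 month to the archive.
    SELECT cron.schedule(
        'archive-audit-log',
        '0 2 * * *',
        $$
        WITH moved AS (
            DELETE FROM audit_log
            WHERE create_date < now() - INTERVAL '1 month'
            RETURNING *
        )
        INSERT INTO audit_log_archive SELECT * FROM moved;
        $$
    );
    """
)
op.execute(
    """
    -- Weekly on Sunday at 03:00: purge archived rows past a 1-year retention.
    SELECT cron.schedule(
        'purge-audit-log-archive',
        '0 3 * * 0',
        $$ DELETE FROM audit_log_archive
           WHERE create_date < now() - INTERVAL '1 year'; $$
    );
    """
)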
@@ -21,31 +33,114 @@ def upgrade() -> None:
     # Step 1: Create the audit_log table
     op.create_table(
         "audit_log",
-        sa.Column("id", sa.BigInteger(), autoincrement=True, nullable=False),
-        sa.Column("table_name", sa.Text(), nullable=False),
-        sa.Column("operation", sa.Text(), nullable=False),
-        sa.Column("row_id", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-        sa.Column("old_values", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
-        sa.Column("new_values", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
-        sa.Column("delta", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
+        sa.Column(
+            "audit_log_id",
+            sa.Integer(),
+            autoincrement=True,
+            nullable=False,
+            comment="Unique identifier for each audit log entry.",
+        ),
+        sa.Column(
+            "table_name",
+            sa.Text(),
+            nullable=False,
+            comment="Name of the table where the action occurred.",
+        ),
+        sa.Column(
+            "operation",
+            sa.Text(),
+            nullable=False,
+            comment="Type of operation: 'INSERT', 'UPDATE', or 'DELETE'.",
+        ),
+        sa.Column(
+            "row_id",
+            postgresql.JSONB(astext_type=sa.Text()),
+            nullable=False,
+            comment="Primary key of the affected row, stored as JSONB to support composite keys.",
+        ),
+        sa.Column(
+            "old_values",
+            postgresql.JSONB(astext_type=sa.Text()),
+            nullable=True,
+            comment="Previous values before the operation.",
+        ),
+        sa.Column(
+            "new_values",
+            postgresql.JSONB(astext_type=sa.Text()),
+            nullable=True,
+            comment="New values after the operation.",
+        ),
+        sa.Column(
+            "delta",
+            postgresql.JSONB(astext_type=sa.Text()),
+            nullable=True,
+            comment="JSONB delta of the changes.",
+        ),
         sa.Column(
             "create_date",
             sa.TIMESTAMP(timezone=True),
             server_default=sa.text("now()"),
             nullable=True,
+            comment="Timestamp when the audit log entry was created.",
         ),
-        sa.Column("create_user", sa.String(), nullable=True),
+        sa.Column(
+            "create_user",
+            sa.String(),
+            nullable=True,
+            comment="User who created the audit log entry.",
+        ),
         sa.Column(
             "update_date",
             sa.TIMESTAMP(timezone=True),
             server_default=sa.text("now()"),
             nullable=True,
+            comment="Timestamp when the audit log entry was last updated.",
         ),
-        sa.Column("update_user", sa.String(), nullable=True),
+        sa.Column(
+            "update_user",
+            sa.String(),
+            nullable=True,
+            comment="User who last updated the audit log entry.",
+        ),
-        sa.PrimaryKeyConstraint("id", name=op.f("pk_audit_log")),
+        sa.PrimaryKeyConstraint("audit_log_id", name=op.f("pk_audit_log")),
+        sa.UniqueConstraint(
+            "audit_log_id",
+            name=op.f("uq_audit_log_audit_log_id"),
+        ),
+        comment="Audit log capturing changes to database tables.",
     )
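For orientation, here is the shape of a hypothetical entry this table would hold after an UPDATE; the table and field values are made up for illustration:

# table_name:  "transfer"
# operation:   "UPDATE"
# row_id:      {"transfer_id": 123}
# old_values:  {"status": "Draft", "quantity": 10}
# new_values:  {"status": "Submitted", "quantity": 10}
# delta:       {"status": "Submitted"}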

     # Create indexes
     op.create_index(
-        "idx_audit_log_table_name", "audit_log", ["table_name"], unique=False
+        "idx_audit_log_table_name",
+        "audit_log",
+        ["table_name"],
+        unique=False,
     )
+    op.create_index(
+        "idx_audit_log_operation",
+        "audit_log",
+        ["operation"],
+        unique=False,
+    )
+    op.create_index(
+        "idx_audit_log_create_date",
+        "audit_log",
+        ["create_date"],
+        unique=False,
+    )
+    op.create_index(
+        "idx_audit_log_create_user",
+        "audit_log",
+        ["create_user"],
+        unique=False,
+    )
+    op.create_index(
+        "idx_audit_log_delta",
+        "audit_log",
+        ["delta"],
+        postgresql_using="gin",
+        unique=False,
+    )
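The GIN index on delta exists to make JSONB containment lookups cheap. As a usage sketch (the field name and value in the filter are hypothetical, and the connection URL is a placeholder):

from sqlalchemy import create_engine, text

engine = create_engine("postgresql://localhost/lcfs")  # placeholder URL

with engine.connect() as conn:
    # Find recent changes whose delta set a given field; the @> containment
    # operator lets the planner use the GIN index on `delta`.
    rows = conn.execute(
        text(
            "SELECT table_name, operation, create_date "
            "FROM audit_log "
            "WHERE delta @> CAST(:fragment AS JSONB) "
            "ORDER BY create_date DESC LIMIT 20"
        ),
        {"fragment": '{"compliance_status": "Approved"}'},
    )
    for row in rows:
        print(row.table_name, row.operation, row.create_date)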

# Step 2: Create JSONB_DIFF FUNCTION
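The function body is collapsed in this view. For orientation, a minimal sketch of one way such a function can be written; the parameter names and exact semantics are assumptions, not the code from this commit:

# Hypothetical jsonb_diff body: keep only the keys whose values changed,
# paired with their new values.
op.execute(
    """
    CREATE OR REPLACE FUNCTION jsonb_diff(old_row JSONB, new_row JSONB)
    RETURNS JSONB AS $$
        SELECT COALESCE(jsonb_object_agg(key, value), '{}'::jsonb)
        FROM jsonb_each(new_row)
        WHERE old_row -> key IS DISTINCT FROM value;
    $$ LANGUAGE SQL;
    """
)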
@@ -228,5 +323,9 @@ def downgrade() -> None:
op.execute("DROP FUNCTION IF EXISTS jsonb_diff;")

# Step 1 Downgrade: Drop audit_log table
op.drop_index("idx_audit_log_delta", table_name="audit_log")
op.drop_index("idx_audit_log_create_user", table_name="audit_log")
op.drop_index("idx_audit_log_create_date", table_name="audit_log")
op.drop_index("idx_audit_log_operation", table_name="audit_log")
op.drop_index("idx_audit_log_table_name", table_name="audit_log")
op.drop_table("audit_log")
63 changes: 52 additions & 11 deletions backend/lcfs/db/models/audit/AuditLog.py
@@ -1,23 +1,64 @@
 from lcfs.db.base import Auditable, BaseModel
 from sqlalchemy import (
-    BigInteger,
+    Integer,
     Column,
     Text,
 )
 from sqlalchemy.dialects.postgresql import JSONB
 
 
 class AuditLog(BaseModel, Auditable):
-    __tablename__ = "audit_log"
-    __table_args__ = {"comment": "Track changes in defined tables."}
-    id = Column(BigInteger, primary_key=True, autoincrement=True)
-    table_name = Column(Text, nullable=False)
-    operation = Column(Text, nullable=False)
-    # JSONB fields for row ID, old values, new values, and delta
-    row_id = Column(JSONB, nullable=False)
-    old_values = Column(JSONB, nullable=True)
-    new_values = Column(JSONB, nullable=True)
-    delta = Column(JSONB, nullable=True)
+    """
+    Audit log capturing changes to database tables.
+
+    As the table grows, consider implementing automatic archiving (e.g., moving older logs to an archive table)
+    and purging (e.g., deleting logs after a retention period) using tools like `pg_cron` or external schedulers.
+
+    Archiving:
+    - Create an `audit_log_archive` table with the same structure as `audit_log`.
+    - Use a scheduled job (e.g., with `pg_cron`) to move records older than a certain threshold (e.g., 1 month) from `audit_log` to `audit_log_archive`.
+    - Alternatively, consider creating date-based archive tables (e.g., audit_log_archive_2025_01) to organize logs by time periods.
+
+    Purging:
+    - Use a scheduled job (e.g., with `pg_cron`) to delete records older than a defined retention period (e.g., 1 year) from `audit_log_archive`.
+    """
+
+    __tablename__ = "audit_log"
+    __table_args__ = {"comment": "Track changes in defined tables."}
+
+    audit_log_id = Column(
+        Integer,
+        primary_key=True,
+        autoincrement=True,
+        comment="Unique identifier for each audit log entry.",
+    )
+    table_name = Column(
+        Text,
+        nullable=False,
+        comment="Name of the table where the action occurred.",
+    )
+    operation = Column(
+        Text,
+        nullable=False,
+        comment="Type of operation: 'INSERT', 'UPDATE', or 'DELETE'.",
+    )
+    row_id = Column(
+        JSONB,
+        nullable=False,
+        comment="Primary key of the affected row, stored as JSONB to support composite keys.",
+    )
+    old_values = Column(
+        JSONB,
+        nullable=True,
+        comment="Previous values before the operation.",
+    )
+    new_values = Column(
+        JSONB,
+        nullable=True,
+        comment="New values after the operation.",
+    )
+    delta = Column(
+        JSONB,
+        nullable=True,
+        comment="JSONB delta of the changes.",
+    )
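On the read side, the admin-facing feature can page through entries with an ordinary SQLAlchemy query. A hypothetical helper, assuming an async session is wired up elsewhere and that the Auditable mixin supplies create_date:

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from lcfs.db.models.audit.AuditLog import AuditLog


async def get_recent_audit_logs(
    session: AsyncSession, table_name: str, limit: int = 50
) -> list[AuditLog]:
    """Return the newest audit entries for a single table."""
    result = await session.execute(
        select(AuditLog)
        .where(AuditLog.table_name == table_name)
        .order_by(AuditLog.create_date.desc())  # create_date assumed from Auditable
        .limit(limit)
    )
    return list(result.scalars().all())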
39 changes: 0 additions & 39 deletions backend/lcfs/tests/audit_log/conftest.py

This file was deleted.

