From a1189b4baa86106a8c0e5c7c1bf90108b4991ff5 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:21:11 +0200 Subject: [PATCH 01/85] Retry postgres --- internal/api/resolver.go | 4 +- internal/manager/config/config.go | 12 +- internal/manager/init.go | 27 +- pkg/sqlite/anonymise.go | 2 +- pkg/sqlite/database.go | 132 +++-- .../migrationsPostgres/67_initial.up.sql | 475 ++++++++++++++++++ 6 files changed, 606 insertions(+), 46 deletions(-) create mode 100644 pkg/sqlite/migrationsPostgres/67_initial.up.sql diff --git a/internal/api/resolver.go b/internal/api/resolver.go index ab6eead7e5e..35d4ca065d8 100644 --- a/internal/api/resolver.go +++ b/internal/api/resolver.go @@ -319,12 +319,11 @@ func (r *queryResolver) Latestversion(ctx context.Context) (*LatestVersion, erro func (r *mutationResolver) ExecSQL(ctx context.Context, sql string, args []interface{}) (*SQLExecResult, error) { var rowsAffected *int64 - var lastInsertID *int64 db := manager.GetInstance().Database if err := r.withTxn(ctx, func(ctx context.Context) error { var err error - rowsAffected, lastInsertID, err = db.ExecSQL(ctx, sql, args) + rowsAffected, err = db.ExecSQL(ctx, sql, args) return err }); err != nil { return nil, err @@ -332,7 +331,6 @@ func (r *mutationResolver) ExecSQL(ctx context.Context, sql string, args []inter return &SQLExecResult{ RowsAffected: rowsAffected, - LastInsertID: lastInsertID, }, nil } diff --git a/internal/manager/config/config.go b/internal/manager/config/config.go index 6a568c1da0b..349c29a3bf9 100644 --- a/internal/manager/config/config.go +++ b/internal/manager/config/config.go @@ -50,7 +50,9 @@ const ( DefaultMaxSessionAge = 60 * 60 * 1 // 1 hours - Database = "database" + Database = "database" + DatabaseType = "database_type" + DatabaseConnectionString = "database_string" Exclude = "exclude" ImageExclude = "image_exclude" @@ -695,6 +697,14 @@ func (i *Config) GetDatabasePath() string { return 
i.getString(Database) } +func (i *Config) GetDatabaseType() string { + return i.getString(DatabaseType) +} + +func (i *Config) GetDatabaseConnectionString() string { + return i.getString(DatabaseConnectionString) +} + func (i *Config) GetBackupDirectoryPath() string { return i.getString(BackupDirectoryPath) } diff --git a/internal/manager/init.go b/internal/manager/init.go index dd1640ed368..668fd8f45bf 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -227,12 +227,27 @@ func (s *Manager) postInit(ctx context.Context) error { }) } - if err := s.Database.Open(s.Config.GetDatabasePath()); err != nil { - var migrationNeededErr *sqlite.MigrationNeededError - if errors.As(err, &migrationNeededErr) { - logger.Warn(err) - } else { - return err + { + var dbType = sqlite.DatabaseType(strings.ToUpper(s.Config.GetDatabaseType())) + if dbType != sqlite.SqliteBackend && dbType != sqlite.PostgresBackend { + dbType = sqlite.SqliteBackend + } + + var err error + if dbType == sqlite.SqliteBackend { + sqlite.RegisterSqliteDialect() + err = s.Database.OpenSqlite(s.Config.GetDatabasePath()) + } else if dbType == sqlite.PostgresBackend { + err = s.Database.OpenPostgres(s.Config.GetDatabaseConnectionString()) + } + + if err != nil { + var migrationNeededErr *sqlite.MigrationNeededError + if errors.As(err, &migrationNeededErr) { + logger.Warn(err) + } else { + return err + } } } diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index 519489abfc6..be692e38baa 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -33,7 +33,7 @@ func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { } newDB := NewDatabase() - if err := newDB.Open(outPath); err != nil { + if err := newDB.OpenSqlite(outPath); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index eed335f0973..91d832b44df 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -10,6 
+10,8 @@ import ( "path/filepath" "time" + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/dialect/sqlite3" "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/fsutil" @@ -36,7 +38,7 @@ const ( var appSchemaVersion uint = 67 -//go:embed migrations/*.sql +//go:embed migrations/*.sql migrationsPostgres/*.sql var migrationsBox embed.FS var ( @@ -81,12 +83,21 @@ type storeRepository struct { Group *GroupStore } +type DatabaseType string + +const ( + PostgresBackend DatabaseType = "POSTGRES" + SqliteBackend DatabaseType = "SQLITE" +) + type Database struct { *storeRepository - readDB *sqlx.DB - writeDB *sqlx.DB - dbPath string + readDB *sqlx.DB + writeDB *sqlx.DB + dbPath string + dbType DatabaseType + dbString string schemaVersion uint @@ -140,15 +151,36 @@ func (db *Database) Ready() error { return nil } +func (db *Database) OpenPostgres(dbConnector string) error { + db.dbType = PostgresBackend + db.dbString = dbConnector + + dialect = goqu.Dialect("postgres") + + return db.OpenGeneric() +} + +func RegisterSqliteDialect() { + opts := sqlite3.DialectOptions() + opts.SupportsReturn = true + goqu.RegisterDialect("sqlite3new", opts) +} + +func (db *Database) OpenSqlite(dbPath string) error { + db.dbType = SqliteBackend + db.dbPath = dbPath + + dialect = goqu.Dialect("sqlite3new") + + return db.OpenGeneric() +} + // Open initializes the database. If the database is new, then it // performs a full migration to the latest schema version. Otherwise, any // necessary migrations must be run separately using RunMigrations. // Returns true if the database is new. 
-func (db *Database) Open(dbPath string) error { - db.lock() - defer db.unlock() - - db.dbPath = dbPath +func (db *Database) OpenGeneric() error { + goqu.SetDefaultPrepared(false) databaseSchemaVersion, err := db.getDatabaseSchemaVersion() if err != nil { @@ -234,26 +266,43 @@ func (db *Database) Close() error { return nil } -func (db *Database) open(disableForeignKeys bool, writable bool) (*sqlx.DB, error) { +func (db *Database) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { + // Fail-safe + err = errors.New("missing backend type") + // https://github.com/mattn/go-sqlite3 - url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" - if !disableForeignKeys { - url += "&_fk=true" - } + if db.dbType == SqliteBackend { + url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" + if !disableForeignKeys { + url += "&_fk=true" + } - if writable { - url += "&_txlock=immediate" - } else { - url += "&mode=ro" - } + if writable { + url += "&_txlock=immediate" + } else { + url += "&mode=ro" + } - // #5155 - set the cache size if the environment variable is set - // default is -2000 which is 2MB - if cacheSize := os.Getenv(cacheSizeEnv); cacheSize != "" { - url += "&_cache_size=" + cacheSize + // #5155 - set the cache size if the environment variable is set + // default is -2000 which is 2MB + if cacheSize := os.Getenv(cacheSizeEnv); cacheSize != "" { + url += "&_cache_size=" + cacheSize + } + + conn, err = sqlx.Open(sqlite3Driver, url) + } + if db.dbType == PostgresBackend { + conn, err = sqlx.Open("postgres", db.dbString) + if err == nil { + if disableForeignKeys { + conn.Exec("SET session_replication_role = replica;") + } + if !writable { + conn.Exec("SET default_transaction_read_only = ON;") + } + } } - conn, err := sqlx.Open(sqlite3Driver, url) if err != nil { return nil, fmt.Errorf("db.Open(): %w", err) } @@ -299,6 +348,11 @@ func (db *Database) openWriteDB() error { } func (db *Database) Remove() error { + if 
db.dbType == PostgresBackend { + logger.Warn("Postgres backend detected, ignoring Remove request") + return nil + } + databasePath := db.dbPath err := db.Close() @@ -326,12 +380,16 @@ func (db *Database) Remove() error { } func (db *Database) Reset() error { - databasePath := db.dbPath + if db.dbType == PostgresBackend { + logger.Warn("Postgres backend detected, ignoring Reset request") + return nil + } + if err := db.Remove(); err != nil { return err } - if err := db.Open(databasePath); err != nil { + if err := db.OpenSqlite(db.dbPath); err != nil { return fmt.Errorf("[reset DB] unable to initialize: %w", err) } @@ -341,6 +399,11 @@ func (db *Database) Reset() error { // Backup the database. If db is nil, then uses the existing database // connection. func (db *Database) Backup(backupPath string) (err error) { + if db.dbType == PostgresBackend { + logger.Warn("Postgres backend detected, ignoring Backup request") + return nil + } + thisDB := db.writeDB if thisDB == nil { thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.dbPath+"?_fk=true") @@ -370,6 +433,11 @@ func (db *Database) Anonymise(outPath string) error { } func (db *Database) RestoreFromBackup(backupPath string) error { + if db.dbType == PostgresBackend { + logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") + return nil + } + logger.Infof("Restoring backup database %s into %s", backupPath, db.dbPath) return os.Rename(backupPath, db.dbPath) } @@ -434,12 +502,12 @@ func (db *Database) Analyze(ctx context.Context) error { return err } -func (db *Database) ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, *int64, error) { +func (db *Database) ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, error) { wrapper := dbWrapperType{} result, err := wrapper.Exec(ctx, query, args...) 
if err != nil { - return nil, nil, err + return nil, err } var rowsAffected *int64 @@ -448,13 +516,7 @@ func (db *Database) ExecSQL(ctx context.Context, query string, args []interface{ rowsAffected = &ra } - var lastInsertId *int64 - li, err := result.LastInsertId() - if err == nil { - lastInsertId = &li - } - - return rowsAffected, lastInsertId, nil + return rowsAffected, nil } func (db *Database) QuerySQL(ctx context.Context, query string, args []interface{}) ([]string, [][]interface{}, error) { diff --git a/pkg/sqlite/migrationsPostgres/67_initial.up.sql b/pkg/sqlite/migrationsPostgres/67_initial.up.sql new file mode 100644 index 00000000000..894a3c251e9 --- /dev/null +++ b/pkg/sqlite/migrationsPostgres/67_initial.up.sql @@ -0,0 +1,475 @@ +CREATE COLLATION NATURAL_CI (provider = icu, locale = 'en@colNumeric=yes'); +CREATE TABLE blobs ( + checksum varchar(255) NOT NULL PRIMARY KEY, + blob bytea +); +CREATE TABLE tags ( + id serial not null primary key, + name varchar(255), + created_at timestamp not null, + updated_at timestamp not null, + ignore_auto_tag boolean not null default FALSE, + description text, + image_blob varchar(255) + REFERENCES blobs(checksum), + favorite boolean not null default false +); +CREATE TABLE folders ( + id serial not null primary key, + path varchar(255) NOT NULL, + parent_folder_id integer, + mod_time timestamp not null, + created_at timestamp not null, + updated_at timestamp not null, + foreign key(parent_folder_id) references folders(id) on delete SET NULL +); +CREATE TABLE files ( + id serial not null primary key, + basename varchar(255) NOT NULL, + zip_file_id integer, + parent_folder_id integer not null, + size bigint NOT NULL, + mod_time timestamp not null, + created_at timestamp not null, + updated_at timestamp not null, + foreign key(zip_file_id) references files(id), + foreign key(parent_folder_id) references folders(id), + CHECK (basename != '') +); +ALTER TABLE folders ADD COLUMN zip_file_id integer REFERENCES files(id); 
+CREATE TABLE IF NOT EXISTS performers ( + id serial not null primary key, + name varchar(255) not null, + disambiguation varchar(255), + gender varchar(20), + birthdate date, + ethnicity varchar(255), + country varchar(255), + eye_color varchar(255), + height int, + measurements varchar(255), + fake_tits varchar(255), + career_length varchar(255), + tattoos varchar(255), + piercings varchar(255), + favorite boolean not null default FALSE, + created_at timestamp not null, + updated_at timestamp not null, + details text, + death_date date, + hair_color varchar(255), + weight integer, + rating smallint, + ignore_auto_tag boolean not null default FALSE, + image_blob varchar(255) REFERENCES blobs(checksum), + penis_length float, + circumcised varchar(10) +); +CREATE TABLE IF NOT EXISTS studios ( + id serial not null primary key, + name VARCHAR(255) NOT NULL, + url VARCHAR(255), + parent_id INTEGER DEFAULT NULL REFERENCES studios(id) ON DELETE SET NULL, + created_at timestamp NOT NULL, + updated_at timestamp NOT NULL, + details TEXT, + rating smallint, + ignore_auto_tag BOOLEAN NOT NULL DEFAULT FALSE, + image_blob VARCHAR(255) REFERENCES blobs(checksum), + favorite boolean not null default FALSE, + CHECK (id != parent_id) +); +CREATE TABLE IF NOT EXISTS saved_filters ( + id serial not null primary key, + name varchar(510) not null, + mode varchar(255) not null, + find_filter bytea, + object_filter bytea, + ui_options bytea +); +CREATE TABLE IF NOT EXISTS images ( + id serial not null primary key, + title varchar(255), + rating smallint, + studio_id integer, + o_counter smallint not null default 0, + organized boolean not null default FALSE, + created_at timestamp not null, + updated_at timestamp not null, + date date, code text, photographer text, details text, + foreign key(studio_id) references studios(id) on delete SET NULL +); +CREATE TABLE image_urls ( + image_id integer NOT NULL, + position integer NOT NULL, + url varchar(255) NOT NULL, + foreign key(image_id)
references images(id) on delete CASCADE, + PRIMARY KEY(image_id, position, url) +); +CREATE TABLE IF NOT EXISTS galleries ( + id serial not null primary key, + folder_id integer, + title varchar(255), + date date, + details text, + studio_id integer, + rating smallint, + organized boolean not null default FALSE, + created_at timestamp not null, + updated_at timestamp not null, code text, photographer text, + foreign key(studio_id) references studios(id) on delete SET NULL, + foreign key(folder_id) references folders(id) on delete SET NULL +); +CREATE TABLE gallery_urls ( + gallery_id integer NOT NULL, + position integer NOT NULL, + url varchar(255) NOT NULL, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + PRIMARY KEY(gallery_id, position, url) +); +CREATE TABLE IF NOT EXISTS scenes ( + id serial not null primary key, + title varchar(255), + details text, + date date, + rating smallint, + studio_id integer, + organized boolean not null default FALSE, + created_at timestamp not null, + updated_at timestamp not null, + code text, + director text, + resume_time float not null default 0, + play_duration float not null default 0, + cover_blob varchar(255) REFERENCES blobs(checksum), + foreign key(studio_id) references studios(id) on delete SET NULL +); +CREATE TABLE IF NOT EXISTS groups ( + id serial not null primary key, + name varchar(255) not null, + aliases varchar(255), + duration integer, + date date, + rating smallint, + studio_id integer REFERENCES studios(id) ON DELETE SET NULL, + director varchar(255), + "description" text, + created_at timestamp not null, + updated_at timestamp not null, + front_image_blob varchar(255) REFERENCES blobs(checksum), + back_image_blob varchar(255) REFERENCES blobs(checksum) +); +CREATE TABLE IF NOT EXISTS group_urls ( + "group_id" integer NOT NULL, + position integer NOT NULL, + url varchar(255) NOT NULL, + foreign key("group_id") references "groups"(id) on delete CASCADE, + PRIMARY KEY("group_id", 
position, url) +); +CREATE TABLE IF NOT EXISTS groups_tags ( + "group_id" integer NOT NULL, + tag_id integer NOT NULL, + foreign key("group_id") references "groups"(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY("group_id", tag_id) +); +CREATE TABLE performer_urls ( + performer_id integer NOT NULL, + position integer NOT NULL, + url varchar(255) NOT NULL, + foreign key(performer_id) references performers(id) on delete CASCADE, + PRIMARY KEY(performer_id, position, url) +); +CREATE TABLE studios_tags ( + studio_id integer NOT NULL, + tag_id integer NOT NULL, + foreign key(studio_id) references studios(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(studio_id, tag_id) +); +CREATE TABLE IF NOT EXISTS scenes_view_dates ( + scene_id integer not null, + view_date timestamp not null, + foreign key(scene_id) references scenes(id) on delete CASCADE +); +CREATE TABLE IF NOT EXISTS scenes_o_dates ( + scene_id integer not null, + o_date timestamp not null, + foreign key(scene_id) references scenes(id) on delete CASCADE +); +CREATE TABLE performer_stash_ids ( + performer_id integer, + endpoint varchar(255), + stash_id varchar(36), + foreign key(performer_id) references performers(id) on delete CASCADE +); +CREATE TABLE studio_stash_ids ( + studio_id integer, + endpoint varchar(255), + stash_id varchar(36), + foreign key(studio_id) references studios(id) on delete CASCADE +); +CREATE TABLE tags_relations ( + parent_id integer, + child_id integer, + primary key (parent_id, child_id), + foreign key (parent_id) references tags(id) on delete cascade, + foreign key (child_id) references tags(id) on delete cascade +); +CREATE TABLE files_fingerprints ( + file_id integer NOT NULL, + type varchar(255) NOT NULL, + fingerprint bytea NOT NULL, + foreign key(file_id) references files(id) on delete CASCADE, + PRIMARY KEY (file_id, type, fingerprint) +); +CREATE TABLE video_files ( + file_id 
integer NOT NULL primary key, + duration float NOT NULL, + video_codec varchar(255) NOT NULL, + format varchar(255) NOT NULL, + audio_codec varchar(255) NOT NULL, + width smallint NOT NULL, + height smallint NOT NULL, + frame_rate float NOT NULL, + bit_rate integer NOT NULL, + interactive boolean not null default FALSE, + interactive_speed int, + foreign key(file_id) references files(id) on delete CASCADE +); +CREATE TABLE video_captions ( + file_id integer NOT NULL, + language_code varchar(255) NOT NULL, + filename varchar(255) NOT NULL, + caption_type varchar(255) NOT NULL, + primary key (file_id, language_code, caption_type), + foreign key(file_id) references video_files(file_id) on delete CASCADE +); +CREATE TABLE image_files ( + file_id integer NOT NULL primary key, + format varchar(255) NOT NULL, + width smallint NOT NULL, + height smallint NOT NULL, + foreign key(file_id) references files(id) on delete CASCADE +); +CREATE TABLE images_files ( + image_id integer NOT NULL, + file_id integer NOT NULL, + "primary" boolean NOT NULL, + foreign key(image_id) references images(id) on delete CASCADE, + foreign key(file_id) references files(id) on delete CASCADE, + PRIMARY KEY(image_id, file_id) +); +CREATE TABLE galleries_files ( + gallery_id integer NOT NULL, + file_id integer NOT NULL, + "primary" boolean NOT NULL, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + foreign key(file_id) references files(id) on delete CASCADE, + PRIMARY KEY(gallery_id, file_id) +); +CREATE TABLE scenes_files ( + scene_id integer NOT NULL, + file_id integer NOT NULL, + "primary" boolean NOT NULL, + foreign key(scene_id) references scenes(id) on delete CASCADE, + foreign key(file_id) references files(id) on delete CASCADE, + PRIMARY KEY(scene_id, file_id) +); +CREATE TABLE IF NOT EXISTS performers_scenes ( + performer_id integer, + scene_id integer, + foreign key(performer_id) references performers(id) on delete CASCADE, + foreign key(scene_id) references 
scenes(id) on delete CASCADE, + PRIMARY KEY (scene_id, performer_id) +); +CREATE TABLE IF NOT EXISTS scene_markers ( + id serial not null primary key, + title VARCHAR(255) NOT NULL, + seconds FLOAT NOT NULL, + primary_tag_id INTEGER NOT NULL, + scene_id INTEGER NOT NULL, + created_at timestamp NOT NULL, + updated_at timestamp NOT NULL, + FOREIGN KEY(primary_tag_id) REFERENCES tags(id), + FOREIGN KEY(scene_id) REFERENCES scenes(id) +); +CREATE TABLE IF NOT EXISTS scene_markers_tags ( + scene_marker_id integer, + tag_id integer, + foreign key(scene_marker_id) references scene_markers(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(scene_marker_id, tag_id) +); +CREATE TABLE IF NOT EXISTS scenes_tags ( + scene_id integer, + tag_id integer, + foreign key(scene_id) references scenes(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(scene_id, tag_id) +); +CREATE TABLE IF NOT EXISTS groups_scenes ( + "group_id" integer, + scene_id integer, + scene_index smallint, + foreign key("group_id") references "groups"(id) on delete cascade, + foreign key(scene_id) references scenes(id) on delete cascade, + PRIMARY KEY("group_id", scene_id) +); +CREATE TABLE IF NOT EXISTS performers_images ( + performer_id integer, + image_id integer, + foreign key(performer_id) references performers(id) on delete CASCADE, + foreign key(image_id) references images(id) on delete CASCADE, + PRIMARY KEY(image_id, performer_id) +); +CREATE TABLE IF NOT EXISTS images_tags ( + image_id integer, + tag_id integer, + foreign key(image_id) references images(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(image_id, tag_id) +); +CREATE TABLE IF NOT EXISTS scene_stash_ids ( + scene_id integer NOT NULL, + endpoint varchar(255) NOT NULL, + stash_id varchar(36) NOT NULL, + foreign key(scene_id) references scenes(id) on delete CASCADE, + PRIMARY KEY(scene_id, endpoint) 
+); +CREATE TABLE IF NOT EXISTS scenes_galleries ( + scene_id integer NOT NULL, + gallery_id integer NOT NULL, + foreign key(scene_id) references scenes(id) on delete CASCADE, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + PRIMARY KEY(scene_id, gallery_id) +); +CREATE TABLE IF NOT EXISTS galleries_images ( + gallery_id integer NOT NULL, + image_id integer NOT NULL, + cover boolean not null default FALSE, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + foreign key(image_id) references images(id) on delete CASCADE, + PRIMARY KEY(gallery_id, image_id) +); +CREATE TABLE IF NOT EXISTS performers_galleries ( + performer_id integer NOT NULL, + gallery_id integer NOT NULL, + foreign key(performer_id) references performers(id) on delete CASCADE, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + PRIMARY KEY(gallery_id, performer_id) +); +CREATE TABLE IF NOT EXISTS galleries_tags ( + gallery_id integer NOT NULL, + tag_id integer NOT NULL, + foreign key(gallery_id) references galleries(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(gallery_id, tag_id) +); +CREATE TABLE IF NOT EXISTS performers_tags ( + performer_id integer NOT NULL, + tag_id integer NOT NULL, + foreign key(performer_id) references performers(id) on delete CASCADE, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(performer_id, tag_id) +); +CREATE TABLE IF NOT EXISTS tag_aliases ( + tag_id integer NOT NULL, + alias varchar(255) NOT NULL, + foreign key(tag_id) references tags(id) on delete CASCADE, + PRIMARY KEY(tag_id, alias) +); +CREATE TABLE IF NOT EXISTS studio_aliases ( + studio_id integer NOT NULL, + alias varchar(255) NOT NULL, + foreign key(studio_id) references studios(id) on delete CASCADE, + PRIMARY KEY(studio_id, alias) +); +CREATE TABLE performer_aliases ( + performer_id integer NOT NULL, + alias varchar(255) NOT NULL, + foreign key(performer_id) 
references performers(id) on delete CASCADE, + PRIMARY KEY(performer_id, alias) +); +CREATE TABLE galleries_chapters ( + id serial not null primary key, + title varchar(255) not null, + image_index integer not null, + gallery_id integer not null, + created_at timestamp not null, + updated_at timestamp not null, + foreign key(gallery_id) references galleries(id) on delete CASCADE +); +CREATE TABLE scene_urls ( + scene_id integer NOT NULL, + position integer NOT NULL, + url varchar(255) NOT NULL, + foreign key(scene_id) references scenes(id) on delete CASCADE, + PRIMARY KEY(scene_id, position, url) +); +CREATE TABLE groups_relations ( + containing_id integer not null, + sub_id integer not null, + order_index integer not null, + description varchar(255), + primary key (containing_id, sub_id), + foreign key (containing_id) references groups(id) on delete cascade, + foreign key (sub_id) references groups(id) on delete cascade, + check (containing_id != sub_id) +); +CREATE INDEX index_tags_on_name on tags (name); +CREATE INDEX index_folders_on_parent_folder_id on folders (parent_folder_id); +CREATE UNIQUE INDEX index_folders_on_path_unique on folders (path); +CREATE UNIQUE INDEX index_files_zip_basename_unique ON files (zip_file_id, parent_folder_id, basename) WHERE zip_file_id IS NOT NULL; +CREATE UNIQUE INDEX index_files_on_parent_folder_id_basename_unique on files (parent_folder_id, basename); +CREATE INDEX index_files_on_basename on files (basename); +CREATE INDEX index_folders_on_zip_file_id on folders (zip_file_id) WHERE zip_file_id IS NOT NULL; +CREATE INDEX index_fingerprint_type_fingerprint ON files_fingerprints (type, fingerprint); +CREATE INDEX index_images_files_on_file_id on images_files (file_id); +CREATE UNIQUE INDEX unique_index_images_files_on_primary on images_files (image_id) WHERE "primary" = TRUE; +CREATE INDEX index_galleries_files_file_id ON galleries_files (file_id); +CREATE UNIQUE INDEX unique_index_galleries_files_on_primary on galleries_files 
(gallery_id) WHERE "primary" = TRUE; +CREATE INDEX index_scenes_files_file_id ON scenes_files (file_id); +CREATE UNIQUE INDEX unique_index_scenes_files_on_primary on scenes_files (scene_id) WHERE "primary" = TRUE; +CREATE INDEX index_performer_stash_ids_on_performer_id ON performer_stash_ids (performer_id); +CREATE INDEX index_studio_stash_ids_on_studio_id ON studio_stash_ids (studio_id); +CREATE INDEX index_performers_scenes_on_performer_id on performers_scenes (performer_id); +CREATE INDEX index_scene_markers_tags_on_tag_id on scene_markers_tags (tag_id); +CREATE INDEX index_scenes_tags_on_tag_id on scenes_tags (tag_id); +CREATE INDEX index_movies_scenes_on_movie_id on groups_scenes (group_id); +CREATE INDEX index_performers_images_on_performer_id on performers_images (performer_id); +CREATE INDEX index_images_tags_on_tag_id on images_tags (tag_id); +CREATE INDEX index_scenes_galleries_on_gallery_id on scenes_galleries (gallery_id); +CREATE INDEX index_galleries_images_on_image_id on galleries_images (image_id); +CREATE INDEX index_performers_galleries_on_performer_id on performers_galleries (performer_id); +CREATE INDEX index_galleries_tags_on_tag_id on galleries_tags (tag_id); +CREATE INDEX index_performers_tags_on_tag_id on performers_tags (tag_id); +CREATE UNIQUE INDEX tag_aliases_alias_unique on tag_aliases (alias); +CREATE UNIQUE INDEX studio_aliases_alias_unique on studio_aliases (alias); +CREATE INDEX performer_aliases_alias on performer_aliases (alias); +CREATE INDEX index_galleries_chapters_on_gallery_id on galleries_chapters (gallery_id); +CREATE INDEX scene_urls_url on scene_urls (url); +CREATE INDEX index_scene_markers_on_primary_tag_id ON scene_markers(primary_tag_id); +CREATE INDEX index_scene_markers_on_scene_id ON scene_markers(scene_id); +CREATE UNIQUE INDEX index_studios_on_name_unique ON studios(name); +CREATE UNIQUE INDEX index_saved_filters_on_mode_name_unique on saved_filters (mode, name); +CREATE INDEX image_urls_url on image_urls (url); 
+CREATE INDEX index_images_on_studio_id on images (studio_id); +CREATE INDEX gallery_urls_url on gallery_urls (url); +CREATE INDEX index_galleries_on_studio_id on galleries (studio_id); +CREATE UNIQUE INDEX index_galleries_on_folder_id_unique on galleries (folder_id); +CREATE INDEX index_scenes_on_studio_id on scenes (studio_id); +CREATE INDEX performers_urls_url on performer_urls (url); +CREATE UNIQUE INDEX performers_name_disambiguation_unique on performers (name, disambiguation) WHERE disambiguation IS NOT NULL; +CREATE UNIQUE INDEX performers_name_unique on performers (name) WHERE disambiguation IS NULL; +CREATE INDEX index_studios_tags_on_tag_id on studios_tags (tag_id); +CREATE INDEX index_scenes_view_dates ON scenes_view_dates (scene_id); +CREATE INDEX index_scenes_o_dates ON scenes_o_dates (scene_id); +CREATE INDEX index_groups_on_name ON groups(name); +CREATE INDEX index_groups_on_studio_id on groups (studio_id); +CREATE INDEX group_urls_url on group_urls (url); +CREATE INDEX index_groups_tags_on_tag_id on groups_tags (tag_id); +CREATE INDEX index_groups_tags_on_movie_id on groups_tags (group_id); +CREATE UNIQUE INDEX index_galleries_images_gallery_id_cover on galleries_images (gallery_id, cover) WHERE cover = TRUE; +CREATE INDEX index_groups_relations_sub_id ON groups_relations (sub_id); +CREATE UNIQUE INDEX index_groups_relations_order_index_unique ON groups_relations (containing_id, order_index); From a45658fee410b703f7e4ecf5f58b6cf10fbef52f Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 3 Oct 2024 02:56:25 +0200 Subject: [PATCH 02/85] Better init --- go.mod | 17 ++- go.sum | 28 ++-- internal/manager/init.go | 44 +++---- pkg/sqlite/anonymise.go | 4 +- pkg/sqlite/database.go | 220 ++++++-------------------------- pkg/sqlite/database_postgres.go | 72 +++++++++++ pkg/sqlite/database_sqlite.go | 152 ++++++++++++++++++++++ pkg/sqlite/tx.go | 40 ++++-- 8 files changed, 351 insertions(+), 226 deletions(-) 
create mode 100644 pkg/sqlite/database_postgres.go create mode 100644 pkg/sqlite/database_sqlite.go diff --git a/go.mod b/go.mod index 7f7d6170332..c81dbac0986 100644 --- a/go.mod +++ b/go.mod @@ -51,16 +51,22 @@ require ( github.com/vektra/mockery/v2 v2.10.0 github.com/xWTF/chardet v0.0.0-20230208095535-c780f2ac244e github.com/zencoder/go-dash/v3 v3.0.2 - golang.org/x/crypto v0.24.0 + golang.org/x/crypto v0.27.0 golang.org/x/image v0.18.0 golang.org/x/net v0.26.0 - golang.org/x/sys v0.21.0 - golang.org/x/term v0.21.0 - golang.org/x/text v0.16.0 + golang.org/x/sys v0.25.0 + golang.org/x/term v0.24.0 + golang.org/x/text v0.18.0 gopkg.in/guregu/null.v4 v4.0.0 gopkg.in/yaml.v2 v2.4.0 ) +require ( + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect +) + require ( github.com/agnivade/levenshtein v1.1.1 // indirect github.com/antchfx/xpath v1.2.3 // indirect @@ -82,6 +88,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgx/v5 v5.7.1 github.com/josharian/intern v1.0.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -112,7 +119,7 @@ require ( github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/sync v0.7.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/tools v0.22.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 8c3b00d61f8..caafd68bbd8 100644 --- a/go.sum +++ b/go.sum @@ -409,6 +409,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1: github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -728,8 +736,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod 
h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -854,8 +862,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -947,13 +955,13 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= 
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -966,8 +974,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git 
a/internal/manager/init.go b/internal/manager/init.go index 668fd8f45bf..d3e0ce23797 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -35,7 +35,22 @@ import ( func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { ctx := context.TODO() - db := sqlite.NewDatabase() + var db *sqlite.Database + + { + var dbType = sqlite.DatabaseType(strings.ToUpper(cfg.GetDatabaseType())) + if dbType != sqlite.SqliteBackend && dbType != sqlite.PostgresBackend { + dbType = sqlite.SqliteBackend + } + + if dbType == sqlite.SqliteBackend { + sqlite.RegisterSqliteDialect() + db = sqlite.NewSQLiteDatabase(cfg.GetDatabasePath()) + } else if dbType == sqlite.PostgresBackend { + db = sqlite.NewPostgresDatabase(cfg.GetDatabaseConnectionString()) + } + } + repo := db.Repository() // start with empty paths @@ -227,27 +242,12 @@ func (s *Manager) postInit(ctx context.Context) error { }) } - { - var dbType = sqlite.DatabaseType(strings.ToUpper(s.Config.GetDatabaseType())) - if dbType != sqlite.SqliteBackend && dbType != sqlite.PostgresBackend { - dbType = sqlite.SqliteBackend - } - - var err error - if dbType == sqlite.SqliteBackend { - sqlite.RegisterSqliteDialect() - err = s.Database.OpenSqlite(s.Config.GetDatabasePath()) - } else if dbType == sqlite.PostgresBackend { - err = s.Database.OpenPostgres(s.Config.GetDatabaseConnectionString()) - } - - if err != nil { - var migrationNeededErr *sqlite.MigrationNeededError - if errors.As(err, &migrationNeededErr) { - logger.Warn(err) - } else { - return err - } + if err := s.Database.Open(); err != nil { + var migrationNeededErr *sqlite.MigrationNeededError + if errors.As(err, &migrationNeededErr) { + logger.Warn(err) + } else { + return err } } diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index be692e38baa..23a4a6d34f5 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -32,8 +32,8 @@ func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { return nil, 
fmt.Errorf("vacuuming into %s: %w", outPath, err) } - newDB := NewDatabase() - if err := newDB.OpenSqlite(outPath); err != nil { + newDB := NewSQLiteDatabase(db.dbPath) + if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 91d832b44df..b5a172fd942 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -6,16 +6,13 @@ import ( "embed" "errors" "fmt" - "os" - "path/filepath" "time" "github.com/doug-martin/goqu/v9" - "github.com/doug-martin/goqu/v9/dialect/sqlite3" "github.com/jmoiron/sqlx" - "github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" + "github.com/stashapp/stash/pkg/models" ) const ( @@ -90,8 +87,48 @@ const ( SqliteBackend DatabaseType = "SQLITE" ) +type databaseFunctions interface { + Analyze(ctx context.Context) error + Anonymise(outPath string) error + AnonymousDatabasePath(backupDirectoryPath string) string + AppSchemaVersion() uint + Backup(backupPath string) (err error) + Begin(ctx context.Context, writable bool) (context.Context, error) + Open() error + Close() error + Commit(ctx context.Context) error + DatabaseBackupPath(backupDirectoryPath string) string + DatabasePath() string + ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, error) + IsLocked(err error) bool + Optimise(ctx context.Context) error + QuerySQL(ctx context.Context, query string, args []interface{}) ([]string, [][]interface{}, error) + ReInitialise() error + Ready() error + Remove() error + Repository() models.Repository + Reset() error + RestoreFromBackup(backupPath string) error + Rollback(ctx context.Context) error + RunAllMigrations() error + SetBlobStoreOptions(options BlobStoreOptions) + Vacuum(ctx context.Context) error + Version() uint + WithDatabase(ctx context.Context) (context.Context, error) + getDatabaseSchemaVersion() (uint, error) + initialise() error + lock() + needsMigration() bool + 
open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) + openReadDB() error + openWriteDB() error + txnComplete(ctx context.Context) + unlock() +} + type Database struct { *storeRepository + databaseFunctions readDB *sqlx.DB writeDB *sqlx.DB @@ -151,35 +188,11 @@ func (db *Database) Ready() error { return nil } -func (db *Database) OpenPostgres(dbConnector string) error { - db.dbType = PostgresBackend - db.dbString = dbConnector - - dialect = goqu.Dialect("postgres") - - return db.OpenGeneric() -} - -func RegisterSqliteDialect() { - opts := sqlite3.DialectOptions() - opts.SupportsReturn = true - goqu.RegisterDialect("sqlite3new", opts) -} - -func (db *Database) OpenSqlite(dbPath string) error { - db.dbType = SqliteBackend - db.dbPath = dbPath - - dialect = goqu.Dialect("sqlite3new") - - return db.OpenGeneric() -} - // Open initializes the database. If the database is new, then it // performs a full migration to the latest schema version. Otherwise, any // necessary migrations must be run separately using RunMigrations. // Returns true if the database is new. 
-func (db *Database) OpenGeneric() error { +func (db *Database) Open() error { goqu.SetDefaultPrepared(false) databaseSchemaVersion, err := db.getDatabaseSchemaVersion() @@ -266,50 +279,6 @@ func (db *Database) Close() error { return nil } -func (db *Database) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { - // Fail-safe - err = errors.New("missing backend type") - - // https://github.com/mattn/go-sqlite3 - if db.dbType == SqliteBackend { - url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" - if !disableForeignKeys { - url += "&_fk=true" - } - - if writable { - url += "&_txlock=immediate" - } else { - url += "&mode=ro" - } - - // #5155 - set the cache size if the environment variable is set - // default is -2000 which is 2MB - if cacheSize := os.Getenv(cacheSizeEnv); cacheSize != "" { - url += "&_cache_size=" + cacheSize - } - - conn, err = sqlx.Open(sqlite3Driver, url) - } - if db.dbType == PostgresBackend { - conn, err = sqlx.Open("postgres", db.dbString) - if err == nil { - if disableForeignKeys { - conn.Exec("SET session_replication_role = replica;") - } - if !writable { - conn.Exec("SET default_transaction_read_only = ON;") - } - } - } - - if err != nil { - return nil, fmt.Errorf("db.Open(): %w", err) - } - - return conn, nil -} - func (db *Database) initialise() error { if err := db.openReadDB(); err != nil { return fmt.Errorf("opening read database: %w", err) @@ -347,81 +316,6 @@ func (db *Database) openWriteDB() error { return err } -func (db *Database) Remove() error { - if db.dbType == PostgresBackend { - logger.Warn("Postgres backend detected, ignoring Remove request") - return nil - } - - databasePath := db.dbPath - err := db.Close() - - if err != nil { - return fmt.Errorf("error closing database: %w", err) - } - - err = os.Remove(databasePath) - if err != nil { - return fmt.Errorf("error removing database: %w", err) - } - - // remove the -shm, -wal files ( if they exist ) - walFiles := 
[]string{databasePath + "-shm", databasePath + "-wal"} - for _, wf := range walFiles { - if exists, _ := fsutil.FileExists(wf); exists { - err = os.Remove(wf) - if err != nil { - return fmt.Errorf("error removing database: %w", err) - } - } - } - - return nil -} - -func (db *Database) Reset() error { - if db.dbType == PostgresBackend { - logger.Warn("Postgres backend detected, ignoring Reset request") - return nil - } - - if err := db.Remove(); err != nil { - return err - } - - if err := db.OpenSqlite(db.dbPath); err != nil { - return fmt.Errorf("[reset DB] unable to initialize: %w", err) - } - - return nil -} - -// Backup the database. If db is nil, then uses the existing database -// connection. -func (db *Database) Backup(backupPath string) (err error) { - if db.dbType == PostgresBackend { - logger.Warn("Postgres backend detected, ignoring Backup request") - return nil - } - - thisDB := db.writeDB - if thisDB == nil { - thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.dbPath+"?_fk=true") - if err != nil { - return fmt.Errorf("open database %s failed: %w", db.dbPath, err) - } - defer thisDB.Close() - } - - logger.Infof("Backing up database into: %s", backupPath) - _, err = thisDB.Exec(`VACUUM INTO "` + backupPath + `"`) - if err != nil { - return fmt.Errorf("vacuum failed: %w", err) - } - - return nil -} - func (db *Database) Anonymise(outPath string) error { anon, err := NewAnonymiser(db, outPath) @@ -432,16 +326,6 @@ func (db *Database) Anonymise(outPath string) error { return anon.Anonymise(context.Background()) } -func (db *Database) RestoreFromBackup(backupPath string) error { - if db.dbType == PostgresBackend { - logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") - return nil - } - - logger.Infof("Restoring backup database %s into %s", backupPath, db.dbPath) - return os.Rename(backupPath, db.dbPath) -} - func (db *Database) AppSchemaVersion() uint { return appSchemaVersion } @@ -450,26 +334,6 @@ func (db *Database) 
DatabasePath() string { return db.dbPath } -func (db *Database) DatabaseBackupPath(backupDirectoryPath string) string { - fn := fmt.Sprintf("%s.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) - - if backupDirectoryPath != "" { - return filepath.Join(backupDirectoryPath, fn) - } - - return fn -} - -func (db *Database) AnonymousDatabasePath(backupDirectoryPath string) string { - fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) - - if backupDirectoryPath != "" { - return filepath.Join(backupDirectoryPath, fn) - } - - return fn -} - func (db *Database) Version() uint { return db.schemaVersion } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go new file mode 100644 index 00000000000..de59de557a0 --- /dev/null +++ b/pkg/sqlite/database_postgres.go @@ -0,0 +1,72 @@ +package sqlite + +import ( + "fmt" + + "github.com/doug-martin/goqu/v9" + _ "github.com/jackc/pgx/v5/stdlib" + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/logger" +) + +type PostgresDB Database + +func NewPostgresDatabase(dbConnector string) *Database { + dialect = goqu.Dialect("postgres") + ret := NewDatabase() + + db := &PostgresDB{ + databaseFunctions: ret, + storeRepository: ret.storeRepository, + lockChan: ret.lockChan, + dbType: SqliteBackend, + dbString: dbConnector, + } + + dbWrapper.dbType = SqliteBackend + + return (*Database)(db) +} + +func (db *Database) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { + conn, err = sqlx.Open("pgx", db.dbString) + if err == nil { + if disableForeignKeys { + conn.Exec("SET session_replication_role = replica;") + } + if !writable { + conn.Exec("SET default_transaction_read_only = ON;") + } + } + + if err != nil { + return nil, fmt.Errorf("db.Open(): %w", err) + } + + return conn, nil +} + +func (db *PostgresDB) Remove() error { + logger.Warn("Postgres backend detected, ignoring Remove 
request") + return nil +} + +func (db *PostgresDB) Reset() error { + logger.Warn("Postgres backend detected, ignoring Reset request") + return nil +} + +func (db *PostgresDB) Backup(backupPath string) (err error) { + logger.Warn("Postgres backend detected, ignoring Backup request") + return nil +} + +func (db *PostgresDB) RestoreFromBackup(backupPath string) error { + logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") + return nil +} + +func (db *PostgresDB) DatabaseBackupPath(backupDirectoryPath string) string { + logger.Warn("Postgres backend detected, ignoring DatabaseBackupPath request") + return "" +} diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go new file mode 100644 index 00000000000..4539fbd51ca --- /dev/null +++ b/pkg/sqlite/database_sqlite.go @@ -0,0 +1,152 @@ +package sqlite + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/doug-martin/goqu/v9" + "github.com/doug-martin/goqu/v9/dialect/sqlite3" + "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/fsutil" + "github.com/stashapp/stash/pkg/logger" +) + +type SQLiteDB Database + +func RegisterSqliteDialect() { + opts := sqlite3.DialectOptions() + opts.SupportsReturn = true + goqu.RegisterDialect("sqlite3new", opts) +} + +func NewSQLiteDatabase(dbPath string) *Database { + dialect = goqu.Dialect("sqlite3new") + ret := NewDatabase() + + db := &SQLiteDB{ + databaseFunctions: ret, + storeRepository: ret.storeRepository, + lockChan: ret.lockChan, + dbType: SqliteBackend, + dbPath: dbPath, + } + + dbWrapper.dbType = SqliteBackend + + return (*Database)(db) +} + +func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { + // https://github.com/mattn/go-sqlite3 + url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" + if !disableForeignKeys { + url += "&_fk=true" + } + + if writable { + url += "&_txlock=immediate" + } else { + url += "&mode=ro" + } + + // #5155 - set the cache 
size if the environment variable is set + // default is -2000 which is 2MB + if cacheSize := os.Getenv(cacheSizeEnv); cacheSize != "" { + url += "&_cache_size=" + cacheSize + } + + conn, err = sqlx.Open(sqlite3Driver, url) + + if err != nil { + return nil, fmt.Errorf("db.Open(): %w", err) + } + + return conn, nil +} + +func (db *SQLiteDB) Remove() error { + databasePath := db.dbPath + err := db.Close() + + if err != nil { + return fmt.Errorf("error closing database: %w", err) + } + + err = os.Remove(databasePath) + if err != nil { + return fmt.Errorf("error removing database: %w", err) + } + + // remove the -shm, -wal files ( if they exist ) + walFiles := []string{databasePath + "-shm", databasePath + "-wal"} + for _, wf := range walFiles { + if exists, _ := fsutil.FileExists(wf); exists { + err = os.Remove(wf) + if err != nil { + return fmt.Errorf("error removing database: %w", err) + } + } + } + + return nil +} + +func (db *SQLiteDB) Reset() error { + if err := db.Remove(); err != nil { + return err + } + + if err := db.Open(); err != nil { + return fmt.Errorf("[reset DB] unable to initialize: %w", err) + } + + return nil +} + +// Backup the database. If db is nil, then uses the existing database +// connection. 
+func (db *SQLiteDB) Backup(backupPath string) (err error) { + thisDB := db.writeDB + if thisDB == nil { + thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.dbPath+"?_fk=true") + if err != nil { + return fmt.Errorf("open database %s failed: %w", db.dbPath, err) + } + defer thisDB.Close() + } + + logger.Infof("Backing up database into: %s", backupPath) + _, err = thisDB.Exec(`VACUUM INTO "` + backupPath + `"`) + if err != nil { + return fmt.Errorf("vacuum failed: %w", err) + } + + return nil +} + +func (db *SQLiteDB) RestoreFromBackup(backupPath string) error { + logger.Infof("Restoring backup database %s into %s", backupPath, db.dbPath) + return os.Rename(backupPath, db.dbPath) +} + +func (db *SQLiteDB) DatabaseBackupPath(backupDirectoryPath string) string { + fn := fmt.Sprintf("%s.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) + + if backupDirectoryPath != "" { + return filepath.Join(backupDirectoryPath, fn) + } + + return fn +} + +func (db *SQLiteDB) AnonymousDatabasePath(backupDirectoryPath string) string { + fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) + + if backupDirectoryPath != "" { + return filepath.Join(backupDirectoryPath, fn) + } + + return fn +} diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index a2e272aa9f3..dcd287ef5d2 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -35,7 +35,9 @@ func logSQL(start time.Time, query string, args ...interface{}) { } } -type dbWrapperType struct{} +type dbWrapperType struct { + dbType DatabaseType +} var dbWrapper = dbWrapperType{} @@ -47,7 +49,21 @@ func sqlError(err error, sql string, args ...interface{}) error { return fmt.Errorf("error executing `%s` [%v]: %w", sql, args, err) } -func (*dbWrapperType) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { +func (db *dbWrapperType) Rebind(query string) string { + var bindType int + + switch db.dbType { + 
case SqliteBackend: + bindType = sqlx.QUESTION + case PostgresBackend: + bindType = sqlx.DOLLAR + } + + return sqlx.Rebind(bindType, query) +} + +func (db *dbWrapperType) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + query = db.Rebind(query) tx, err := getDBReader(ctx) if err != nil { return sqlError(err, query, args...) @@ -60,7 +76,8 @@ func (*dbWrapperType) Get(ctx context.Context, dest interface{}, query string, a return sqlError(err, query, args...) } -func (*dbWrapperType) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { +func (db *dbWrapperType) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + query = db.Rebind(query) tx, err := getDBReader(ctx) if err != nil { return sqlError(err, query, args...) @@ -73,7 +90,8 @@ func (*dbWrapperType) Select(ctx context.Context, dest interface{}, query string return sqlError(err, query, args...) } -func (*dbWrapperType) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { +func (db *dbWrapperType) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + query = db.Rebind(query) tx, err := getDBReader(ctx) if err != nil { return nil, sqlError(err, query, args...) @@ -86,7 +104,8 @@ func (*dbWrapperType) Queryx(ctx context.Context, query string, args ...interfac return ret, sqlError(err, query, args...) } -func (*dbWrapperType) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { +func (db *dbWrapperType) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + query = db.Rebind(query) tx, err := getDBReader(ctx) if err != nil { return nil, sqlError(err, query, args...) @@ -99,7 +118,8 @@ func (*dbWrapperType) QueryxContext(ctx context.Context, query string, args ...i return ret, sqlError(err, query, args...) 
} -func (*dbWrapperType) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) { +func (db *dbWrapperType) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + query = db.Rebind(query) tx, err := getTx(ctx) if err != nil { return nil, sqlError(err, query, arg) @@ -112,7 +132,8 @@ func (*dbWrapperType) NamedExec(ctx context.Context, query string, arg interface return ret, sqlError(err, query, arg) } -func (*dbWrapperType) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { +func (db *dbWrapperType) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + query = db.Rebind(query) tx, err := getTx(ctx) if err != nil { return nil, sqlError(err, query, args...) @@ -126,7 +147,8 @@ func (*dbWrapperType) Exec(ctx context.Context, query string, args ...interface{ } // Prepare creates a prepared statement. -func (*dbWrapperType) Prepare(ctx context.Context, query string, args ...interface{}) (*stmt, error) { +func (db *dbWrapperType) Prepare(ctx context.Context, query string, args ...interface{}) (*stmt, error) { + query = db.Rebind(query) tx, err := getTx(ctx) if err != nil { return nil, sqlError(err, query, args...) @@ -144,7 +166,7 @@ func (*dbWrapperType) Prepare(ctx context.Context, query string, args ...interfa }, nil } -func (*dbWrapperType) ExecStmt(ctx context.Context, stmt *stmt, args ...interface{}) (sql.Result, error) { +func (db *dbWrapperType) ExecStmt(ctx context.Context, stmt *stmt, args ...interface{}) (sql.Result, error) { _, err := getTx(ctx) if err != nil { return nil, sqlError(err, stmt.query, args...) 
From 876ad844c08bbefc08cf8a9a38e3d6663e19c6b6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 3 Oct 2024 03:29:42 +0200 Subject: [PATCH 03/85] Query fixes --- pkg/sqlite/blob.go | 27 ++++++++++++++++----------- pkg/sqlite/blob_migrate.go | 6 +++--- pkg/sqlite/database.go | 3 --- pkg/sqlite/database_postgres.go | 4 ++-- pkg/sqlite/file.go | 4 ++-- pkg/sqlite/gallery.go | 6 +++--- pkg/sqlite/group.go | 4 ++-- pkg/sqlite/image.go | 8 ++++---- pkg/sqlite/performer.go | 4 ++-- pkg/sqlite/repository.go | 6 +++--- pkg/sqlite/scene.go | 8 ++++---- pkg/sqlite/scene_marker.go | 2 +- pkg/sqlite/studio.go | 4 ++-- pkg/sqlite/table.go | 6 +++--- pkg/sqlite/tag.go | 4 ++-- 15 files changed, 49 insertions(+), 47 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 241b63d23cf..0caa7a65f61 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -70,8 +70,8 @@ func NewBlobStore(options BlobStoreOptions) *BlobStore { } type blobRow struct { - Checksum string `db:"checksum"` - Blob []byte `db:"blob"` + Checksum string `db:"checksum"` + Blob sql.Null[[]byte] `db:"blob"` } func (qb *BlobStore) table() exp.IdentifierExpression { @@ -124,10 +124,14 @@ func (qb *BlobStore) Write(ctx context.Context, data []byte) (string, error) { } func (qb *BlobStore) write(ctx context.Context, checksum string, data []byte) error { + var blobdata sql.Null[[]byte] + blobdata.V = data + blobdata.Valid = len(data) > 0 + table := qb.table() - q := dialect.Insert(table).Prepared(true).Rows(blobRow{ + q := dialect.Insert(table).Rows(blobRow{ Checksum: checksum, - Blob: data, + Blob: blobdata, }).OnConflict(goqu.DoNothing()) _, err := exec(ctx, q) @@ -140,7 +144,7 @@ func (qb *BlobStore) write(ctx context.Context, checksum string, data []byte) er func (qb *BlobStore) update(ctx context.Context, checksum string, data []byte) error { table := qb.table() - q := dialect.Update(table).Prepared(true).Set(goqu.Record{ + q := 
dialect.Update(table).Set(goqu.Record{ "blob": data, }).Where(goqu.C(blobChecksumColumn).Eq(checksum)) @@ -195,8 +199,8 @@ func (qb *BlobStore) readSQL(ctx context.Context, querySQL string, args ...inter checksum := row.Checksum - if row.Blob != nil { - return row.Blob, checksum, nil + if row.Blob.Valid { + return row.Blob.V, checksum, nil } // don't use the filesystem if not configured to do so @@ -265,8 +269,8 @@ func (qb *BlobStore) Read(ctx context.Context, checksum string) ([]byte, error) } } - if ret != nil { - return ret, nil + if ret.Valid { + return ret.V, nil } // don't use the filesystem if not configured to do so @@ -280,9 +284,10 @@ func (qb *BlobStore) Read(ctx context.Context, checksum string) ([]byte, error) } } -func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) ([]byte, error) { +func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) (sql.Null[[]byte], error) { q := dialect.From(qb.table()).Select(qb.table().All()).Where(qb.tableMgr.byID(checksum)) + var empty sql.Null[[]byte] var row blobRow const single = true if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { @@ -292,7 +297,7 @@ func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) ([]b return nil }); err != nil { - return nil, fmt.Errorf("querying %s: %w", qb.table(), err) + return empty, fmt.Errorf("querying %s: %w", qb.table(), err) } return row.Blob, nil diff --git a/pkg/sqlite/blob_migrate.go b/pkg/sqlite/blob_migrate.go index e121d07923a..7f42aff2c6d 100644 --- a/pkg/sqlite/blob_migrate.go +++ b/pkg/sqlite/blob_migrate.go @@ -63,7 +63,7 @@ func (qb *BlobStore) migrateBlobDatabase(ctx context.Context, checksum string, d return fmt.Errorf("reading from database: %w", err) } - if len(existing) == 0 { + if len(existing.V) == 0 { // find the blob in the filesystem blob, err := qb.fsStore.Read(ctx, checksum) if err != nil { @@ -94,14 +94,14 @@ func (qb *BlobStore) migrateBlobFilesystem(ctx context.Context, checksum 
string, return fmt.Errorf("reading from database: %w", err) } - if len(blob) == 0 { + if len(blob.V) == 0 { // it's possible that the blob is already present in the filesystem // just ignore return nil } // write the blob to the filesystem - if err := qb.fsStore.Write(ctx, checksum, blob); err != nil { + if err := qb.fsStore.Write(ctx, checksum, blob.V); err != nil { return fmt.Errorf("writing to filesystem: %w", err) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index b5a172fd942..354ad4ad294 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -8,7 +8,6 @@ import ( "fmt" "time" - "github.com/doug-martin/goqu/v9" "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/logger" @@ -193,8 +192,6 @@ func (db *Database) Ready() error { // necessary migrations must be run separately using RunMigrations. // Returns true if the database is new. func (db *Database) Open() error { - goqu.SetDefaultPrepared(false) - databaseSchemaVersion, err := db.getDatabaseSchemaVersion() if err != nil { return fmt.Errorf("getting database schema version: %w", err) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index de59de557a0..fd8c516ebe9 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -19,11 +19,11 @@ func NewPostgresDatabase(dbConnector string) *Database { databaseFunctions: ret, storeRepository: ret.storeRepository, lockChan: ret.lockChan, - dbType: SqliteBackend, + dbType: PostgresBackend, dbString: dbConnector, } - dbWrapper.dbType = SqliteBackend + dbWrapper.dbType = PostgresBackend return (*Database)(db) } diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 6bf6e32b51f..b7c410170a7 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -764,7 +764,7 @@ func (qb *FileStore) IsPrimary(ctx context.Context, fileID models.FileID) (bool, for _, t := range joinTables { qq := dialect.From(t).Select(t.Col(fileIDColumn)).Where( t.Col(fileIDColumn).Eq(fileID), - 
t.Col("primary").Eq(1), + t.Col("primary").IsTrue(), ) if sq == nil { @@ -849,7 +849,7 @@ func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) query := qb.newQuery() query.join(folderTable, "", "files.parent_folder_id = folders.id") - distinctIDs(&query, fileTable) + selectIDs(&query, fileTable) if q := findFilter.Q; q != nil && *q != "" { filepathColumn := "folders.path || '" + string(filepath.Separator) + "' || files.basename" diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 5473b9c36ee..008ab85f7f1 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -209,7 +209,7 @@ func (qb *GalleryStore) selectDataset() *goqu.SelectDataset { galleriesFilesJoinTable, goqu.On( galleriesFilesJoinTable.Col(galleryIDColumn).Eq(table.Col(idColumn)), - galleriesFilesJoinTable.Col("primary").Eq(1), + galleriesFilesJoinTable.Col("primary").IsTrue(), ), ).LeftJoin( files, @@ -687,7 +687,7 @@ func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.Gal } query := galleryRepository.newQuery() - distinctIDs(&query, galleryTable) + selectIDs(&query, galleryTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( @@ -851,7 +851,7 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F } // Whatever the sorting, always use title/id as a final sort - query.sortAndPagination += ", COALESCE(galleries.title, galleries.id) COLLATE NATURAL_CI ASC" + query.sortAndPagination += ", COALESCE(galleries.title, cast(galleries.id as text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index 603494fe71a..cd10461d3fb 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -427,7 +427,7 @@ func (qb *GroupStore) makeQuery(ctx context.Context, groupFilter *models.GroupFi } query := groupRepository.newQuery() - distinctIDs(&query, groupTable) + selectIDs(&query, groupTable) if q := findFilter.Q; q != nil && *q != "" { searchColumns := 
[]string{"groups.name", "groups.aliases"} @@ -529,7 +529,7 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF } // Whatever the sorting, always use name/id as a final sort - query.sortAndPagination += ", COALESCE(groups.name, groups.id) COLLATE NATURAL_CI ASC" + query.sortAndPagination += ", COALESCE(groups.name, cast(groups.id as text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index db40d4f474b..65494f0722c 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -210,7 +210,7 @@ func (qb *ImageStore) selectDataset() *goqu.SelectDataset { imagesFilesJoinTable, goqu.On( imagesFilesJoinTable.Col(imageIDColumn).Eq(table.Col(idColumn)), - imagesFilesJoinTable.Col("primary").Eq(1), + imagesFilesJoinTable.Col("primary").IsTrue(), ), ).LeftJoin( files, @@ -604,7 +604,7 @@ func (qb *ImageStore) FindByChecksum(ctx context.Context, checksum string) ([]*m var defaultGalleryOrder = []exp.OrderedExpression{ goqu.L("COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI").Asc(), - goqu.L("COALESCE(images.title, images.id) COLLATE NATURAL_CI").Asc(), + goqu.L("COALESCE(images.title, cast(images.id as text)) COLLATE NATURAL_CI").Asc(), } func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) { @@ -778,7 +778,7 @@ func (qb *ImageStore) makeQuery(ctx context.Context, imageFilter *models.ImageFi } query := imageRepository.newQuery() - distinctIDs(&query, imageTable) + selectIDs(&query, imageTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( @@ -987,7 +987,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod } // Whatever the sorting, always use title/id as a final sort - sortClause += ", COALESCE(images.title, images.id) COLLATE NATURAL_CI ASC" + sortClause += ", COALESCE(images.title, cast(images.id as text)) COLLATE NATURAL_CI ASC" } q.sortAndPagination = sortClause + 
getPagination(findFilter) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 7ff6f5401a0..df1dbbea837 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -596,7 +596,7 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models } query := performerRepository.newQuery() - distinctIDs(&query, performerTable) + selectIDs(&query, performerTable) if q := findFilter.Q; q != nil && *q != "" { query.join(performersAliasesTable, "", "performer_aliases.performer_id = performers.id") @@ -770,7 +770,7 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(performers.name, performers.id) COLLATE NATURAL_CI ASC" + sortQuery += ", COALESCE(performers.name, cast(performers.id as text)) COLLATE NATURAL_CI ASC" return sortQuery, nil } diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 8eb87b9aff1..8be8779747c 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -456,10 +456,10 @@ func idToIndexMap(ids []int) map[int]int { func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bool) ([][]models.FileID, error) { var primaryClause string if primaryOnly { - primaryClause = " AND `primary` = 1" + primaryClause = " AND \"primary\" = 1" } - query := fmt.Sprintf("SELECT %s as id, file_id, `primary` from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) + query := fmt.Sprintf("SELECT %s as id, file_id, \"primary\" from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) idi := make([]interface{}, len(ids)) for i, id := range ids { @@ -500,7 +500,7 @@ func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bo } func (r *filesRepository) get(ctx context.Context, id int) ([]models.FileID, error) { - query := fmt.Sprintf("SELECT file_id, `primary` from %s 
WHERE %s = ?", r.tableName, r.idColumn) + query := fmt.Sprintf("SELECT file_id, \"primary\" from %s WHERE %s = ?", r.tableName, r.idColumn) type relatedFile struct { FileID models.FileID `db:"file_id"` diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 5df614b886f..e4da8777578 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -267,7 +267,7 @@ func (qb *SceneStore) selectDataset() *goqu.SelectDataset { scenesFilesJoinTable, goqu.On( scenesFilesJoinTable.Col(sceneIDColumn).Eq(table.Col(idColumn)), - scenesFilesJoinTable.Col("primary").Eq(1), + scenesFilesJoinTable.Col("primary").IsTrue(), ), ).LeftJoin( files, @@ -630,7 +630,7 @@ func (qb *SceneStore) FindByFileID(ctx context.Context, fileID models.FileID) ([ func (qb *SceneStore) FindByPrimaryFileID(ctx context.Context, fileID models.FileID) ([]*models.Scene, error) { sq := dialect.From(scenesFilesJoinTable).Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where( scenesFilesJoinTable.Col(fileIDColumn).Eq(fileID), - scenesFilesJoinTable.Col("primary").Eq(1), + scenesFilesJoinTable.Col("primary").IsTrue(), ) ret, err := qb.findBySubquery(ctx, sq) @@ -915,7 +915,7 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi } query := sceneRepository.newQuery() - distinctIDs(&query, sceneTable) + selectIDs(&query, sceneTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( @@ -1195,7 +1195,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF } // Whatever the sorting, always use title/id as a final sort - query.sortAndPagination += ", COALESCE(scenes.title, scenes.id) COLLATE NATURAL_CI ASC" + query.sortAndPagination += ", COALESCE(scenes.title, cast(scenes.id as text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 87a849d2084..2d3f59ec25c 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -298,7 +298,7 @@ func (qb *SceneMarkerStore) 
makeQuery(ctx context.Context, sceneMarkerFilter *mo } query := sceneMarkerRepository.newQuery() - distinctIDs(&query, sceneMarkerTable) + selectIDs(&query, sceneMarkerTable) if q := findFilter.Q; q != nil && *q != "" { query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 95edf4173e2..15c1cc92795 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -522,7 +522,7 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi } query := studioRepository.newQuery() - distinctIDs(&query, studioTable) + selectIDs(&query, studioTable) if q := findFilter.Q; q != nil && *q != "" { query.join(studioAliasesTable, "", "studio_aliases.studio_id = studios.id") @@ -622,7 +622,7 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(studios.name, studios.id) COLLATE NATURAL_CI ASC" + sortQuery += ", COALESCE(studios.name, cast(studios.id as text)) COLLATE NATURAL_CI ASC" return sortQuery, nil } diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index 80d6b718a7f..f2116139abb 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -32,7 +32,7 @@ func (e *NotFoundError) Error() string { } func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { - q := dialect.Insert(t.table).Prepared(true).Rows(o) + q := dialect.Insert(t.table).Prepared(true).Rows(o).Returning(goqu.I("id")) ret, err := exec(ctx, q) if err != nil { return nil, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -830,7 +830,7 @@ func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID model table := t.table.table q := dialect.Update(table).Prepared(true).Set(goqu.Record{ - "primary": 0, + "primary": false, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Neq(fileID)) if _, err := exec(ctx, q); err != nil { @@ -838,7 +838,7 @@ func 
(t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID model } q = dialect.Update(table).Prepared(true).Set(goqu.Record{ - "primary": 1, + "primary": true, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Eq(fileID)) if _, err := exec(ctx, q); err != nil { diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 42bdd9bbe45..40a9d78a439 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -599,7 +599,7 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, } query := tagRepository.newQuery() - distinctIDs(&query, tagTable) + selectIDs(&query, tagTable) if q := findFilter.Q; q != nil && *q != "" { query.join(tagAliasesTable, "", "tag_aliases.tag_id = tags.id") @@ -691,7 +691,7 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(tags.name, tags.id) COLLATE NATURAL_CI ASC" + sortQuery += ", COALESCE(tags.name, cast(tags.id as text)) COLLATE NATURAL_CI ASC" return sortQuery, nil } From 2ac97685746150655f5e38fc6595eff326edaf75 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 3 Oct 2024 13:00:31 +0200 Subject: [PATCH 04/85] Migration changes Fix insert return id --- go.mod | 1 + internal/manager/init.go | 5 ++- pkg/sqlite/anonymise.go | 2 +- pkg/sqlite/database.go | 26 +++++------- pkg/sqlite/database_postgres.go | 26 ++++++++---- pkg/sqlite/database_sqlite.go | 34 +++++++++------- pkg/sqlite/migrate.go | 40 ++++++++++++++++--- .../migrationsPostgres/67_initial.up.sql | 17 +++++++- pkg/sqlite/table.go | 36 ++++++++++++----- 9 files changed, 131 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index c81dbac0986..3259a122ddb 100644 --- a/go.mod +++ b/go.mod @@ -65,6 +65,7 @@ require ( github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/puddle/v2 v2.2.2 // 
indirect + github.com/lib/pq v1.10.9 // indirect ) require ( diff --git a/internal/manager/init.go b/internal/manager/init.go index d3e0ce23797..ec70ed23d09 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -43,10 +43,11 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { dbType = sqlite.SqliteBackend } - if dbType == sqlite.SqliteBackend { + switch dbType { + case sqlite.SqliteBackend: sqlite.RegisterSqliteDialect() db = sqlite.NewSQLiteDatabase(cfg.GetDatabasePath()) - } else if dbType == sqlite.PostgresBackend { + case sqlite.PostgresBackend: db = sqlite.NewPostgresDatabase(cfg.GetDatabaseConnectionString()) } } diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index 23a4a6d34f5..a97c964f1bf 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -32,7 +32,7 @@ func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { return nil, fmt.Errorf("vacuuming into %s: %w", outPath, err) } - newDB := NewSQLiteDatabase(db.dbPath) + newDB := NewSQLiteDatabase(db.DatabasePath()) if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 354ad4ad294..fe85930dc28 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -86,7 +86,7 @@ const ( SqliteBackend DatabaseType = "SQLITE" ) -type databaseFunctions interface { +type dbInterface interface { Analyze(ctx context.Context) error Anonymise(outPath string) error AnonymousDatabasePath(backupDirectoryPath string) string @@ -98,6 +98,7 @@ type databaseFunctions interface { Commit(ctx context.Context) error DatabaseBackupPath(backupDirectoryPath string) string DatabasePath() string + DatabaseType() DatabaseType ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, error) IsLocked(err error) bool Optimise(ctx context.Context) error @@ -127,20 +128,18 @@ type databaseFunctions interface { type Database struct { 
*storeRepository - databaseFunctions + dbInterface readDB *sqlx.DB writeDB *sqlx.DB - dbPath string - dbType DatabaseType - dbString string + dbConfig interface{} schemaVersion uint lockChan chan struct{} } -func NewDatabase() *Database { +func newDatabase() *storeRepository { fileStore := NewFileStore() folderStore := NewFolderStore() galleryStore := NewGalleryStore(fileStore, folderStore) @@ -166,18 +165,17 @@ func NewDatabase() *Database { SavedFilter: NewSavedFilterStore(), } - ret := &Database{ - storeRepository: r, - lockChan: make(chan struct{}, 1), - } - - return ret + return r } func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } +func (db *Database) DatabasePath() string { + return "" +} + // Ready returns an error if the database is not ready to begin transactions. func (db *Database) Ready() error { if db.readDB == nil || db.writeDB == nil { @@ -327,10 +325,6 @@ func (db *Database) AppSchemaVersion() uint { return appSchemaVersion } -func (db *Database) DatabasePath() string { - return db.dbPath -} - func (db *Database) Version() uint { return db.schemaVersion } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index fd8c516ebe9..223273be4ed 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -13,23 +13,33 @@ type PostgresDB Database func NewPostgresDatabase(dbConnector string) *Database { dialect = goqu.Dialect("postgres") - ret := NewDatabase() db := &PostgresDB{ - databaseFunctions: ret, - storeRepository: ret.storeRepository, - lockChan: ret.lockChan, - dbType: PostgresBackend, - dbString: dbConnector, + storeRepository: newDatabase(), + lockChan: make(chan struct{}, 1), + dbConfig: dbConnector, } + db.dbInterface = db dbWrapper.dbType = PostgresBackend return (*Database)(db) } -func (db *Database) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { - conn, err = sqlx.Open("pgx", db.dbString) +func (db 
*PostgresDB) DatabaseType() DatabaseType { + return PostgresBackend +} + +/*func (db *PostgresDB) AppSchemaVersion() uint { + return uint(0 - (66 - int(appSchemaVersion))) +}*/ + +func (db *PostgresDB) DatabaseConnector() string { + return db.dbConfig.(string) +} + +func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { + conn, err = sqlx.Open("pgx", db.DatabaseConnector()) if err == nil { if disableForeignKeys { conn.Exec("SET session_replication_role = replica;") diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 4539fbd51ca..d5c26a3cd5f 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -23,24 +23,30 @@ func RegisterSqliteDialect() { func NewSQLiteDatabase(dbPath string) *Database { dialect = goqu.Dialect("sqlite3new") - ret := NewDatabase() db := &SQLiteDB{ - databaseFunctions: ret, - storeRepository: ret.storeRepository, - lockChan: ret.lockChan, - dbType: SqliteBackend, - dbPath: dbPath, + storeRepository: newDatabase(), + lockChan: make(chan struct{}, 1), + dbConfig: dbPath, } + db.dbInterface = db dbWrapper.dbType = SqliteBackend return (*Database)(db) } +func (db *SQLiteDB) DatabaseType() DatabaseType { + return SqliteBackend +} + +func (db *SQLiteDB) DatabasePath() string { + return (db.dbConfig).(string) +} + func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { // https://github.com/mattn/go-sqlite3 - url := "file:" + db.dbPath + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" + url := "file:" + db.DatabasePath() + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" if !disableForeignKeys { url += "&_fk=true" } @@ -67,7 +73,7 @@ func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, } func (db *SQLiteDB) Remove() error { - databasePath := db.dbPath + databasePath := db.DatabasePath() err := db.Close() if err != nil { @@ -110,9 +116,9 @@ func (db *SQLiteDB) Reset() error { func (db *SQLiteDB) 
Backup(backupPath string) (err error) { thisDB := db.writeDB if thisDB == nil { - thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.dbPath+"?_fk=true") + thisDB, err = sqlx.Connect(sqlite3Driver, "file:"+db.DatabasePath()+"?_fk=true") if err != nil { - return fmt.Errorf("open database %s failed: %w", db.dbPath, err) + return fmt.Errorf("open database %s failed: %w", db.DatabasePath(), err) } defer thisDB.Close() } @@ -127,12 +133,12 @@ func (db *SQLiteDB) Backup(backupPath string) (err error) { } func (db *SQLiteDB) RestoreFromBackup(backupPath string) error { - logger.Infof("Restoring backup database %s into %s", backupPath, db.dbPath) - return os.Rename(backupPath, db.dbPath) + logger.Infof("Restoring backup database %s into %s", backupPath, db.DatabasePath()) + return os.Rename(backupPath, db.DatabasePath()) } func (db *SQLiteDB) DatabaseBackupPath(backupDirectoryPath string) string { - fn := fmt.Sprintf("%s.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) + fn := fmt.Sprintf("%s.%d.%s", filepath.Base(db.DatabasePath()), db.schemaVersion, time.Now().Format("20060102_150405")) if backupDirectoryPath != "" { return filepath.Join(backupDirectoryPath, fn) @@ -142,7 +148,7 @@ func (db *SQLiteDB) DatabaseBackupPath(backupDirectoryPath string) string { } func (db *SQLiteDB) AnonymousDatabasePath(backupDirectoryPath string) string { - fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.dbPath), db.schemaVersion, time.Now().Format("20060102_150405")) + fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.DatabasePath()), db.schemaVersion, time.Now().Format("20060102_150405")) if backupDirectoryPath != "" { return filepath.Join(backupDirectoryPath, fn) diff --git a/pkg/sqlite/migrate.go b/pkg/sqlite/migrate.go index ba47544588d..0dc60b3bf60 100644 --- a/pkg/sqlite/migrate.go +++ b/pkg/sqlite/migrate.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/golang-migrate/migrate/v4" + postgresmig 
"github.com/golang-migrate/migrate/v4/database/postgres" sqlite3mig "github.com/golang-migrate/migrate/v4/database/sqlite3" "github.com/golang-migrate/migrate/v4/source/iofs" "github.com/jmoiron/sqlx" @@ -12,7 +13,7 @@ import ( ) func (db *Database) needsMigration() bool { - return db.schemaVersion != appSchemaVersion + return db.schemaVersion != db.AppSchemaVersion() } type Migrator struct { @@ -55,16 +56,25 @@ func (m *Migrator) CurrentSchemaVersion() uint { } func (m *Migrator) RequiredSchemaVersion() uint { - return appSchemaVersion + return m.db.AppSchemaVersion() } func (m *Migrator) getMigrate() (*migrate.Migrate, error) { + if m.db.DatabaseType() == PostgresBackend { + return m._getMigratePostgres() + } + + return m._getMigrateSqlite() +} + +func (m *Migrator) _getMigrateSqlite() (*migrate.Migrate, error) { migrations, err := iofs.New(migrationsBox, "migrations") if err != nil { return nil, err } driver, err := sqlite3mig.WithInstance(m.conn.DB, &sqlite3mig.Config{}) + if err != nil { return nil, err } @@ -73,7 +83,27 @@ func (m *Migrator) getMigrate() (*migrate.Migrate, error) { return migrate.NewWithInstance( "iofs", migrations, - m.db.dbPath, + m.db.DatabasePath(), + driver, + ) +} + +func (m *Migrator) _getMigratePostgres() (*migrate.Migrate, error) { + migrations, err := iofs.New(migrationsBox, "migrationsPostgres") + if err != nil { + return nil, err + } + + driver, err := postgresmig.WithInstance(m.conn.DB, &postgresmig.Config{}) + + if err != nil { + return nil, err + } + + return migrate.NewWithInstance( + "iofs", + migrations, + "postgres", driver, ) } @@ -150,9 +180,9 @@ func (db *Database) RunAllMigrations() error { defer m.Close() databaseSchemaVersion, _, _ := m.m.Version() - stepNumber := appSchemaVersion - databaseSchemaVersion + stepNumber := db.AppSchemaVersion() - databaseSchemaVersion if stepNumber != 0 { - logger.Infof("Migrating database from version %d to %d", databaseSchemaVersion, appSchemaVersion) + logger.Infof("Migrating database 
from version %d to %d", databaseSchemaVersion, db.AppSchemaVersion()) // run each migration individually, and run custom migrations as needed var i uint = 1 diff --git a/pkg/sqlite/migrationsPostgres/67_initial.up.sql b/pkg/sqlite/migrationsPostgres/67_initial.up.sql index 894a3c251e9..9bf47762ba4 100644 --- a/pkg/sqlite/migrationsPostgres/67_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/67_initial.up.sql @@ -1,4 +1,19 @@ -CREATE COLLATION NATURAL_CI (provider = icu, locale = 'en@colNumeric=yes'); +CREATE COLLATION IF NOT EXISTS NATURAL_CI (provider = icu, locale = 'en@colNumeric=yes'); + +CREATE OR REPLACE FUNCTION regexp(re TEXT, s TEXT) +RETURNS BOOLEAN AS $$ +BEGIN + RETURN s ~ re; +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION basename(str TEXT) +RETURNS TEXT AS $$ +BEGIN + RETURN substring(str FROM '[^/\\]+$'); +END; +$$ LANGUAGE plpgsql; + CREATE TABLE blobs ( checksum varchar(255) NOT NULL PRIMARY KEY, blob bytea diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index f2116139abb..0aa043f1654 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -32,7 +32,7 @@ func (e *NotFoundError) Error() string { } func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { - q := dialect.Insert(t.table).Prepared(true).Rows(o).Returning(goqu.I("id")) + q := dialect.Insert(t.table).Prepared(true).Rows(o) ret, err := exec(ctx, q) if err != nil { return nil, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -42,17 +42,13 @@ func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { } func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { - result, err := t.insert(ctx, o) - if err != nil { - return 0, err - } - - ret, err := result.LastInsertId() + q := dialect.Insert(t.table).Prepared(true).Rows(o).Returning(goqu.I("id")) + val, err := execID(ctx, q) if err != nil { - return 0, err + return -1, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) } - return 
int(ret), nil + return int(*val), nil } func (t *table) updateByID(ctx context.Context, id interface{}, o interface{}) error { @@ -1136,6 +1132,28 @@ func exec(ctx context.Context, stmt sqler) (sql.Result, error) { return ret, nil } +// Execute, but returns an ID +func execID(ctx context.Context, stmt sqler) (*int64, error) { + tx, err := getTx(ctx) + if err != nil { + return nil, err + } + + sql, args, err := stmt.ToSQL() + if err != nil { + return nil, fmt.Errorf("generating sql: %w", err) + } + + logger.Tracef("SQL: %s [%v]", sql, args) + var id int64 + err = tx.QueryRowContext(ctx, sql, args...).Scan(&id) + if err != nil { + return nil, fmt.Errorf("executing `%s` [%v]: %w", sql, args, err) + } + + return &id, nil +} + func count(ctx context.Context, q *goqu.SelectDataset) (int, error) { var count int if err := querySimple(ctx, q, &count); err != nil { From 9f42dd1a6d3ad34bb1a6bd7a388f54cc6e718e08 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 3 Oct 2024 13:15:02 +0200 Subject: [PATCH 05/85] Remove prepared Fix blobs --- pkg/sqlite/anonymise.go | 2 +- pkg/sqlite/blob.go | 74 +++++++++++++++++++++++------------ pkg/sqlite/file.go | 12 +++--- pkg/sqlite/folder.go | 6 +-- pkg/sqlite/gallery.go | 4 +- pkg/sqlite/gallery_chapter.go | 2 +- pkg/sqlite/group.go | 10 ++--- pkg/sqlite/image.go | 9 ++--- pkg/sqlite/performer.go | 2 +- pkg/sqlite/saved_filter.go | 4 +- pkg/sqlite/scene.go | 4 +- pkg/sqlite/scene_marker.go | 4 +- pkg/sqlite/studio.go | 4 +- pkg/sqlite/table.go | 14 +++---- pkg/sqlite/tag.go | 6 +-- 15 files changed, 89 insertions(+), 68 deletions(-) diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index a97c964f1bf..1c68ae23e90 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -132,7 +132,7 @@ func (db *Anonymiser) anonymiseFoldersRecurse(ctx context.Context, parentFolderI if parentFolderID == 0 { stmt = stmt.Set(goqu.Record{"path": goqu.Cast(table.Col(idColumn), 
"VARCHAR")}).Where(table.Col("parent_folder_id").IsNull()) } else { - stmt = stmt.Prepared(true).Set(goqu.Record{ + stmt = stmt.Set(goqu.Record{ "path": goqu.L("? || ? || id", parentPath, string(filepath.Separator)), }).Where(table.Col("parent_folder_id").Eq(parentFolderID)) } diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 0caa7a65f61..776a51745ae 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -15,7 +15,6 @@ import ( "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/sqlite/blob" - "github.com/stashapp/stash/pkg/utils" "gopkg.in/guregu/null.v4" ) @@ -172,16 +171,21 @@ func (e *ChecksumBlobNotExistError) Error() string { return fmt.Sprintf("blob for checksum %s does not exist", e.Checksum) } -func (qb *BlobStore) readSQL(ctx context.Context, querySQL string, args ...interface{}) ([]byte, string, error) { +func (qb *BlobStore) readSQL(ctx context.Context, querySQL sqler) ([]byte, string, error) { if !qb.options.UseDatabase && !qb.options.UseFilesystem { panic("no blob store configured") } + query, args, err := querySQL.ToSQL() + if err != nil { + return nil, "", fmt.Errorf("reading blob tosql: %w", err) + } + // always try to get from the database first, even if set to use filesystem var row blobRow found := false const single = true - if err := qb.queryFunc(ctx, querySQL, args, single, func(r *sqlx.Rows) error { + if err := qb.queryFunc(ctx, query, args, single, func(r *sqlx.Rows) error { found = true if err := r.StructScan(&row); err != nil { return err @@ -358,15 +362,12 @@ type blobJoinQueryBuilder struct { } func (qb *blobJoinQueryBuilder) GetImage(ctx context.Context, id int, blobCol string) ([]byte, error) { - sqlQuery := utils.StrFormat(` -SELECT blobs.checksum, blobs.blob FROM {joinTable} INNER JOIN blobs ON {joinTable}.{joinCol} = blobs.checksum -WHERE {joinTable}.id = ? 
-`, utils.StrFormatMap{ - "joinTable": qb.joinTable, - "joinCol": blobCol, - }) - - ret, _, err := qb.blobStore.readSQL(ctx, sqlQuery, id) + sqlQuery := dialect.From(qb.joinTable). + Join(goqu.I("blobs"), goqu.On(goqu.I(qb.joinTable+"."+blobCol).Eq(goqu.I("blobs.checksum")))). + Select(goqu.I("blobs.checksum"), goqu.I("blobs.blob")). + Where(goqu.Ex{"id": id}) + + ret, _, err := qb.blobStore.readSQL(ctx, sqlQuery) return ret, err } @@ -386,6 +387,7 @@ func (qb *blobJoinQueryBuilder) UpdateImage(ctx context.Context, id int, blobCol } sqlQuery := fmt.Sprintf("UPDATE %s SET %s = ? WHERE id = ?", qb.joinTable, blobCol) + if _, err := dbWrapper.Exec(ctx, sqlQuery, checksum, id); err != nil { return err } @@ -401,15 +403,17 @@ func (qb *blobJoinQueryBuilder) UpdateImage(ctx context.Context, id int, blobCol } func (qb *blobJoinQueryBuilder) getChecksum(ctx context.Context, id int, blobCol string) (*string, error) { - sqlQuery := utils.StrFormat(` -SELECT {joinTable}.{joinCol} FROM {joinTable} WHERE {joinTable}.id = ? -`, utils.StrFormatMap{ - "joinTable": qb.joinTable, - "joinCol": blobCol, - }) + sqlQuery := dialect.From(qb.joinTable). + Select(blobCol). + Where(goqu.Ex{"id": id}) + + query, args, err := sqlQuery.ToSQL() + if err != nil { + return nil, err + } var checksum null.String - err := qb.repository.querySimple(ctx, sqlQuery, []interface{}{id}, &checksum) + err = qb.repository.querySimple(ctx, query, args, &checksum) if err != nil { return nil, err } @@ -432,8 +436,16 @@ func (qb *blobJoinQueryBuilder) DestroyImage(ctx context.Context, id int, blobCo return nil } - updateQuery := fmt.Sprintf("UPDATE %s SET %s = NULL WHERE id = ?", qb.joinTable, blobCol) - if _, err = dbWrapper.Exec(ctx, updateQuery, id); err != nil { + updateQuery := dialect.Update(qb.joinTable). + Set(goqu.Record{blobCol: nil}). 
+ Where(goqu.Ex{"id": id}) + + query, args, err := updateQuery.ToSQL() + if err != nil { + return err + } + + if _, err = dbWrapper.Exec(ctx, query, args); err != nil { return err } @@ -441,12 +453,22 @@ func (qb *blobJoinQueryBuilder) DestroyImage(ctx context.Context, id int, blobCo } func (qb *blobJoinQueryBuilder) HasImage(ctx context.Context, id int, blobCol string) (bool, error) { - stmt := utils.StrFormat("SELECT COUNT(*) as count FROM (SELECT {joinCol} FROM {joinTable} WHERE id = ? AND {joinCol} IS NOT NULL LIMIT 1)", utils.StrFormatMap{ - "joinTable": qb.joinTable, - "joinCol": blobCol, - }) + ds := dialect.From(goqu.T(qb.joinTable)). + Select(goqu.C(blobCol)). + Where( + goqu.C("id").Eq(id), + goqu.C(blobCol).IsNotNull(), + ). + Limit(1) + + countDs := dialect.From(ds.As("subquery")).Select(goqu.COUNT("*").As("count")) + + sql, params, err := countDs.ToSQL() + if err != nil { + return false, err + } - c, err := qb.repository.runCountQuery(ctx, stmt, []interface{}{id}) + c, err := qb.repository.runCountQuery(ctx, sql, params) if err != nil { return false, err } diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index b7c410170a7..cfc2edda7e1 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -662,7 +662,7 @@ func (qb *FileStore) FindAllInPaths(ctx context.Context, p []string, limit, offs table := qb.table() folderTable := folderTableMgr.table - q := dialect.From(table).Prepared(true).InnerJoin( + q := dialect.From(table).InnerJoin( folderTable, goqu.On(table.Col("parent_folder_id").Eq(folderTable.Col(idColumn))), ).Select(table.Col(idColumn)) @@ -695,7 +695,7 @@ func (qb *FileStore) CountAllInPaths(ctx context.Context, p []string) (int, erro func (qb *FileStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]models.File, error) { table := qb.table() - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col(idColumn).Eq( sq, ), @@ -720,7 +720,7 @@ func (qb *FileStore) FindByFingerprint(ctx 
context.Context, fp models.Fingerprin func (qb *FileStore) FindByZipFileID(ctx context.Context, zipFileID models.FileID) ([]models.File, error) { table := qb.table() - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col("zip_file_id").Eq(zipFileID), ) @@ -733,7 +733,7 @@ func (qb *FileStore) FindByFileInfo(ctx context.Context, info fs.FileInfo, size modTime := info.ModTime().Format(time.RFC3339) - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col("basename").Eq(info.Name()), table.Col("size").Eq(size), table.Col("mod_time").Eq(modTime), @@ -745,7 +745,7 @@ func (qb *FileStore) FindByFileInfo(ctx context.Context, info fs.FileInfo, size func (qb *FileStore) CountByFolderID(ctx context.Context, folderID models.FolderID) (int, error) { table := qb.table() - q := qb.countDataset().Prepared(true).Where( + q := qb.countDataset().Where( table.Col("parent_folder_id").Eq(folderID), ) @@ -774,7 +774,7 @@ func (qb *FileStore) IsPrimary(ctx context.Context, fileID models.FileID) (bool, } } - q := dialect.Select(goqu.COUNT("*").As("count")).Prepared(true).From( + q := dialect.Select(goqu.COUNT("*").As("count")).From( sq, ) diff --git a/pkg/sqlite/folder.go b/pkg/sqlite/folder.go index 4cf632d49e6..9201d2df52e 100644 --- a/pkg/sqlite/folder.go +++ b/pkg/sqlite/folder.go @@ -226,7 +226,7 @@ func (qb *FolderStore) Find(ctx context.Context, id models.FolderID) (*models.Fo } func (qb *FolderStore) FindByPath(ctx context.Context, p string) (*models.Folder, error) { - q := qb.selectDataset().Prepared(true).Where(qb.table().Col("path").Eq(p)) + q := qb.selectDataset().Where(qb.table().Col("path").Eq(p)) ret, err := qb.get(ctx, q) if err != nil && !errors.Is(err, sql.ErrNoRows) { @@ -295,7 +295,7 @@ func (qb *FolderStore) CountAllInPaths(ctx context.Context, p []string) (int, er // func (qb *FolderStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*file.Folder, error) { // table := qb.table() 
-// q := qb.selectDataset().Prepared(true).Where( +// q := qb.selectDataset().Where( // table.Col(idColumn).Eq( // sq, // ), @@ -307,7 +307,7 @@ func (qb *FolderStore) CountAllInPaths(ctx context.Context, p []string) (int, er func (qb *FolderStore) FindByZipFileID(ctx context.Context, zipFileID models.FileID) ([]*models.Folder, error) { table := qb.table() - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col("zip_file_id").Eq(zipFileID), ) diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 008ab85f7f1..6b13368fd79 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -405,7 +405,7 @@ func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gall galleries := make([]*models.Gallery, len(ids)) if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(batch)) + q := qb.selectDataset().Where(qb.table().Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -445,7 +445,7 @@ func (qb *GalleryStore) find(ctx context.Context, id int) (*models.Gallery, erro func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) { table := qb.table() - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col(idColumn).Eq( sq, ), diff --git a/pkg/sqlite/gallery_chapter.go b/pkg/sqlite/gallery_chapter.go index f0d9c52980b..49c702a7625 100644 --- a/pkg/sqlite/gallery_chapter.go +++ b/pkg/sqlite/gallery_chapter.go @@ -155,7 +155,7 @@ func (qb *GalleryChapterStore) FindMany(ctx context.Context, ids []int) ([]*mode ret := make([]*models.GalleryChapter, len(ids)) table := qb.table() - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err diff --git 
a/pkg/sqlite/group.go b/pkg/sqlite/group.go index cd10461d3fb..97a1b91f984 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -288,7 +288,7 @@ func (qb *GroupStore) FindMany(ctx context.Context, ids []int) ([]*models.Group, table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -369,7 +369,7 @@ func (qb *GroupStore) FindByName(ctx context.Context, name string, nocase bool) if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, name)).Limit(1) + sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, sql.ErrNoRows) { @@ -394,7 +394,7 @@ func (qb *GroupStore) FindByNames(ctx context.Context, names []string, nocase bo for _, name := range names { args = append(args, name) } - sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, args...)) + sq := qb.selectDataset().Where(goqu.L(where, args...)) ret, err := qb.getMany(ctx, sq) if err != nil { @@ -638,7 +638,7 @@ func (qb *GroupStore) FindSubGroupIDs(ctx context.Context, containingID int, ids WHERE gr.containing_id = :parentID AND gr.sub_id IN (:ids); */ table := groupRelationshipTableMgr.table - q := dialect.From(table).Prepared(true). + q := dialect.From(table). Select(table.Col("sub_id")).Where( table.Col("containing_id").Eq(containingID), table.Col("sub_id").In(ids), @@ -674,7 +674,7 @@ func (qb *GroupStore) FindInAncestors(ctx context.Context, ascestorIDs []int, id table := qb.table() const ascestors = "ancestors" const parentID = "parent_id" - q := dialect.From(ascestors).Prepared(true). + q := dialect.From(ascestors). WithRecursive(ascestors, dialect.From(qb.table()).Select(table.Col(idColumn).As(parentID)). 
Where(table.Col(idColumn).In(ascestorIDs)). diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 65494f0722c..46b6d5ccbed 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -391,7 +391,7 @@ func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, images := make([]*models.Image, len(ids)) if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(batch)) + q := qb.selectDataset().Where(qb.table().Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -431,7 +431,7 @@ func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) { func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) { table := qb.table() - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col(idColumn).Eq( sq, ), @@ -495,7 +495,7 @@ func (qb *ImageStore) CoverByGalleryID(ctx context.Context, galleryID int) (*mod galleriesImagesJoinTable.Col("cover").Eq(true), )) - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col(idColumn).Eq( sq, ), @@ -619,7 +619,7 @@ func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mo galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID), ) - q := qb.selectDataset().Prepared(true).Where( + q := qb.selectDataset().Where( table.Col(idColumn).Eq( sq, ), @@ -642,7 +642,6 @@ func (qb *ImageStore) FindByGalleryIDIndex(ctx context.Context, galleryID int, i goqu.On(table.Col(idColumn).Eq(galleriesImagesJoinTable.Col(imageIDColumn))), ). Where(galleriesImagesJoinTable.Col(galleryIDColumn).Eq(galleryID)). - Prepared(true). Order(defaultGalleryOrder...). 
Limit(1).Offset(index) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index df1dbbea837..809f15e8789 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -525,7 +525,7 @@ func (qb *PerformerStore) FindByNames(ctx context.Context, names []string, nocas args = append(args, name) } - sq := qb.selectDataset().Prepared(true).Where( + sq := qb.selectDataset().Where( goqu.L(clause, args...), ) ret, err := qb.getMany(ctx, sq) diff --git a/pkg/sqlite/saved_filter.go b/pkg/sqlite/saved_filter.go index 8f58b05e76c..7a5bb30eac3 100644 --- a/pkg/sqlite/saved_filter.go +++ b/pkg/sqlite/saved_filter.go @@ -158,7 +158,7 @@ func (qb *SavedFilterStore) FindMany(ctx context.Context, ids []int, ignoreNotFo ret := make([]*models.SavedFilter, len(ids)) table := qb.table() - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err @@ -242,7 +242,7 @@ func (qb *SavedFilterStore) FindByMode(ctx context.Context, mode models.FilterMo whereClause = table.Col("mode").Eq(mode) } - sq := qb.selectDataset().Prepared(true).Where(whereClause).Order(table.Col("name").Asc()) + sq := qb.selectDataset().Where(whereClause).Order(table.Col("name").Asc()) ret, err := qb.getMany(ctx, sq) if err != nil { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index e4da8777578..53c182c4cb9 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -497,7 +497,7 @@ func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -890,7 +890,7 @@ func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, err 
} table := qb.table() - qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 2d3f59ec25c..b340bfeda57 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -181,7 +181,7 @@ func (qb *SceneMarkerStore) FindMany(ctx context.Context, ids []int) ([]*models. ret := make([]*models.SceneMarker, len(ids)) table := qb.table() - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err @@ -285,7 +285,7 @@ func (qb *SceneMarkerStore) Wall(ctx context.Context, q *string) ([]*models.Scen } table := qb.table() - qq := qb.selectDataset().Prepared(true).Where(table.Col("title").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Where(table.Col("title").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 15c1cc92795..6d73337c9b0 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -298,7 +298,7 @@ func (qb *StudioStore) FindMany(ctx context.Context, ids []int) ([]*models.Studi table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -422,7 +422,7 @@ func (qb *StudioStore) FindByName(ctx context.Context, name string, nocase bool) if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, 
name)).Limit(1) + sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, sql.ErrNoRows) { diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index 0aa043f1654..4a63b04fcb8 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -32,7 +32,7 @@ func (e *NotFoundError) Error() string { } func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { - q := dialect.Insert(t.table).Prepared(true).Rows(o) + q := dialect.Insert(t.table).Rows(o) ret, err := exec(ctx, q) if err != nil { return nil, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -42,7 +42,7 @@ func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { } func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { - q := dialect.Insert(t.table).Prepared(true).Rows(o).Returning(goqu.I("id")) + q := dialect.Insert(t.table).Rows(o).Returning(goqu.I("id")) val, err := execID(ctx, q) if err != nil { return -1, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -52,7 +52,7 @@ func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { } func (t *table) updateByID(ctx context.Context, id interface{}, o interface{}) error { - q := dialect.Update(t.table).Prepared(true).Set(o).Where(t.byID(id)) + q := dialect.Update(t.table).Set(o).Where(t.byID(id)) if _, err := exec(ctx, q); err != nil { return fmt.Errorf("updating %s: %w", t.table.GetTable(), err) @@ -719,7 +719,7 @@ func (t *imageGalleriesTable) setCover(ctx context.Context, id int, galleryID in table := t.table.table - q := dialect.Update(table).Prepared(true).Set(goqu.Record{ + q := dialect.Update(table).Set(goqu.Record{ "cover": true, }).Where(t.idColumn.Eq(id), table.Col(galleryIDColumn).Eq(galleryID)) @@ -733,7 +733,7 @@ func (t *imageGalleriesTable) setCover(ctx context.Context, id int, galleryID in func (t *imageGalleriesTable) resetCover(ctx context.Context, galleryID int) error { 
table := t.table.table - q := dialect.Update(table).Prepared(true).Set(goqu.Record{ + q := dialect.Update(table).Set(goqu.Record{ "cover": false, }).Where( table.Col(galleryIDColumn).Eq(galleryID), @@ -825,7 +825,7 @@ func (t *relatedFilesTable) destroyJoins(ctx context.Context, fileIDs []models.F func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID models.FileID) error { table := t.table.table - q := dialect.Update(table).Prepared(true).Set(goqu.Record{ + q := dialect.Update(table).Set(goqu.Record{ "primary": false, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Neq(fileID)) @@ -833,7 +833,7 @@ func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID model return fmt.Errorf("unsetting primary flags in %s: %w", t.table.table.GetTable(), err) } - q = dialect.Update(table).Prepared(true).Set(goqu.Record{ + q = dialect.Update(table).Set(goqu.Record{ "primary": true, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Eq(fileID)) diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 40a9d78a439..f79853fbba8 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -305,7 +305,7 @@ func (qb *TagStore) FindMany(ctx context.Context, ids []int) ([]*models.Tag, err table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -470,7 +470,7 @@ func (qb *TagStore) FindByName(ctx context.Context, name string, nocase bool) (* if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, name)).Limit(1) + sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, sql.ErrNoRows) { @@ -495,7 +495,7 @@ func (qb *TagStore) FindByNames(ctx context.Context, names []string, nocase bool for _, name := range 
names { args = append(args, name) } - sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, args...)) + sq := qb.selectDataset().Where(goqu.L(where, args...)) ret, err := qb.getMany(ctx, sq) if err != nil { From 5790ec1a813b3247e616f581ed2adff010e6478c Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 13:42:13 +0200 Subject: [PATCH 06/85] SQL Boolean fixes --- pkg/sqlite/criterion_handlers.go | 7 +----- pkg/sqlite/database.go | 14 +++++++++++ pkg/sqlite/database_postgres.go | 23 ++++++++++++------- pkg/sqlite/gallery_filter.go | 6 ++--- pkg/sqlite/image.go | 2 +- pkg/sqlite/image_filter.go | 7 +++--- .../migrationsPostgres/67_initial.up.sql | 15 ------------ pkg/sqlite/performer.go | 2 +- pkg/sqlite/repository.go | 2 +- pkg/sqlite/scene_filter.go | 6 ++--- pkg/sqlite/studio.go | 2 +- pkg/sqlite/table.go | 2 +- pkg/sqlite/tag.go | 2 +- pkg/sqlite/tx.go | 2 +- 14 files changed, 47 insertions(+), 45 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index e021bd1759b..6171cbd3ad2 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -225,12 +225,7 @@ func boolCriterionHandler(c *bool, column string, addJoinFn func(f *filterBuilde if addJoinFn != nil { addJoinFn(f) } - var v string - if *c { - v = "1" - } else { - v = "0" - } + v := getDBBoolean(*c) f.addWhere(column + " = " + v) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index fe85930dc28..a16f85273b9 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -6,6 +6,7 @@ import ( "embed" "errors" "fmt" + "strconv" "time" "github.com/jmoiron/sqlx" @@ -168,6 +169,19 @@ func newDatabase() *storeRepository { return r } +func getDBBoolean(val bool) string { + switch dbWrapper.dbType { + case SqliteBackend: + if val { + return "1" + } else { + return "0" + } + default: + return strconv.FormatBool(val) + } +} + func (db *Database) 
SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 223273be4ed..ead21d5b496 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -40,19 +40,26 @@ func (db *PostgresDB) DatabaseConnector() string { func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { conn, err = sqlx.Open("pgx", db.DatabaseConnector()) - if err == nil { - if disableForeignKeys { - conn.Exec("SET session_replication_role = replica;") - } - if !writable { - conn.Exec("SET default_transaction_read_only = ON;") - } - } if err != nil { return nil, fmt.Errorf("db.Open(): %w", err) } + if disableForeignKeys { + _, err = conn.Exec("SET session_replication_role = replica;") + + if err != nil { + return nil, fmt.Errorf("conn.Exec(): %w", err) + } + } + if !writable { + _, err = conn.Exec("SET default_transaction_read_only = ON;") + + if err != nil { + return nil, fmt.Errorf("conn.Exec(): %w", err) + } + } + return conn, nil } diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index ad5ac592ada..5f03006f47c 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -378,12 +378,12 @@ func (qb *galleryFilterHandler) performerFavoriteCriterionHandler(performerfavor if *performerfavorite { // contains at least one favorite f.addLeftJoin("performers", "", "performers.id = performers_galleries.performer_id") - f.addWhere("performers.favorite = 1") + f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) } else { // contains zero favorites - f.addLeftJoin(`(SELECT performers_galleries.gallery_id as id FROM performers_galleries + f.addLeftJoin(fmt.Sprintf(`(SELECT performers_galleries.gallery_id as id FROM performers_galleries JOIN performers ON performers.id = performers_galleries.performer_id -GROUP BY performers_galleries.gallery_id HAVING SUM(performers.favorite) 
= 0)`, "nofaves", "galleries.id = nofaves.id") +GROUP BY performers_galleries.gallery_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "galleries.id = nofaves.id") f.addWhere("performers_galleries.gallery_id IS NULL OR nofaves.id IS NOT NULL") } } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 46b6d5ccbed..e870d01be6c 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -492,7 +492,7 @@ func (qb *ImageStore) CoverByGalleryID(ctx context.Context, galleryID int) (*mod Select(table.Col(idColumn)). Where(goqu.And( galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID), - galleriesImagesJoinTable.Col("cover").Eq(true), + galleriesImagesJoinTable.Col("cover").IsTrue(), )) q := qb.selectDataset().Where( diff --git a/pkg/sqlite/image_filter.go b/pkg/sqlite/image_filter.go index 8f2d5d6b90a..8c0ec7dbe98 100644 --- a/pkg/sqlite/image_filter.go +++ b/pkg/sqlite/image_filter.go @@ -2,6 +2,7 @@ package sqlite import ( "context" + "fmt" "github.com/stashapp/stash/pkg/models" ) @@ -254,12 +255,12 @@ func (qb *imageFilterHandler) performerFavoriteCriterionHandler(performerfavorit if *performerfavorite { // contains at least one favorite f.addLeftJoin("performers", "", "performers.id = performers_images.performer_id") - f.addWhere("performers.favorite = 1") + f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) } else { // contains zero favorites - f.addLeftJoin(`(SELECT performers_images.image_id as id FROM performers_images + f.addLeftJoin(fmt.Sprintf(`(SELECT performers_images.image_id as id FROM performers_images JOIN performers ON performers.id = performers_images.performer_id -GROUP BY performers_images.image_id HAVING SUM(performers.favorite) = 0)`, "nofaves", "images.id = nofaves.id") +GROUP BY performers_images.image_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "images.id = nofaves.id") f.addWhere("performers_images.image_id IS NULL OR nofaves.id IS NOT NULL") } } diff 
--git a/pkg/sqlite/migrationsPostgres/67_initial.up.sql b/pkg/sqlite/migrationsPostgres/67_initial.up.sql index 9bf47762ba4..4efe2d60f74 100644 --- a/pkg/sqlite/migrationsPostgres/67_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/67_initial.up.sql @@ -1,19 +1,4 @@ CREATE COLLATION IF NOT EXISTS NATURAL_CI (provider = icu, locale = 'en@colNumeric=yes'); - -CREATE OR REPLACE FUNCTION regexp(re TEXT, s TEXT) -RETURNS BOOLEAN AS $$ -BEGIN - RETURN s ~ re; -END; -$$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION basename(str TEXT) -RETURNS TEXT AS $$ -BEGIN - RETURN substring(str FROM '[^/\\]+$'); -END; -$$ LANGUAGE plpgsql; - CREATE TABLE blobs ( checksum varchar(255) NOT NULL PRIMARY KEY, blob bytea diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 809f15e8789..d0e92d3890a 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -575,7 +575,7 @@ func (qb *PerformerStore) QueryForAutoTag(ctx context.Context, words []string) ( sq = sq.Where( goqu.Or(whereClauses...), - table.Col("ignore_auto_tag").Eq(0), + table.Col("ignore_auto_tag").IsFalse(), ) ret, err := qb.findBySubquery(ctx, sq) diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 8be8779747c..0a6a2caf1fb 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -456,7 +456,7 @@ func idToIndexMap(ids []int) map[int]int { func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bool) ([][]models.FileID, error) { var primaryClause string if primaryOnly { - primaryClause = " AND \"primary\" = 1" + primaryClause = fmt.Sprintf(" AND \"primary\" = %s", getDBBoolean(true)) } query := fmt.Sprintf("SELECT %s as id, file_id, \"primary\" from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 2e63dad975f..66e7d744dad 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -455,12 +455,12 @@ func (qb 
*sceneFilterHandler) performerFavoriteCriterionHandler(performerfavorit if *performerfavorite { // contains at least one favorite f.addLeftJoin("performers", "", "performers.id = performers_scenes.performer_id") - f.addWhere("performers.favorite = 1") + f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) } else { // contains zero favorites - f.addLeftJoin(`(SELECT performers_scenes.scene_id as id FROM performers_scenes + f.addLeftJoin(fmt.Sprintf(`(SELECT performers_scenes.scene_id as id FROM performers_scenes JOIN performers ON performers.id = performers_scenes.performer_id -GROUP BY performers_scenes.scene_id HAVING SUM(performers.favorite) = 0)`, "nofaves", "scenes.id = nofaves.id") +GROUP BY performers_scenes.scene_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "scenes.id = nofaves.id") f.addWhere("performers_scenes.scene_id IS NULL OR nofaves.id IS NOT NULL") } } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 6d73337c9b0..089c5d75ea2 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -501,7 +501,7 @@ func (qb *StudioStore) QueryForAutoTag(ctx context.Context, words []string) ([]* sq = sq.Where( goqu.Or(whereClauses...), - table.Col("ignore_auto_tag").Eq(0), + table.Col("ignore_auto_tag").IsFalse(), ) ret, err := qb.findBySubquery(ctx, sq) diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index 4a63b04fcb8..857bebe174e 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -737,7 +737,7 @@ func (t *imageGalleriesTable) resetCover(ctx context.Context, galleryID int) err "cover": false, }).Where( table.Col(galleryIDColumn).Eq(galleryID), - table.Col("cover").Eq(true), + table.Col("cover").IsTrue(), ) if _, err := exec(ctx, q); err != nil { diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index f79853fbba8..37bdc26c925 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -584,7 +584,7 @@ func (qb *TagStore) QueryForAutoTag(ctx context.Context, words []string) ([]*mod 
whereOr := "(" + strings.Join(whereClauses, " OR ") + ")" where := strings.Join([]string{ - "tags.ignore_auto_tag = 0", + fmt.Sprintf("tags.ignore_auto_tag = %s", getDBBoolean(false)), whereOr, }, " AND ") return qb.queryTags(ctx, query+" WHERE "+where, args) diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index dcd287ef5d2..eddaf407be6 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -55,7 +55,7 @@ func (db *dbWrapperType) Rebind(query string) string { switch db.dbType { case SqliteBackend: bindType = sqlx.QUESTION - case PostgresBackend: + default: bindType = sqlx.DOLLAR } From 61c360513b995fff67b97a8b9f65bb2bb7f86fd0 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:03:36 +0200 Subject: [PATCH 07/85] PGSQL Fingerprints fix --- pkg/sqlite/migrationsPostgres/67_initial.up.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/migrationsPostgres/67_initial.up.sql b/pkg/sqlite/migrationsPostgres/67_initial.up.sql index 4efe2d60f74..c12e1311153 100644 --- a/pkg/sqlite/migrationsPostgres/67_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/67_initial.up.sql @@ -219,7 +219,7 @@ CREATE TABLE tags_relations ( CREATE TABLE files_fingerprints ( file_id integer NOT NULL, type varchar(255) NOT NULL, - fingerprint bytea NOT NULL, + fingerprint text NOT NULL, foreign key(file_id) references files(id) on delete CASCADE, PRIMARY KEY (file_id, type, fingerprint) ); From 17c503613a04cfb5c0ba61acd313272a93b0c75c Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:27:21 +0200 Subject: [PATCH 08/85] PGSQL fix duplicate ids in the wake of removing distinctIDs select --- pkg/sqlite/repository.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 0a6a2caf1fb..5042b54bd6f 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -5,6 +5,7 @@ import ( 
"database/sql" "errors" "fmt" + "slices" "strings" "github.com/jmoiron/sqlx" @@ -97,6 +98,11 @@ func (r *repository) runIdsQuery(ctx context.Context, query string, args []inter for i, v := range result { vsm[i] = v.Int } + + // We removed distinctIDs for postgresql, but now we have duplicates + slices.Sort(vsm) + vsm = slices.Compact(vsm) + return vsm, nil } From 08750b365679c5a8ad6b8ed4c2542a02c6a8ed68 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:15:50 +0200 Subject: [PATCH 09/85] Undo remove prepared --- pkg/sqlite/anonymise.go | 2 +- pkg/sqlite/file.go | 12 ++++++------ pkg/sqlite/folder.go | 6 +++--- pkg/sqlite/gallery.go | 4 ++-- pkg/sqlite/gallery_chapter.go | 2 +- pkg/sqlite/group.go | 10 +++++----- pkg/sqlite/image.go | 9 +++++---- pkg/sqlite/performer.go | 2 +- pkg/sqlite/saved_filter.go | 4 ++-- pkg/sqlite/scene.go | 4 ++-- pkg/sqlite/scene_marker.go | 4 ++-- pkg/sqlite/studio.go | 4 ++-- pkg/sqlite/table.go | 14 +++++++------- pkg/sqlite/tag.go | 6 +++--- 14 files changed, 42 insertions(+), 41 deletions(-) diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index 1c68ae23e90..a97c964f1bf 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -132,7 +132,7 @@ func (db *Anonymiser) anonymiseFoldersRecurse(ctx context.Context, parentFolderI if parentFolderID == 0 { stmt = stmt.Set(goqu.Record{"path": goqu.Cast(table.Col(idColumn), "VARCHAR")}).Where(table.Col("parent_folder_id").IsNull()) } else { - stmt = stmt.Set(goqu.Record{ + stmt = stmt.Prepared(true).Set(goqu.Record{ "path": goqu.L("? || ? 
|| id", parentPath, string(filepath.Separator)), }).Where(table.Col("parent_folder_id").Eq(parentFolderID)) } diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index cfc2edda7e1..7626a8c53bb 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -662,7 +662,7 @@ func (qb *FileStore) FindAllInPaths(ctx context.Context, p []string, limit, offs table := qb.table() folderTable := folderTableMgr.table - q := dialect.From(table).InnerJoin( + q := dialect.From(table).Prepared(true).InnerJoin( folderTable, goqu.On(table.Col("parent_folder_id").Eq(folderTable.Col(idColumn))), ).Select(table.Col(idColumn)) @@ -695,7 +695,7 @@ func (qb *FileStore) CountAllInPaths(ctx context.Context, p []string) (int, erro func (qb *FileStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]models.File, error) { table := qb.table() - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col(idColumn).Eq( sq, ), @@ -720,7 +720,7 @@ func (qb *FileStore) FindByFingerprint(ctx context.Context, fp models.Fingerprin func (qb *FileStore) FindByZipFileID(ctx context.Context, zipFileID models.FileID) ([]models.File, error) { table := qb.table() - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col("zip_file_id").Eq(zipFileID), ) @@ -733,7 +733,7 @@ func (qb *FileStore) FindByFileInfo(ctx context.Context, info fs.FileInfo, size modTime := info.ModTime().Format(time.RFC3339) - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col("basename").Eq(info.Name()), table.Col("size").Eq(size), table.Col("mod_time").Eq(modTime), @@ -745,7 +745,7 @@ func (qb *FileStore) FindByFileInfo(ctx context.Context, info fs.FileInfo, size func (qb *FileStore) CountByFolderID(ctx context.Context, folderID models.FolderID) (int, error) { table := qb.table() - q := qb.countDataset().Where( + q := qb.countDataset().Prepared(true).Where( table.Col("parent_folder_id").Eq(folderID), ) @@ -762,7 +762,7 @@ 
func (qb *FileStore) IsPrimary(ctx context.Context, fileID models.FileID) (bool, var sq *goqu.SelectDataset for _, t := range joinTables { - qq := dialect.From(t).Select(t.Col(fileIDColumn)).Where( + qq := dialect.From(t).Select(t.Col(fileIDColumn)).Prepared(true).Where( t.Col(fileIDColumn).Eq(fileID), t.Col("primary").IsTrue(), ) diff --git a/pkg/sqlite/folder.go b/pkg/sqlite/folder.go index 9201d2df52e..4cf632d49e6 100644 --- a/pkg/sqlite/folder.go +++ b/pkg/sqlite/folder.go @@ -226,7 +226,7 @@ func (qb *FolderStore) Find(ctx context.Context, id models.FolderID) (*models.Fo } func (qb *FolderStore) FindByPath(ctx context.Context, p string) (*models.Folder, error) { - q := qb.selectDataset().Where(qb.table().Col("path").Eq(p)) + q := qb.selectDataset().Prepared(true).Where(qb.table().Col("path").Eq(p)) ret, err := qb.get(ctx, q) if err != nil && !errors.Is(err, sql.ErrNoRows) { @@ -295,7 +295,7 @@ func (qb *FolderStore) CountAllInPaths(ctx context.Context, p []string) (int, er // func (qb *FolderStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*file.Folder, error) { // table := qb.table() -// q := qb.selectDataset().Where( +// q := qb.selectDataset().Prepared(true).Where( // table.Col(idColumn).Eq( // sq, // ), @@ -307,7 +307,7 @@ func (qb *FolderStore) CountAllInPaths(ctx context.Context, p []string) (int, er func (qb *FolderStore) FindByZipFileID(ctx context.Context, zipFileID models.FileID) ([]*models.Folder, error) { table := qb.table() - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col("zip_file_id").Eq(zipFileID), ) diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 6b13368fd79..008ab85f7f1 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -405,7 +405,7 @@ func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gall galleries := make([]*models.Gallery, len(ids)) if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := 
qb.selectDataset().Where(qb.table().Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -445,7 +445,7 @@ func (qb *GalleryStore) find(ctx context.Context, id int) (*models.Gallery, erro func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) { table := qb.table() - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col(idColumn).Eq( sq, ), diff --git a/pkg/sqlite/gallery_chapter.go b/pkg/sqlite/gallery_chapter.go index 49c702a7625..f0d9c52980b 100644 --- a/pkg/sqlite/gallery_chapter.go +++ b/pkg/sqlite/gallery_chapter.go @@ -155,7 +155,7 @@ func (qb *GalleryChapterStore) FindMany(ctx context.Context, ids []int) ([]*mode ret := make([]*models.GalleryChapter, len(ids)) table := qb.table() - q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index 97a1b91f984..cd10461d3fb 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -288,7 +288,7 @@ func (qb *GroupStore) FindMany(ctx context.Context, ids []int) ([]*models.Group, table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -369,7 +369,7 @@ func (qb *GroupStore) FindByName(ctx context.Context, name string, nocase bool) if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) + sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, 
sql.ErrNoRows) { @@ -394,7 +394,7 @@ func (qb *GroupStore) FindByNames(ctx context.Context, names []string, nocase bo for _, name := range names { args = append(args, name) } - sq := qb.selectDataset().Where(goqu.L(where, args...)) + sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, args...)) ret, err := qb.getMany(ctx, sq) if err != nil { @@ -638,7 +638,7 @@ func (qb *GroupStore) FindSubGroupIDs(ctx context.Context, containingID int, ids WHERE gr.containing_id = :parentID AND gr.sub_id IN (:ids); */ table := groupRelationshipTableMgr.table - q := dialect.From(table). + q := dialect.From(table).Prepared(true). Select(table.Col("sub_id")).Where( table.Col("containing_id").Eq(containingID), table.Col("sub_id").In(ids), @@ -674,7 +674,7 @@ func (qb *GroupStore) FindInAncestors(ctx context.Context, ascestorIDs []int, id table := qb.table() const ascestors = "ancestors" const parentID = "parent_id" - q := dialect.From(ascestors). + q := dialect.From(ascestors).Prepared(true). WithRecursive(ascestors, dialect.From(qb.table()).Select(table.Col(idColumn).As(parentID)). Where(table.Col(idColumn).In(ascestorIDs)). 
diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index e870d01be6c..a7dd0d574bc 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -391,7 +391,7 @@ func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, images := make([]*models.Image, len(ids)) if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Where(qb.table().Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -431,7 +431,7 @@ func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) { func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) { table := qb.table() - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col(idColumn).Eq( sq, ), @@ -495,7 +495,7 @@ func (qb *ImageStore) CoverByGalleryID(ctx context.Context, galleryID int) (*mod galleriesImagesJoinTable.Col("cover").IsTrue(), )) - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col(idColumn).Eq( sq, ), @@ -619,7 +619,7 @@ func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mo galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID), ) - q := qb.selectDataset().Where( + q := qb.selectDataset().Prepared(true).Where( table.Col(idColumn).Eq( sq, ), @@ -642,6 +642,7 @@ func (qb *ImageStore) FindByGalleryIDIndex(ctx context.Context, galleryID int, i goqu.On(table.Col(idColumn).Eq(galleriesImagesJoinTable.Col(imageIDColumn))), ). Where(galleriesImagesJoinTable.Col(galleryIDColumn).Eq(galleryID)). + Prepared(true). Order(defaultGalleryOrder...). 
Limit(1).Offset(index) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index d0e92d3890a..ee05a4bcbc6 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -525,7 +525,7 @@ func (qb *PerformerStore) FindByNames(ctx context.Context, names []string, nocas args = append(args, name) } - sq := qb.selectDataset().Where( + sq := qb.selectDataset().Prepared(true).Where( goqu.L(clause, args...), ) ret, err := qb.getMany(ctx, sq) diff --git a/pkg/sqlite/saved_filter.go b/pkg/sqlite/saved_filter.go index 7a5bb30eac3..8f58b05e76c 100644 --- a/pkg/sqlite/saved_filter.go +++ b/pkg/sqlite/saved_filter.go @@ -158,7 +158,7 @@ func (qb *SavedFilterStore) FindMany(ctx context.Context, ids []int, ignoreNotFo ret := make([]*models.SavedFilter, len(ids)) table := qb.table() - q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err @@ -242,7 +242,7 @@ func (qb *SavedFilterStore) FindByMode(ctx context.Context, mode models.FilterMo whereClause = table.Col("mode").Eq(mode) } - sq := qb.selectDataset().Where(whereClause).Order(table.Col("name").Asc()) + sq := qb.selectDataset().Prepared(true).Where(whereClause).Order(table.Col("name").Asc()) ret, err := qb.getMany(ctx, sq) if err != nil { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 53c182c4cb9..e4da8777578 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -497,7 +497,7 @@ func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -890,7 +890,7 @@ func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, err 
} table := qb.table() - qq := qb.selectDataset().Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index b340bfeda57..2d3f59ec25c 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -181,7 +181,7 @@ func (qb *SceneMarkerStore) FindMany(ctx context.Context, ids []int) ([]*models. ret := make([]*models.SceneMarker, len(ids)) table := qb.table() - q := qb.selectDataset().Where(table.Col(idColumn).In(ids)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids)) unsorted, err := qb.getMany(ctx, q) if err != nil { return nil, err @@ -285,7 +285,7 @@ func (qb *SceneMarkerStore) Wall(ctx context.Context, q *string) ([]*models.Scen } table := qb.table() - qq := qb.selectDataset().Where(table.Col("title").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Prepared(true).Where(table.Col("title").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 089c5d75ea2..afe92976c58 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -298,7 +298,7 @@ func (qb *StudioStore) FindMany(ctx context.Context, ids []int) ([]*models.Studi table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -422,7 +422,7 @@ func (qb *StudioStore) FindByName(ctx context.Context, name string, nocase bool) if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) + sq := 
qb.selectDataset().Prepared(true).Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, sql.ErrNoRows) { diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index 857bebe174e..b2c473b9f0c 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -32,7 +32,7 @@ func (e *NotFoundError) Error() string { } func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { - q := dialect.Insert(t.table).Rows(o) + q := dialect.Insert(t.table).Prepared(true).Rows(o) ret, err := exec(ctx, q) if err != nil { return nil, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -42,7 +42,7 @@ func (t *table) insert(ctx context.Context, o interface{}) (sql.Result, error) { } func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { - q := dialect.Insert(t.table).Rows(o).Returning(goqu.I("id")) + q := dialect.Insert(t.table).Prepared(true).Rows(o).Returning(goqu.I("id")) val, err := execID(ctx, q) if err != nil { return -1, fmt.Errorf("inserting into %s: %w", t.table.GetTable(), err) @@ -52,7 +52,7 @@ func (t *table) insertID(ctx context.Context, o interface{}) (int, error) { } func (t *table) updateByID(ctx context.Context, id interface{}, o interface{}) error { - q := dialect.Update(t.table).Set(o).Where(t.byID(id)) + q := dialect.Update(t.table).Prepared(true).Set(o).Where(t.byID(id)) if _, err := exec(ctx, q); err != nil { return fmt.Errorf("updating %s: %w", t.table.GetTable(), err) @@ -719,7 +719,7 @@ func (t *imageGalleriesTable) setCover(ctx context.Context, id int, galleryID in table := t.table.table - q := dialect.Update(table).Set(goqu.Record{ + q := dialect.Update(table).Prepared(true).Set(goqu.Record{ "cover": true, }).Where(t.idColumn.Eq(id), table.Col(galleryIDColumn).Eq(galleryID)) @@ -733,7 +733,7 @@ func (t *imageGalleriesTable) setCover(ctx context.Context, id int, galleryID in func (t *imageGalleriesTable) resetCover(ctx context.Context, galleryID int) error { table := 
t.table.table - q := dialect.Update(table).Set(goqu.Record{ + q := dialect.Update(table).Prepared(true).Set(goqu.Record{ "cover": false, }).Where( table.Col(galleryIDColumn).Eq(galleryID), @@ -825,7 +825,7 @@ func (t *relatedFilesTable) destroyJoins(ctx context.Context, fileIDs []models.F func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID models.FileID) error { table := t.table.table - q := dialect.Update(table).Set(goqu.Record{ + q := dialect.Update(table).Prepared(true).Set(goqu.Record{ "primary": false, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Neq(fileID)) @@ -833,7 +833,7 @@ func (t *relatedFilesTable) setPrimary(ctx context.Context, id int, fileID model return fmt.Errorf("unsetting primary flags in %s: %w", t.table.table.GetTable(), err) } - q = dialect.Update(table).Set(goqu.Record{ + q = dialect.Update(table).Prepared(true).Set(goqu.Record{ "primary": true, }).Where(t.idColumn.Eq(id), table.Col(fileIDColumn).Eq(fileID)) diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 37bdc26c925..5241e1664c0 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -305,7 +305,7 @@ func (qb *TagStore) FindMany(ctx context.Context, ids []int) ([]*models.Tag, err table := qb.table() if err := batchExec(ids, defaultBatchSize, func(batch []int) error { - q := qb.selectDataset().Where(table.Col(idColumn).In(batch)) + q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(batch)) unsorted, err := qb.getMany(ctx, q) if err != nil { return err @@ -470,7 +470,7 @@ func (qb *TagStore) FindByName(ctx context.Context, name string, nocase bool) (* if nocase { where += " COLLATE NOCASE" } - sq := qb.selectDataset().Where(goqu.L(where, name)).Limit(1) + sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, name)).Limit(1) ret, err := qb.get(ctx, sq) if err != nil && !errors.Is(err, sql.ErrNoRows) { @@ -495,7 +495,7 @@ func (qb *TagStore) FindByNames(ctx context.Context, names []string, nocase bool for _, name := range names { 
args = append(args, name) } - sq := qb.selectDataset().Where(goqu.L(where, args...)) + sq := qb.selectDataset().Prepared(true).Where(goqu.L(where, args...)) ret, err := qb.getMany(ctx, sq) if err != nil { From 9d72fdd21baf81a771a4373ab25bae74ef48a8a6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:16:25 +0200 Subject: [PATCH 10/85] Change rebind func --- pkg/sqlite/tx.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index eddaf407be6..e0aa2265485 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -50,16 +50,12 @@ func sqlError(err error, sql string, args ...interface{}) error { } func (db *dbWrapperType) Rebind(query string) string { - var bindType int - switch db.dbType { - case SqliteBackend: - bindType = sqlx.QUESTION + case PostgresBackend: + return sqlx.Rebind(sqlx.DOLLAR, query) default: - bindType = sqlx.DOLLAR + return query } - - return sqlx.Rebind(bindType, query) } func (db *dbWrapperType) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { From b743bf5caaa24dd8cd0692c6e6ba3db4c5f86f2d Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:23:09 +0200 Subject: [PATCH 11/85] fix prepared for postgresql --- pkg/sqlite/database_postgres.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index ead21d5b496..ae312537753 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/doug-martin/goqu/v9" + _ "github.com/doug-martin/goqu/v9/dialect/postgres" _ "github.com/jackc/pgx/v5/stdlib" "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/logger" From 0332276e3f302c4becbec418d644515089004b8e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 4 Oct 
2024 17:25:06 +0200 Subject: [PATCH 12/85] nicer init --- internal/manager/init.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/manager/init.go b/internal/manager/init.go index ec70ed23d09..dbd425b27fa 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -39,7 +39,11 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { { var dbType = sqlite.DatabaseType(strings.ToUpper(cfg.GetDatabaseType())) - if dbType != sqlite.SqliteBackend && dbType != sqlite.PostgresBackend { + + switch dbType { + case sqlite.SqliteBackend, sqlite.PostgresBackend: + // Valid case + default: dbType = sqlite.SqliteBackend } From 58603ca77423ed1c2c03fc81ebe6163a5d7eddd6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sun, 6 Oct 2024 17:08:41 +0200 Subject: [PATCH 13/85] Dont fuck up the sorting --- pkg/sqlite/repository.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 5042b54bd6f..8c37a11fd13 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -5,12 +5,12 @@ import ( "database/sql" "errors" "fmt" - "slices" "strings" "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil" ) const idColumn = "id" @@ -100,8 +100,7 @@ func (r *repository) runIdsQuery(ctx context.Context, query string, args []inter } // We removed distinctIDs for postgresql, but now we have duplicates - slices.Sort(vsm) - vsm = slices.Compact(vsm) + vsm = sliceutil.AppendUniques(nil, vsm) return vsm, nil } From 0cdc191807b6f6b72f591b42a6d86b4c431bc41f Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 8 Oct 2024 11:23:00 +0200 Subject: [PATCH 14/85] regexp changes --- pkg/sqlite/criterion_handlers.go | 8 ++++---- pkg/sqlite/filter_internal_test.go | 4 ++-- pkg/sqlite/gallery_filter.go | 8 ++++---- 3 files 
changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index 6171cbd3ad2..afbd7f3f963 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -51,13 +51,13 @@ func stringCriterionHandler(c *models.StringCriterionInput, column string) crite f.setError(err) return } - f.addWhere(fmt.Sprintf("(%s IS NOT NULL AND %[1]s regexp ?)", column), c.Value) + f.addWhere(fmt.Sprintf("(%s IS NOT NULL AND regexp(?, %[1]s))", column), c.Value) case models.CriterionModifierNotMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { f.setError(err) return } - f.addWhere(fmt.Sprintf("(%s IS NULL OR %[1]s NOT regexp ?)", column), c.Value) + f.addWhere(fmt.Sprintf("(%s IS NULL OR NOT regexp(?, %[1]s))", column), c.Value) case models.CriterionModifierIsNull: f.addWhere("(" + column + " IS NULL OR TRIM(" + column + ") = '')") case models.CriterionModifierNotNull: @@ -122,14 +122,14 @@ func pathCriterionHandler(c *models.StringCriterionInput, pathColumn string, bas return } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - f.addWhere(fmt.Sprintf("%s IS NOT NULL AND %s IS NOT NULL AND %s regexp ?", pathColumn, basenameColumn, filepathColumn), c.Value) + f.addWhere(fmt.Sprintf("%s IS NOT NULL AND %s IS NOT NULL AND regexp(?, %s)", pathColumn, basenameColumn, filepathColumn), c.Value) case models.CriterionModifierNotMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { f.setError(err) return } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - f.addWhere(fmt.Sprintf("%s IS NULL OR %s IS NULL OR %s NOT regexp ?", pathColumn, basenameColumn, filepathColumn), c.Value) + f.addWhere(fmt.Sprintf("%s IS NULL OR %s IS NULL OR NOT regexp(?, %s)", pathColumn, basenameColumn, filepathColumn), c.Value) case models.CriterionModifierIsNull: f.addWhere(fmt.Sprintf("%s IS NULL 
OR TRIM(%[1]s) = '' OR %s IS NULL OR TRIM(%[2]s) = ''", pathColumn, basenameColumn)) case models.CriterionModifierNotNull: diff --git a/pkg/sqlite/filter_internal_test.go b/pkg/sqlite/filter_internal_test.go index f416b661cbc..54a6390621b 100644 --- a/pkg/sqlite/filter_internal_test.go +++ b/pkg/sqlite/filter_internal_test.go @@ -569,7 +569,7 @@ func TestStringCriterionHandlerMatchesRegex(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%s IS NOT NULL AND %[1]s regexp ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%s IS NOT NULL AND regexp(?, %[1]s))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(validValue, f.whereClauses[0].args[0]) @@ -597,7 +597,7 @@ func TestStringCriterionHandlerNotMatchesRegex(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%s IS NULL OR %[1]s NOT regexp ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%s IS NULL OR NOT regexp(?, %[1]s))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(validValue, f.whereClauses[0].args[0]) diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index 5f03006f47c..7a3a5f14a47 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -215,8 +215,8 @@ func (qb *galleryFilterHandler) pathCriterionHandler(c *models.StringCriterionIn return } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - clause := makeClause(fmt.Sprintf("%s IS NOT NULL AND %s IS NOT NULL AND %s regexp ?", pathColumn, basenameColumn, filepathColumn), c.Value) - clause2 := makeClause(fmt.Sprintf("%s IS NOT NULL AND %[1]s regexp ?", folderPathColumn), c.Value) + clause := makeClause(fmt.Sprintf("%s IS NOT NULL AND %s IS NOT NULL AND regexp(?, %s)", pathColumn, basenameColumn, filepathColumn), c.Value) + clause2 := makeClause(fmt.Sprintf("%s IS NOT NULL AND regexp(?, 
%[1]s)", folderPathColumn), c.Value) f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) case models.CriterionModifierNotMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { @@ -224,8 +224,8 @@ func (qb *galleryFilterHandler) pathCriterionHandler(c *models.StringCriterionIn return } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - f.addWhere(fmt.Sprintf("%s IS NULL OR %s IS NULL OR %s NOT regexp ?", pathColumn, basenameColumn, filepathColumn), c.Value) - f.addWhere(fmt.Sprintf("%s IS NULL OR %[1]s NOT regexp ?", folderPathColumn), c.Value) + f.addWhere(fmt.Sprintf("%s IS NULL OR %s IS NULL OR NOT regexp(?, %s)", pathColumn, basenameColumn, filepathColumn), c.Value) + f.addWhere(fmt.Sprintf("%s IS NULL OR NOT regexp(?, %[1]s)", folderPathColumn), c.Value) case models.CriterionModifierIsNull: f.addWhere(fmt.Sprintf("%s IS NULL OR TRIM(%[1]s) = '' OR %s IS NULL OR TRIM(%[2]s) = ''", pathColumn, basenameColumn)) f.addWhere(fmt.Sprintf("%s IS NULL OR TRIM(%[1]s) = ''", folderPathColumn)) From ae1759c6a5f68f77467c6ec89527be5f96e6ca13 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 8 Oct 2024 11:57:32 +0200 Subject: [PATCH 15/85] PGSQL migration fix --- pkg/sqlite/database.go | 10 +++------- pkg/sqlite/database_postgres.go | 4 ++-- pkg/sqlite/database_sqlite.go | 4 ++++ .../{67_initial.up.sql => 1_initial.up.sql} | 0 4 files changed, 9 insertions(+), 9 deletions(-) rename pkg/sqlite/migrationsPostgres/{67_initial.up.sql => 1_initial.up.sql} (100%) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index a16f85273b9..bf17ae3a667 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -219,10 +219,10 @@ func (db *Database) Open() error { return fmt.Errorf("error running initial schema migrations: %w", err) } } else { - if databaseSchemaVersion > appSchemaVersion { + if databaseSchemaVersion > db.AppSchemaVersion() { 
return &MismatchedSchemaVersionError{ CurrentSchemaVersion: databaseSchemaVersion, - RequiredSchemaVersion: appSchemaVersion, + RequiredSchemaVersion: db.AppSchemaVersion(), } } @@ -230,7 +230,7 @@ func (db *Database) Open() error { if db.needsMigration() { return &MigrationNeededError{ CurrentSchemaVersion: databaseSchemaVersion, - RequiredSchemaVersion: appSchemaVersion, + RequiredSchemaVersion: db.AppSchemaVersion(), } } } @@ -335,10 +335,6 @@ func (db *Database) Anonymise(outPath string) error { return anon.Anonymise(context.Background()) } -func (db *Database) AppSchemaVersion() uint { - return appSchemaVersion -} - func (db *Database) Version() uint { return db.schemaVersion } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index ae312537753..479779c05ae 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -31,9 +31,9 @@ func (db *PostgresDB) DatabaseType() DatabaseType { return PostgresBackend } -/*func (db *PostgresDB) AppSchemaVersion() uint { +func (db *PostgresDB) AppSchemaVersion() uint { return uint(0 - (66 - int(appSchemaVersion))) -}*/ +} func (db *PostgresDB) DatabaseConnector() string { return db.dbConfig.(string) diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index d5c26a3cd5f..cc531d97f36 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -40,6 +40,10 @@ func (db *SQLiteDB) DatabaseType() DatabaseType { return SqliteBackend } +func (db *SQLiteDB) AppSchemaVersion() uint { + return appSchemaVersion +} + func (db *SQLiteDB) DatabasePath() string { return (db.dbConfig).(string) } diff --git a/pkg/sqlite/migrationsPostgres/67_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql similarity index 100% rename from pkg/sqlite/migrationsPostgres/67_initial.up.sql rename to pkg/sqlite/migrationsPostgres/1_initial.up.sql From bae1463ce963c03ee6c1d8b11e52cb023f8dc478 Mon Sep 17 00:00:00 2001 From: Nodude 
<75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 8 Oct 2024 12:06:30 +0200 Subject: [PATCH 16/85] PGSQL Revert go.mod changes (or try to) --- go.mod | 22 +++++++++------------- go.sum | 53 +++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 46 insertions(+), 29 deletions(-) diff --git a/go.mod b/go.mod index 3259a122ddb..fd21a2906d1 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( github.com/gorilla/sessions v1.2.1 github.com/gorilla/websocket v1.5.0 github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/jackc/pgx/v5 v5.3.1 github.com/jinzhu/copier v0.4.0 github.com/jmoiron/sqlx v1.4.0 github.com/json-iterator/go v1.1.12 @@ -51,23 +52,16 @@ require ( github.com/vektra/mockery/v2 v2.10.0 github.com/xWTF/chardet v0.0.0-20230208095535-c780f2ac244e github.com/zencoder/go-dash/v3 v3.0.2 - golang.org/x/crypto v0.27.0 + golang.org/x/crypto v0.24.0 golang.org/x/image v0.18.0 golang.org/x/net v0.26.0 - golang.org/x/sys v0.25.0 - golang.org/x/term v0.24.0 - golang.org/x/text v0.18.0 + golang.org/x/sys v0.21.0 + golang.org/x/term v0.21.0 + golang.org/x/text v0.16.0 gopkg.in/guregu/null.v4 v4.0.0 gopkg.in/yaml.v2 v2.4.0 ) -require ( - github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/puddle/v2 v2.2.2 // indirect - github.com/lib/pq v1.10.9 // indirect -) - require ( github.com/agnivade/levenshtein v1.1.1 // indirect github.com/antchfx/xpath v1.2.3 // indirect @@ -89,8 +83,10 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jackc/pgx/v5 v5.7.1 + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/magiconair/properties v1.8.7 // indirect 
github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -120,7 +116,7 @@ require ( github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.uber.org/atomic v1.11.0 // indirect golang.org/x/mod v0.18.0 // indirect - golang.org/x/sync v0.8.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/tools v0.22.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index caafd68bbd8..3ad2733e482 100644 --- a/go.sum +++ b/go.sum @@ -54,11 +54,15 @@ filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4 github.com/99designs/gqlgen v0.17.2/go.mod h1:K5fzLKwtph+FFgh9j7nFbRUdBKvTcGnsta51fsMTn3o= github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ= github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.9.2 
h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE= github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= @@ -173,11 +177,21 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw= +github.com/dhui/dktest v0.3.16/go.mod h1:gYaA3LRmM8Z4vJl2MA0THIigJoZrwOansEOsp+kqxp0= github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d h1:wi6jN5LVt/ljaBG4ue79Ekzb12QfJ52L9Q98tl8SWhw= @@ -249,6 +263,7 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/uuid/v5 v5.1.0 h1:S5rqVKIigghZTCBKPCw0Y+bXkn26K3TB5mvQq2Ix8dk= github.com/gofrs/uuid/v5 v5.1.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= @@ -411,12 +426,10 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= -github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= -github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= -github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= -github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= 
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU= +github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -520,6 +533,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -527,6 +542,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -538,6 +555,10 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -736,8 +757,8 @@ golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto 
v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -862,8 +883,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -955,13 +976,13 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -974,8 +995,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From b94e7b775cb6123c824944393d95f6e22de64b5c Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 9 Oct 2024 12:03:54 +0200 Subject: [PATCH 17/85] Add the lock back --- pkg/sqlite/database.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index bf17ae3a667..440f3414dd6 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -204,6 +204,9 @@ func (db *Database) Ready() error { // necessary migrations must be run separately using RunMigrations. // Returns true if the database is new. func (db *Database) Open() error { + db.lock() + defer db.unlock() + databaseSchemaVersion, err := db.getDatabaseSchemaVersion() if err != nil { return fmt.Errorf("getting database schema version: %w", err) From 0ce122649f9f80c3a5963ef9073f070f8bfb11a6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 9 Oct 2024 12:11:06 +0200 Subject: [PATCH 18/85] Fix linter issues --- internal/autotag/integration_test.go | 5 +++-- pkg/sqlite/setup_test.go | 5 +++-- pkg/sqlite/sql.go | 5 ----- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index e74cb30aa66..03be36a9e12 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -58,8 +58,9 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() - db = sqlite.NewDatabase() - if err := db.Open(databaseFile); err != nil { + sqlite.RegisterSqliteDialect() + db = 
sqlite.NewSQLiteDatabase(databaseFile) + if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 624ffb4e222..8e0d8e0000a 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -639,13 +639,14 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() - db = sqlite.NewDatabase() + sqlite.RegisterSqliteDialect() + db = sqlite.NewSQLiteDatabase(databaseFile) db.SetBlobStoreOptions(sqlite.BlobStoreOptions{ UseDatabase: true, // don't use filesystem }) - if err := db.Open(databaseFile); err != nil { + if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 780d2e9881b..3d6c3a17bdb 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -16,11 +16,6 @@ func selectAll(tableName string) string { return "SELECT " + idColumn + " FROM " + tableName + " " } -func distinctIDs(qb *queryBuilder, tableName string) { - qb.addColumn("DISTINCT " + getColumn(tableName, "id")) - qb.from = tableName -} - func selectIDs(qb *queryBuilder, tableName string) { qb.addColumn(getColumn(tableName, "id")) qb.from = tableName From f810b3b36189a6f14c1fb8dbfee6a755bef2446a Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:00:49 +0200 Subject: [PATCH 19/85] Close being nil fix Blob test fixes --- pkg/sqlite/blob.go | 27 ++++++++++++++++----------- pkg/sqlite/database_sqlite.go | 4 ++-- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 776a51745ae..21b1f78b86d 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -104,9 +104,10 @@ func (qb *BlobStore) Write(ctx context.Context, data []byte) (string, error) { // only write blob to the database if UseDatabase is true // always at least write the checksum - var storedData 
[]byte + var storedData sql.Null[[]byte] if qb.options.UseDatabase { - storedData = data + storedData.V = data + storedData.Valid = len(storedData.V) > 0 } if err := qb.write(ctx, checksum, storedData); err != nil { @@ -122,15 +123,11 @@ func (qb *BlobStore) Write(ctx context.Context, data []byte) (string, error) { return checksum, nil } -func (qb *BlobStore) write(ctx context.Context, checksum string, data []byte) error { - var blobdata sql.Null[[]byte] - blobdata.V = data - blobdata.Valid = len(data) > 0 - +func (qb *BlobStore) write(ctx context.Context, checksum string, data sql.Null[[]byte]) error { table := qb.table() q := dialect.Insert(table).Rows(blobRow{ Checksum: checksum, - Blob: blobdata, + Blob: data, }).OnConflict(goqu.DoNothing()) _, err := exec(ctx, q) @@ -386,9 +383,17 @@ func (qb *blobJoinQueryBuilder) UpdateImage(ctx context.Context, id int, blobCol return err } - sqlQuery := fmt.Sprintf("UPDATE %s SET %s = ? WHERE id = ?", qb.joinTable, blobCol) + sqlQuery := dialect.From(qb.joinTable).Update(). + Set(goqu.Record{blobCol: checksum}). + Prepared(true). 
+ Where(goqu.Ex{"id": id}) + + query, args, err := sqlQuery.ToSQL() + if err != nil { + return err + } - if _, err := dbWrapper.Exec(ctx, sqlQuery, checksum, id); err != nil { + if _, err := dbWrapper.Exec(ctx, query, args...); err != nil { return err } @@ -445,7 +450,7 @@ func (qb *blobJoinQueryBuilder) DestroyImage(ctx context.Context, id int, blobCo return err } - if _, err = dbWrapper.Exec(ctx, query, args); err != nil { + if _, err = dbWrapper.Exec(ctx, query, args...); err != nil { return err } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index cc531d97f36..00dc086f7d9 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -78,7 +78,7 @@ func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, func (db *SQLiteDB) Remove() error { databasePath := db.DatabasePath() - err := db.Close() + err := (*Database)(db).Close() if err != nil { return fmt.Errorf("error closing database: %w", err) @@ -108,7 +108,7 @@ func (db *SQLiteDB) Reset() error { return err } - if err := db.Open(); err != nil { + if err := (*Database)(db).Open(); err != nil { return fmt.Errorf("[reset DB] unable to initialize: %w", err) } From aaaad7916f73306fe6af4c49eea9d3c9a22f96a4 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 9 Oct 2024 15:56:28 +0200 Subject: [PATCH 20/85] fix anonymiser --- pkg/sqlite/anonymise.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index a97c964f1bf..5508e8ddad5 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -32,7 +32,7 @@ func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { return nil, fmt.Errorf("vacuuming into %s: %w", outPath, err) } - newDB := NewSQLiteDatabase(db.DatabasePath()) + newDB := NewSQLiteDatabase(outPath) if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } From 
c54540b8659aeaf951c9a13048843f602c587c58 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:06:03 +0200 Subject: [PATCH 21/85] Readbility nits around getDBBoolean --- pkg/sqlite/gallery_filter.go | 6 +++--- pkg/sqlite/image_filter.go | 7 +++---- pkg/sqlite/repository.go | 2 +- pkg/sqlite/scene_filter.go | 6 +++--- pkg/sqlite/tag.go | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index 7a3a5f14a47..d3465eb6cf6 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -378,12 +378,12 @@ func (qb *galleryFilterHandler) performerFavoriteCriterionHandler(performerfavor if *performerfavorite { // contains at least one favorite f.addLeftJoin("performers", "", "performers.id = performers_galleries.performer_id") - f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) + f.addWhere("performers.favorite = " + getDBBoolean(true)) } else { // contains zero favorites - f.addLeftJoin(fmt.Sprintf(`(SELECT performers_galleries.gallery_id as id FROM performers_galleries + f.addLeftJoin(`(SELECT performers_galleries.gallery_id as id FROM performers_galleries JOIN performers ON performers.id = performers_galleries.performer_id -GROUP BY performers_galleries.gallery_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "galleries.id = nofaves.id") +GROUP BY performers_galleries.gallery_id HAVING SUM(performers.favorite) = `+getDBBoolean(false)+")", "nofaves", "galleries.id = nofaves.id") f.addWhere("performers_galleries.gallery_id IS NULL OR nofaves.id IS NOT NULL") } } diff --git a/pkg/sqlite/image_filter.go b/pkg/sqlite/image_filter.go index 8c0ec7dbe98..255451f178f 100644 --- a/pkg/sqlite/image_filter.go +++ b/pkg/sqlite/image_filter.go @@ -2,7 +2,6 @@ package sqlite import ( "context" - "fmt" "github.com/stashapp/stash/pkg/models" ) @@ -255,12 +254,12 @@ func (qb 
*imageFilterHandler) performerFavoriteCriterionHandler(performerfavorit if *performerfavorite { // contains at least one favorite f.addLeftJoin("performers", "", "performers.id = performers_images.performer_id") - f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) + f.addWhere("performers.favorite = " + getDBBoolean(true)) } else { // contains zero favorites - f.addLeftJoin(fmt.Sprintf(`(SELECT performers_images.image_id as id FROM performers_images + f.addLeftJoin(`(SELECT performers_images.image_id as id FROM performers_images JOIN performers ON performers.id = performers_images.performer_id -GROUP BY performers_images.image_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "images.id = nofaves.id") +GROUP BY performers_images.image_id HAVING SUM(performers.favorite) = `+getDBBoolean(false)+")", "nofaves", "images.id = nofaves.id") f.addWhere("performers_images.image_id IS NULL OR nofaves.id IS NOT NULL") } } diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 8c37a11fd13..64059e7761e 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -461,7 +461,7 @@ func idToIndexMap(ids []int) map[int]int { func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bool) ([][]models.FileID, error) { var primaryClause string if primaryOnly { - primaryClause = fmt.Sprintf(" AND \"primary\" = %s", getDBBoolean(true)) + primaryClause = ` AND "primary" = ` + getDBBoolean(true) } query := fmt.Sprintf("SELECT %s as id, file_id, \"primary\" from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 66e7d744dad..632152c8ebb 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -455,12 +455,12 @@ func (qb *sceneFilterHandler) performerFavoriteCriterionHandler(performerfavorit if *performerfavorite { // contains at least one favorite 
f.addLeftJoin("performers", "", "performers.id = performers_scenes.performer_id") - f.addWhere(fmt.Sprintf("performers.favorite = %s", getDBBoolean(true))) + f.addWhere("performers.favorite = " + getDBBoolean(true)) } else { // contains zero favorites - f.addLeftJoin(fmt.Sprintf(`(SELECT performers_scenes.scene_id as id FROM performers_scenes + f.addLeftJoin(`(SELECT performers_scenes.scene_id as id FROM performers_scenes JOIN performers ON performers.id = performers_scenes.performer_id -GROUP BY performers_scenes.scene_id HAVING SUM(performers.favorite) = %s)`, getDBBoolean(false)), "nofaves", "scenes.id = nofaves.id") +GROUP BY performers_scenes.scene_id HAVING SUM(performers.favorite) = `+getDBBoolean(false)+")", "nofaves", "scenes.id = nofaves.id") f.addWhere("performers_scenes.scene_id IS NULL OR nofaves.id IS NOT NULL") } } diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 5241e1664c0..c3725b15e5a 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -584,7 +584,7 @@ func (qb *TagStore) QueryForAutoTag(ctx context.Context, words []string) ([]*mod whereOr := "(" + strings.Join(whereClauses, " OR ") + ")" where := strings.Join([]string{ - fmt.Sprintf("tags.ignore_auto_tag = %s", getDBBoolean(false)), + "tags.ignore_auto_tag = " + getDBBoolean(false), whereOr, }, " AND ") return qb.queryTags(ctx, query+" WHERE "+where, args) From 1d50d453d3c026c0d6946a840d74b777336d7808 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:12:09 +0200 Subject: [PATCH 22/85] Infer db type --- internal/manager/config/config.go | 5 ----- internal/manager/init.go | 9 +++------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/internal/manager/config/config.go b/internal/manager/config/config.go index 349c29a3bf9..e42a6786678 100644 --- a/internal/manager/config/config.go +++ b/internal/manager/config/config.go @@ -51,7 +51,6 @@ const ( DefaultMaxSessionAge = 60 * 60 * 1 // 1 hours Database = 
"database" - DatabaseType = "database_type" DatabaseConnectionString = "database_string" Exclude = "exclude" @@ -697,10 +696,6 @@ func (i *Config) GetDatabasePath() string { return i.getString(Database) } -func (i *Config) GetDatabaseType() string { - return i.getString(DatabaseType) -} - func (i *Config) GetDatabaseConnectionString() string { return i.getString(DatabaseConnectionString) } diff --git a/internal/manager/init.go b/internal/manager/init.go index dbd425b27fa..c8c5e0d4170 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -38,13 +38,10 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { var db *sqlite.Database { - var dbType = sqlite.DatabaseType(strings.ToUpper(cfg.GetDatabaseType())) + var dbType = sqlite.SqliteBackend - switch dbType { - case sqlite.SqliteBackend, sqlite.PostgresBackend: - // Valid case - default: - dbType = sqlite.SqliteBackend + if strings.HasPrefix(strings.ToUpper(cfg.GetDatabaseConnectionString()), string(sqlite.PostgresBackend)) { + dbType = sqlite.PostgresBackend } switch dbType { From 16e0e368f5517ea5a216185dd45bd95f14e033d5 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:16:38 +0200 Subject: [PATCH 23/85] Missed a readability nit --- pkg/sqlite/repository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 64059e7761e..08b596a6b9b 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -464,7 +464,7 @@ func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bo primaryClause = ` AND "primary" = ` + getDBBoolean(true) } - query := fmt.Sprintf("SELECT %s as id, file_id, \"primary\" from %s WHERE %[1]s IN %[3]s%s", r.idColumn, r.tableName, getInBinding(len(ids)), primaryClause) + query := fmt.Sprintf(`SELECT %s as id, file_id, "primary" from %s WHERE %[1]s IN %[3]s%s`, r.idColumn, r.tableName, 
getInBinding(len(ids)), primaryClause) idi := make([]interface{}, len(ids)) for i, id := range ids { From 9b55dcd28971181c039a84a389f39295610ea8dd Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:18:27 +0200 Subject: [PATCH 24/85] Another nit --- pkg/sqlite/repository.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 08b596a6b9b..0d49bcee812 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -505,7 +505,7 @@ func (r *filesRepository) getMany(ctx context.Context, ids []int, primaryOnly bo } func (r *filesRepository) get(ctx context.Context, id int) ([]models.FileID, error) { - query := fmt.Sprintf("SELECT file_id, \"primary\" from %s WHERE %s = ?", r.tableName, r.idColumn) + query := fmt.Sprintf(`SELECT file_id, "primary" from %s WHERE %s = ?`, r.tableName, r.idColumn) type relatedFile struct { FileID models.FileID `db:"file_id"` From 21f6d16645971a0e1a5d654032024592b8e28cd7 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:28:26 +0200 Subject: [PATCH 25/85] No more psql lock --- pkg/sqlite/database.go | 16 ---------------- pkg/sqlite/database_postgres.go | 5 ++++- pkg/sqlite/database_sqlite.go | 16 ++++++++++++++++ 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 440f3414dd6..6efe3a01c2e 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -253,22 +253,6 @@ func (db *Database) Open() error { return nil } -// lock locks the database for writing. This method will block until the lock is acquired. 
-func (db *Database) lock() { - db.lockChan <- struct{}{} -} - -// unlock unlocks the database -func (db *Database) unlock() { - // will block the caller if the lock is not held, so check first - select { - case <-db.lockChan: - return - default: - panic("database is not locked") - } -} - func (db *Database) Close() error { db.lock() defer db.unlock() diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 479779c05ae..8226ceaba59 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -17,7 +17,6 @@ func NewPostgresDatabase(dbConnector string) *Database { db := &PostgresDB{ storeRepository: newDatabase(), - lockChan: make(chan struct{}, 1), dbConfig: dbConnector, } db.dbInterface = db @@ -27,6 +26,10 @@ func NewPostgresDatabase(dbConnector string) *Database { return (*Database)(db) } +// Does nothing +func (db *PostgresDB) lock() {} +func (db *PostgresDB) unlock() {} + func (db *PostgresDB) DatabaseType() DatabaseType { return PostgresBackend } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 00dc086f7d9..f80b61fde91 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -36,6 +36,22 @@ func NewSQLiteDatabase(dbPath string) *Database { return (*Database)(db) } +// lock locks the database for writing. This method will block until the lock is acquired. 
+func (db *SQLiteDB) lock() { + db.lockChan <- struct{}{} +} + +// unlock unlocks the database +func (db *SQLiteDB) unlock() { + // will block the caller if the lock is not held, so check first + select { + case <-db.lockChan: + return + default: + panic("database is not locked") + } +} + func (db *SQLiteDB) DatabaseType() DatabaseType { return SqliteBackend } From 8e4ce3a8e0a54ec83e2f7a90f075719b639f3d3b Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 00:51:17 +0200 Subject: [PATCH 26/85] Multiple lines in pgsql schema nit --- pkg/sqlite/migrationsPostgres/1_initial.up.sql | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index c12e1311153..32aaef69f54 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -96,7 +96,10 @@ CREATE TABLE IF NOT EXISTS images ( organized boolean not null default FALSE, created_at timestamp not null, updated_at timestamp not null, - date date, code text, photographer text, details text, + date date, + code text, + photographer text, + details text, foreign key(studio_id) references studios(id) on delete SET NULL ); CREATE TABLE image_urls ( From 54034317d6640d8a7d41abd54c6529dfce27e4cb Mon Sep 17 00:00:00 2001 From: its-josh4 <74079536+its-josh4@users.noreply.github.com> Date: Wed, 9 Oct 2024 16:59:16 -0700 Subject: [PATCH 27/85] Use a single "database" configuration as connection string too - If the `database` config option begins with `postgres:`, then we use postgres and parse it as URL - If it begins with `sqlite:` we use SQLite (and the path to file after, e.g. `sqlite:/path/to/file.db` or `sqlite:file.db`) - If it doesn't have any prefix, then we assume it's a SQLite DB path (e.g. 
`file.db`) - this is for backwards-compatibility --- internal/api/resolver_mutation_configure.go | 4 +-- internal/api/resolver_query_configuration.go | 2 +- internal/manager/config/config.go | 11 +++----- .../manager/config/config_concurrency_test.go | 2 +- internal/manager/init.go | 25 ++++++++----------- 5 files changed, 17 insertions(+), 27 deletions(-) diff --git a/internal/api/resolver_mutation_configure.go b/internal/api/resolver_mutation_configure.go index 34b627b3c66..f18243aed7f 100644 --- a/internal/api/resolver_mutation_configure.go +++ b/internal/api/resolver_mutation_configure.go @@ -127,8 +127,8 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen return nil } - existingDBPath := c.GetDatabasePath() - if input.DatabasePath != nil && existingDBPath != *input.DatabasePath { + existingDBUrl := c.GetDatabaseUrl() + if input.DatabasePath != nil && existingDBUrl != *input.DatabasePath { if err := checkConfigOverride(config.Database); err != nil { return makeConfigGeneralResult(), err } diff --git a/internal/api/resolver_query_configuration.go b/internal/api/resolver_query_configuration.go index 3328e4a356b..e28128fca9e 100644 --- a/internal/api/resolver_query_configuration.go +++ b/internal/api/resolver_query_configuration.go @@ -81,7 +81,7 @@ func makeConfigGeneralResult() *ConfigGeneralResult { return &ConfigGeneralResult{ Stashes: config.GetStashPaths(), - DatabasePath: config.GetDatabasePath(), + DatabasePath: config.GetDatabaseUrl(), BackupDirectoryPath: config.GetBackupDirectoryPath(), GeneratedPath: config.GetGeneratedPath(), MetadataPath: config.GetMetadataPath(), diff --git a/internal/manager/config/config.go b/internal/manager/config/config.go index e42a6786678..49cef271f47 100644 --- a/internal/manager/config/config.go +++ b/internal/manager/config/config.go @@ -50,8 +50,7 @@ const ( DefaultMaxSessionAge = 60 * 60 * 1 // 1 hours - Database = "database" - DatabaseConnectionString = "database_string" + Database = 
"database" Exclude = "exclude" ImageExclude = "image_exclude" @@ -692,14 +691,10 @@ func (i *Config) GetMetadataPath() string { return i.getString(Metadata) } -func (i *Config) GetDatabasePath() string { +func (i *Config) GetDatabaseUrl() string { return i.getString(Database) } -func (i *Config) GetDatabaseConnectionString() string { - return i.getString(DatabaseConnectionString) -} - func (i *Config) GetBackupDirectoryPath() string { return i.getString(BackupDirectoryPath) } @@ -708,7 +703,7 @@ func (i *Config) GetBackupDirectoryPathOrDefault() string { ret := i.GetBackupDirectoryPath() if ret == "" { // #4915 - default to the same directory as the database - return filepath.Dir(i.GetDatabasePath()) + return filepath.Dir(i.GetDatabaseUrl()) } return ret diff --git a/internal/manager/config/config_concurrency_test.go b/internal/manager/config/config_concurrency_test.go index fd9b067c7e7..3c5313113dc 100644 --- a/internal/manager/config/config_concurrency_test.go +++ b/internal/manager/config/config_concurrency_test.go @@ -35,7 +35,7 @@ func TestConcurrentConfigAccess(t *testing.T) { i.SetInterface(Cache, i.GetCachePath()) i.SetInterface(Generated, i.GetGeneratedPath()) i.SetInterface(Metadata, i.GetMetadataPath()) - i.SetInterface(Database, i.GetDatabasePath()) + i.SetInterface(Database, i.GetDatabaseUrl()) // these must be set as strings since the original values are also strings // setting them as []byte will cause the returned string to be corrupted diff --git a/internal/manager/init.go b/internal/manager/init.go index c8c5e0d4170..ba30cd1f9b2 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -36,21 +36,16 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { ctx := context.TODO() var db *sqlite.Database - - { - var dbType = sqlite.SqliteBackend - - if strings.HasPrefix(strings.ToUpper(cfg.GetDatabaseConnectionString()), string(sqlite.PostgresBackend)) { - dbType = sqlite.PostgresBackend - } - - switch dbType { - case 
sqlite.SqliteBackend: - sqlite.RegisterSqliteDialect() - db = sqlite.NewSQLiteDatabase(cfg.GetDatabasePath()) - case sqlite.PostgresBackend: - db = sqlite.NewPostgresDatabase(cfg.GetDatabaseConnectionString()) - } + dbUrl := cfg.GetDatabaseUrl() + upperUrl := strings.ToUpper(dbUrl) + switch { + case strings.HasPrefix(upperUrl, string(sqlite.PostgresBackend)+":"): + db = sqlite.NewPostgresDatabase(dbUrl) + case strings.HasPrefix(upperUrl, string(sqlite.SqliteBackend)+":"): + db = sqlite.NewSQLiteDatabase(dbUrl[len(sqlite.SqliteBackend)+1:]) + default: + // Assume it's the path to a SQLite database - for backwards compat + db = sqlite.NewSQLiteDatabase(dbUrl) } repo := db.Repository() From 1520e4752dde47da31c873934a84d5642d0d0b45 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:24:39 +0200 Subject: [PATCH 28/85] Fix values criterion for pgsql. Switch from GROUP_CONCAT to string_agg for sqlite/pgsql cross-compatibility. Add NOCASE collation for pgsql. Fix some varchar errors in pgsql. Switch from varchar(36) to uuid in pgsql. 
--- pkg/sqlite/criterion_handlers.go | 23 +++++++++++-------- .../migrationsPostgres/1_initial.up.sql | 11 +++++---- pkg/sqlite/performer_filter.go | 2 +- pkg/sqlite/scene.go | 2 +- pkg/sqlite/scene_marker_filter.go | 2 +- pkg/sqlite/sql.go | 4 ++-- 6 files changed, 25 insertions(+), 19 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index afbd7f3f963..ac209814165 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -412,7 +412,7 @@ func (m *joinedMultiCriterionHandlerBuilder) handler(c *models.MultiCriterionInp "primaryFK": m.primaryFK, "primaryTable": m.primaryTable, }) - havingClause = fmt.Sprintf("count(distinct %s.%s) IS %d", joinAlias, m.foreignFK, len(criterion.Value)) + havingClause = fmt.Sprintf("count(distinct %s.%s) = %d", joinAlias, m.foreignFK, len(criterion.Value)) args = append(args, len(criterion.Value)) case models.CriterionModifierNotEquals: f.setError(fmt.Errorf("not equals modifier is not supported for multi criterion input")) @@ -420,7 +420,7 @@ func (m *joinedMultiCriterionHandlerBuilder) handler(c *models.MultiCriterionInp // includes all of the provided ids m.addJoinTable(f) whereClause = fmt.Sprintf("%s.%s IN %s", joinAlias, m.foreignFK, getInBinding(len(criterion.Value))) - havingClause = fmt.Sprintf("count(distinct %s.%s) IS %d", joinAlias, m.foreignFK, len(criterion.Value)) + havingClause = fmt.Sprintf("count(distinct %s.%s) = %d", joinAlias, m.foreignFK, len(criterion.Value)) } f.addWhere(whereClause, args...) 
@@ -674,7 +674,7 @@ WHERE id in {inBinding} {unionClause}) `, withClauseMap) - query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || GROUP_CONCAT('(' || root_id || ', ' || item_id || ')') AS val FROM items", withClause) + query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || string_agg('(' || root_id || ', ' || item_id || ')', ',') AS val FROM items", withClause) var valuesClause sql.NullString err := dbWrapper.Get(ctx, &valuesClause, query, args...) @@ -699,7 +699,7 @@ func addHierarchicalConditionClauses(f *filterBuilder, criterion models.Hierarch f.addWhere(fmt.Sprintf("%s.%s IS NOT NULL", table, idColumn)) case models.CriterionModifierIncludesAll: f.addWhere(fmt.Sprintf("%s.%s IS NOT NULL", table, idColumn)) - f.addHaving(fmt.Sprintf("count(distinct %s.%s) IS %d", table, idColumn, len(criterion.Value))) + f.addHaving(fmt.Sprintf("count(distinct %s.%s) = %d", table, idColumn, len(criterion.Value))) case models.CriterionModifierExcludes: f.addWhere(fmt.Sprintf("%s.%s IS NULL", table, idColumn)) } @@ -742,6 +742,11 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hierarchica criterion.Value = nil } + var pgsql_fix string + if dbWrapper.dbType == PostgresBackend { + pgsql_fix = " AS v(column1, column2)" + } + if len(criterion.Value) > 0 { valuesClause, err := getHierarchicalValues(ctx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth) if err != nil { @@ -751,10 +756,10 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hierarchica switch criterion.Modifier { case models.CriterionModifierIncludes: - f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s))", m.primaryTable, m.foreignFK, valuesClause)) + f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s)%s)", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) case models.CriterionModifierIncludesAll: - f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s))", m.primaryTable, m.foreignFK, valuesClause)) - 
f.addHaving(fmt.Sprintf("count(distinct %s.%s) IS %d", m.primaryTable, m.foreignFK, len(criterion.Value))) + f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s)%s)", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) + f.addHaving(fmt.Sprintf("count(distinct %s.%s) = %d", m.primaryTable, m.foreignFK, len(criterion.Value))) } } @@ -765,7 +770,7 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hierarchica return } - f.addWhere(fmt.Sprintf("%s.%s NOT IN (SELECT column2 FROM (%s)) OR %[1]s.%[2]s IS NULL", m.primaryTable, m.foreignFK, valuesClause)) + f.addWhere(fmt.Sprintf("%s.%s NOT IN (SELECT column2 FROM (%s)%s) OR %[1]s.%[2]s IS NULL", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) } } } @@ -796,7 +801,7 @@ func (m *joinedHierarchicalMultiCriterionHandlerBuilder) addHierarchicalConditio case models.CriterionModifierEquals: // includes only the provided ids f.addWhere(fmt.Sprintf("%s.%s IS NOT NULL", table, idColumn)) - f.addHaving(fmt.Sprintf("count(distinct %s.%s) IS %d", table, idColumn, len(criterion.Value))) + f.addHaving(fmt.Sprintf("count(distinct %s.%s) = %d", table, idColumn, len(criterion.Value))) f.addWhere(utils.StrFormat("(SELECT COUNT(*) FROM {joinTable} s WHERE s.{primaryFK} = {primaryTable}.{primaryKey}) = ?", utils.StrFormatMap{ "joinTable": m.joinTable, "primaryFK": m.primaryFK, diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index 32aaef69f54..958510d4cd9 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -1,4 +1,5 @@ CREATE COLLATION IF NOT EXISTS NATURAL_CI (provider = icu, locale = 'en@colNumeric=yes'); +CREATE COLLATION IF NOT EXISTS NOCASE (provider = icu, locale = 'und-u-ks-level2', deterministic = false); CREATE TABLE blobs ( checksum varchar(255) NOT NULL PRIMARY KEY, blob bytea @@ -50,8 +51,8 @@ CREATE TABLE IF NOT EXISTS performers ( measurements varchar(255), fake_tits 
varchar(255), career_length varchar(255), - tattoos varchar(255), - piercings varchar(255), + tattoos text, -- free-form descriptions can exceed 255 chars + piercings text, favorite boolean not null default FALSE, created_at timestamp not null, updated_at timestamp not null, @@ -203,13 +204,13 @@ CREATE TABLE IF NOT EXISTS scenes_o_dates ( CREATE TABLE performer_stash_ids ( performer_id integer, endpoint varchar(255), - stash_id varchar(36), + stash_id uuid, foreign key(performer_id) references performers(id) on delete CASCADE ); CREATE TABLE studio_stash_ids ( studio_id integer, endpoint varchar(255), - stash_id varchar(36), + stash_id uuid, foreign key(studio_id) references studios(id) on delete CASCADE ); CREATE TABLE tags_relations ( @@ -336,7 +337,7 @@ CREATE TABLE IF NOT EXISTS images_tags ( CREATE TABLE IF NOT EXISTS scene_stash_ids ( scene_id integer NOT NULL, endpoint varchar(255) NOT NULL, - stash_id varchar(36) NOT NULL, + stash_id uuid NOT NULL, foreign key(scene_id) references scenes(id) on delete CASCADE, PRIMARY KEY(scene_id, endpoint) ); diff --git a/pkg/sqlite/performer_filter.go b/pkg/sqlite/performer_filter.go index 72990a7febd..8d532835478 100644 --- a/pkg/sqlite/performer_filter.go +++ b/pkg/sqlite/performer_filter.go @@ -519,7 +519,7 @@ func (qb *performerFilterHandler) appearsWithCriterionHandler(performers *models if performers.Modifier == models.CriterionModifierIncludesAll && len(performers.Value) > 1 { templStr += ` GROUP BY {primaryTable}2.performer_id - HAVING(count(distinct {primaryTable}.performer_id) IS ` + strconv.Itoa(len(performers.Value)) + `)` + HAVING(count(distinct {primaryTable}.performer_id) = ` + strconv.Itoa(len(performers.Value)) + `)` } var unions []string diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index e4da8777578..2e21f52b166 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -40,7 +40,7 @@ const ( ) var findExactDuplicateQuery = ` -SELECT GROUP_CONCAT(DISTINCT scene_id) as ids +SELECT string_agg(DISTINCT
scene_id, ',') as ids FROM ( SELECT scenes.id as scene_id , video_files.duration as file_duration diff --git a/pkg/sqlite/scene_marker_filter.go b/pkg/sqlite/scene_marker_filter.go index d5e044e85a7..c001a09f6b9 100644 --- a/pkg/sqlite/scene_marker_filter.go +++ b/pkg/sqlite/scene_marker_filter.go @@ -116,7 +116,7 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera // includes only the provided ids f.addWhere("marker_tags.root_tag_id IS NOT NULL") tagsLen := len(tags.Value) - f.addHaving(fmt.Sprintf("count(distinct marker_tags.root_tag_id) IS %d", tagsLen)) + f.addHaving(fmt.Sprintf("count(distinct marker_tags.root_tag_id) = %d", tagsLen)) // decrement by one to account for primary tag id f.addWhere("(SELECT COUNT(*) FROM scene_markers_tags s WHERE s.scene_marker_id = scene_markers.id) = ?", tagsLen-1) case models.CriterionModifierNotEquals: diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 3d6c3a17bdb..229ff633287 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -328,10 +328,10 @@ func getMultiCriterionClause(primaryTable, foreignTable, joinTable, primaryFK, f // includes all of the provided ids if joinTable != "" { whereClause = joinTable + "." + foreignFK + " IN " + getInBinding(len(criterion.Value)) - havingClause = "count(distinct " + joinTable + "." + foreignFK + ") IS " + strconv.Itoa(len(criterion.Value)) + havingClause = "count(distinct " + joinTable + "." 
+ foreignFK + ") = " + strconv.Itoa(len(criterion.Value)) } else { whereClause = foreignTable + ".id IN " + getInBinding(len(criterion.Value)) - havingClause = "count(distinct " + foreignTable + ".id) IS " + strconv.Itoa(len(criterion.Value)) + havingClause = "count(distinct " + foreignTable + ".id) = " + strconv.Itoa(len(criterion.Value)) } case models.CriterionModifierExcludes: // excludes all of the provided ids From edc3c7dab378f653a0dc35ae43aebced56c650b9 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:33:06 +0200 Subject: [PATCH 29/85] Better pgsql open --- pkg/sqlite/database_postgres.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 8226ceaba59..a55f153918a 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -50,14 +50,10 @@ func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.D } if disableForeignKeys { - _, err = conn.Exec("SET session_replication_role = replica;") - - if err != nil { - return nil, fmt.Errorf("conn.Exec(): %w", err) - } + logger.Warn("open with disableForeignKeys is not implemented.") } if !writable { - _, err = conn.Exec("SET default_transaction_read_only = ON;") + _, err = conn.Exec("SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY;") if err != nil { return nil, fmt.Errorf("conn.Exec(): %w", err) From 817da3ee83b3cf5d3829951db9ad01da0dd1ab3e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:33:40 +0200 Subject: [PATCH 30/85] Fix DISTINCT aggregates must have exactly one argument --- pkg/sqlite/scene.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 2e21f52b166..ccdf6d5d9ae 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -40,7 +40,7 @@ const ( ) var 
findExactDuplicateQuery = ` -SELECT string_agg(DISTINCT scene_id, ',') as ids +SELECT string_agg(DISTINCT scene_id) as ids FROM ( SELECT scenes.id as scene_id , video_files.duration as file_duration From 2a79c63ded3bb0556390b32ce994d8e74fadd674 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:38:16 +0200 Subject: [PATCH 31/85] Keep disableForeignKeys --- pkg/sqlite/database_postgres.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index a55f153918a..667c028d17d 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -50,7 +50,11 @@ func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.D } if disableForeignKeys { - logger.Warn("open with disableForeignKeys is not implemented.") + _, err = conn.Exec("SET session_replication_role = replica;") + + if err != nil { + return nil, fmt.Errorf("conn.Exec(): %w", err) + } } if !writable { _, err = conn.Exec("SET SESSION CHARACTERISTICS AS TRANSACTION READ ONLY;") From 3959e4f5d1eb96dc7d5f7c2bd102663b18a35458 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 11 Oct 2024 15:53:08 +0200 Subject: [PATCH 32/85] Better DB abstraction, still ugly though --- internal/api/resolver_mutation_migrate.go | 2 +- internal/autotag/integration_test.go | 12 +- internal/manager/init.go | 25 ++-- internal/manager/manager.go | 2 +- internal/manager/task/migrate.go | 2 +- pkg/sqlite/anonymise.go | 8 +- pkg/sqlite/database.go | 66 +++++++-- pkg/sqlite/database_postgres.go | 16 ++- pkg/sqlite/database_sqlite.go | 42 ++---- pkg/sqlite/file_test.go | 12 +- pkg/sqlite/folder_test.go | 6 +- pkg/sqlite/gallery_chapter_test.go | 2 +- pkg/sqlite/gallery_test.go | 104 +++++++------- pkg/sqlite/group_test.go | 56 ++++---- pkg/sqlite/image_test.go | 98 +++++++------- pkg/sqlite/migrate.go | 7 +- 
pkg/sqlite/performer_test.go | 70 +++++----- pkg/sqlite/saved_filter_test.go | 10 +- pkg/sqlite/scene.go | 2 +- pkg/sqlite/scene_marker_test.go | 18 +-- pkg/sqlite/scene_test.go | 158 +++++++++++----------- pkg/sqlite/setup_test.go | 51 ++++--- pkg/sqlite/studio_test.go | 72 +++++----- pkg/sqlite/tag_test.go | 62 ++++----- pkg/sqlite/transaction_test.go | 22 +-- 25 files changed, 486 insertions(+), 439 deletions(-) diff --git a/internal/api/resolver_mutation_migrate.go b/internal/api/resolver_mutation_migrate.go index 083d307e9fd..d684da8e335 100644 --- a/internal/api/resolver_mutation_migrate.go +++ b/internal/api/resolver_mutation_migrate.go @@ -30,7 +30,7 @@ func (r *mutationResolver) MigrateBlobs(ctx context.Context, input MigrateBlobsI mgr := manager.GetInstance() t := &task.MigrateBlobsJob{ TxnManager: mgr.Database, - BlobStore: mgr.Database.Blobs, + BlobStore: mgr.Database.GetRepo().Blobs, Vacuumer: mgr.Database, DeleteOld: utils.IsTrue(input.DeleteOld), } diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index 03be36a9e12..d13cf60a6e8 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -33,7 +33,7 @@ var existingStudioID int const expectedMatchTitle = "expected match" -var db *sqlite.Database +var db sqlite.DBInterface var r models.Repository func testTeardown(databaseFile string) { @@ -57,9 +57,17 @@ func runTests(m *testing.M) int { } f.Close() + databaseFile := f.Name() sqlite.RegisterSqliteDialect() - db = sqlite.NewSQLiteDatabase(databaseFile) + + dbUrl, valid := os.LookupEnv("PGSQL_TEST") + if valid { + db = sqlite.NewPostgresDatabase(dbUrl) + } else { + db = sqlite.NewSQLiteDatabase(databaseFile) + } + if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } diff --git a/internal/manager/init.go b/internal/manager/init.go index ba30cd1f9b2..fab76b60d5c 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go 
@@ -35,7 +35,8 @@ import ( func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { ctx := context.TODO() - var db *sqlite.Database + var db sqlite.DBInterface + dbUrl := cfg.GetDatabaseUrl() upperUrl := strings.ToUpper(dbUrl) switch { @@ -58,30 +59,32 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { pluginCache := plugin.NewCache(cfg) + dbRepo := db.GetRepo() + sceneService := &scene.Service{ - File: db.File, - Repository: db.Scene, - MarkerRepository: db.SceneMarker, + File: dbRepo.File, + Repository: dbRepo.Scene, + MarkerRepository: dbRepo.SceneMarker, PluginCache: pluginCache, Paths: mgrPaths, Config: cfg, } imageService := &image.Service{ - File: db.File, - Repository: db.Image, + File: dbRepo.File, + Repository: dbRepo.Image, } galleryService := &gallery.Service{ - Repository: db.Gallery, - ImageFinder: db.Image, + Repository: dbRepo.Gallery, + ImageFinder: dbRepo.Image, ImageService: imageService, - File: db.File, - Folder: db.Folder, + File: dbRepo.File, + Folder: dbRepo.Folder, } groupService := &group.Service{ - Repository: db.Group, + Repository: dbRepo.Group, } sceneServer := &SceneServer{ diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 4827a3e3d92..138e38570b2 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -60,7 +60,7 @@ type Manager struct { DLNAService *dlna.Service - Database *sqlite.Database + Database sqlite.DBInterface Repository models.Repository SceneService SceneService diff --git a/internal/manager/task/migrate.go b/internal/manager/task/migrate.go index 37062329e48..609512b2f4b 100644 --- a/internal/manager/task/migrate.go +++ b/internal/manager/task/migrate.go @@ -20,7 +20,7 @@ type migrateJobConfig interface { type MigrateJob struct { BackupPath string Config migrateJobConfig - Database *sqlite.Database + Database sqlite.DBInterface } type databaseSchemaInfo struct { diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index 
5508e8ddad5..7ef4ae2588a 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -24,11 +24,11 @@ const ( ) type Anonymiser struct { - *Database + *SQLiteDB } -func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { - if _, err := db.writeDB.Exec(fmt.Sprintf(`VACUUM INTO "%s"`, outPath)); err != nil { +func NewAnonymiser(db DBInterface, outPath string) (*Anonymiser, error) { + if _, err := db.GetWriteDB().Exec(fmt.Sprintf(`VACUUM INTO "%s"`, outPath)); err != nil { return nil, fmt.Errorf("vacuuming into %s: %w", outPath, err) } @@ -37,7 +37,7 @@ func NewAnonymiser(db *Database, outPath string) (*Anonymiser, error) { return nil, fmt.Errorf("opening %s: %w", outPath, err) } - return &Anonymiser{Database: newDB}, nil + return &Anonymiser{SQLiteDB: newDB}, nil } func (db *Anonymiser) Anonymise(ctx context.Context) error { diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 6efe3a01c2e..9f846c28658 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -87,25 +87,35 @@ const ( SqliteBackend DatabaseType = "SQLITE" ) -type dbInterface interface { +type DBInterface interface { Analyze(ctx context.Context) error Anonymise(outPath string) error AnonymousDatabasePath(backupDirectoryPath string) string AppSchemaVersion() uint Backup(backupPath string) (err error) Begin(ctx context.Context, writable bool) (context.Context, error) - Open() error Close() error Commit(ctx context.Context) error DatabaseBackupPath(backupDirectoryPath string) string DatabasePath() string DatabaseType() DatabaseType ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, error) + getDatabaseSchemaVersion() (uint, error) + GetReadDB() *sqlx.DB + GetRepo() *storeRepository + GetWriteDB() *sqlx.DB + initialise() error IsLocked(err error) bool + lock() + needsMigration() bool + Open() error + open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) + openReadDB() error + openWriteDB() error Optimise(ctx 
context.Context) error QuerySQL(ctx context.Context, query string, args []interface{}) ([]string, [][]interface{}, error) - ReInitialise() error Ready() error + ReInitialise() error Remove() error Repository() models.Repository Reset() error @@ -113,23 +123,17 @@ type dbInterface interface { Rollback(ctx context.Context) error RunAllMigrations() error SetBlobStoreOptions(options BlobStoreOptions) + SetSchemaVersion(version uint) + txnComplete(ctx context.Context) + unlock() Vacuum(ctx context.Context) error Version() uint WithDatabase(ctx context.Context) (context.Context, error) - getDatabaseSchemaVersion() (uint, error) - initialise() error - lock() - needsMigration() bool - open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) - openReadDB() error - openWriteDB() error - txnComplete(ctx context.Context) - unlock() } type Database struct { *storeRepository - dbInterface + DBInterface readDB *sqlx.DB writeDB *sqlx.DB @@ -182,6 +186,34 @@ func getDBBoolean(val bool) string { } } +func (db *Database) SetSchemaVersion(version uint) { + db.schemaVersion = version +} + +func (db *Database) GetRepo() *storeRepository { + return db.storeRepository +} + +// lock locks the database for writing. This method will block until the lock is acquired. 
+func (db *Database) lock() { + db.lockChan <- struct{}{} +} + +// unlock unlocks the database +func (db *Database) unlock() { + // will block the caller if the lock is not held, so check first + select { + case <-db.lockChan: + return + default: + panic("database is not locked") + } +} + +func (db *Database) AppSchemaVersion() uint { + return appSchemaVersion +} + func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } @@ -354,6 +386,14 @@ func (db *Database) Analyze(ctx context.Context) error { return err } +func (db *Database) GetWriteDB() *sqlx.DB { + return db.writeDB +} + +func (db *Database) GetReadDB() *sqlx.DB { + return db.readDB +} + func (db *Database) ExecSQL(ctx context.Context, query string, args []interface{}) (*int64, error) { wrapper := dbWrapperType{} diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 667c028d17d..0bd601b4e1b 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -10,20 +10,24 @@ import ( "github.com/stashapp/stash/pkg/logger" ) -type PostgresDB Database +type PostgresDB struct { + Database +} -func NewPostgresDatabase(dbConnector string) *Database { +func NewPostgresDatabase(dbConnector string) *PostgresDB { dialect = goqu.Dialect("postgres") db := &PostgresDB{ - storeRepository: newDatabase(), - dbConfig: dbConnector, + Database: Database{ + storeRepository: newDatabase(), + dbConfig: dbConnector, + }, } - db.dbInterface = db + db.DBInterface = db dbWrapper.dbType = PostgresBackend - return (*Database)(db) + return db } // Does nothing diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index f80b61fde91..2a58d6c65ef 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -13,7 +13,9 @@ import ( "github.com/stashapp/stash/pkg/logger" ) -type SQLiteDB Database +type SQLiteDB struct { + Database +} func RegisterSqliteDialect() { opts := sqlite3.DialectOptions() @@ 
-21,45 +23,27 @@ func RegisterSqliteDialect() { goqu.RegisterDialect("sqlite3new", opts) } -func NewSQLiteDatabase(dbPath string) *Database { +func NewSQLiteDatabase(dbPath string) *SQLiteDB { dialect = goqu.Dialect("sqlite3new") db := &SQLiteDB{ - storeRepository: newDatabase(), - lockChan: make(chan struct{}, 1), - dbConfig: dbPath, + Database: Database{ + storeRepository: newDatabase(), + lockChan: make(chan struct{}, 1), + dbConfig: dbPath, + }, } - db.dbInterface = db + db.DBInterface = db dbWrapper.dbType = SqliteBackend - return (*Database)(db) -} - -// lock locks the database for writing. This method will block until the lock is acquired. -func (db *SQLiteDB) lock() { - db.lockChan <- struct{}{} -} - -// unlock unlocks the database -func (db *SQLiteDB) unlock() { - // will block the caller if the lock is not held, so check first - select { - case <-db.lockChan: - return - default: - panic("database is not locked") - } + return db } func (db *SQLiteDB) DatabaseType() DatabaseType { return SqliteBackend } -func (db *SQLiteDB) AppSchemaVersion() uint { - return appSchemaVersion -} - func (db *SQLiteDB) DatabasePath() string { return (db.dbConfig).(string) } @@ -94,7 +78,7 @@ func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, func (db *SQLiteDB) Remove() error { databasePath := db.DatabasePath() - err := (*Database)(db).Close() + err := db.Close() if err != nil { return fmt.Errorf("error closing database: %w", err) @@ -124,7 +108,7 @@ func (db *SQLiteDB) Reset() error { return err } - if err := (*Database)(db).Open(); err != nil { + if err := db.Open(); err != nil { return fmt.Errorf("[reset DB] unable to initialize: %w", err) } diff --git a/pkg/sqlite/file_test.go b/pkg/sqlite/file_test.go index 766ffcc70b7..728667800b1 100644 --- a/pkg/sqlite/file_test.go +++ b/pkg/sqlite/file_test.go @@ -192,7 +192,7 @@ func Test_fileFileStore_Create(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { 
runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -419,7 +419,7 @@ func Test_fileStore_Update(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -491,7 +491,7 @@ func Test_fileStore_Find(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -546,7 +546,7 @@ func Test_FileStore_FindByPath(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -598,7 +598,7 @@ func TestFileStore_FindByFingerprint(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -647,7 +647,7 @@ func TestFileStore_IsPrimary(t *testing.T) { }, } - qb := db.File + qb := db.GetRepo().File for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { diff --git a/pkg/sqlite/folder_test.go b/pkg/sqlite/folder_test.go index 1d948d06368..c27a89e7322 100644 --- a/pkg/sqlite/folder_test.go +++ b/pkg/sqlite/folder_test.go @@ -65,7 +65,7 @@ func Test_FolderStore_Create(t *testing.T) { }, } - qb := db.Folder + qb := db.GetRepo().Folder for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -165,7 +165,7 @@ func Test_FolderStore_Update(t *testing.T) { }, } - qb := db.Folder + qb := db.GetRepo().Folder for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -224,7 +224,7 @@ func Test_FolderStore_FindByPath(t *testing.T) { }, } - qb := db.Folder + qb := db.GetRepo().Folder for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx 
context.Context) { diff --git a/pkg/sqlite/gallery_chapter_test.go b/pkg/sqlite/gallery_chapter_test.go index 4c71ae6b5a4..13ec2aa3fec 100644 --- a/pkg/sqlite/gallery_chapter_test.go +++ b/pkg/sqlite/gallery_chapter_test.go @@ -12,7 +12,7 @@ import ( func TestChapterFindByGalleryID(t *testing.T) { withTxn(func(ctx context.Context) error { - mqb := db.GalleryChapter + mqb := db.GetRepo().GalleryChapter galleryID := galleryIDs[galleryIdxWithChapters] chapters, err := mqb.FindByGalleryID(ctx, galleryID) diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index be1edb687ae..ee602ef05b6 100644 --- a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -18,27 +18,27 @@ var invalidID = -1 func loadGalleryRelationships(ctx context.Context, expected models.Gallery, actual *models.Gallery) error { if expected.URLs.Loaded() { - if err := actual.LoadURLs(ctx, db.Gallery); err != nil { + if err := actual.LoadURLs(ctx, db.GetRepo().Gallery); err != nil { return err } } if expected.SceneIDs.Loaded() { - if err := actual.LoadSceneIDs(ctx, db.Gallery); err != nil { + if err := actual.LoadSceneIDs(ctx, db.GetRepo().Gallery); err != nil { return err } } if expected.TagIDs.Loaded() { - if err := actual.LoadTagIDs(ctx, db.Gallery); err != nil { + if err := actual.LoadTagIDs(ctx, db.GetRepo().Gallery); err != nil { return err } } if expected.PerformerIDs.Loaded() { - if err := actual.LoadPerformerIDs(ctx, db.Gallery); err != nil { + if err := actual.LoadPerformerIDs(ctx, db.GetRepo().Gallery); err != nil { return err } } if expected.Files.Loaded() { - if err := actual.LoadFiles(ctx, db.Gallery); err != nil { + if err := actual.LoadFiles(ctx, db.GetRepo().Gallery); err != nil { return err } } @@ -148,7 +148,7 @@ func Test_galleryQueryBuilder_Create(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -353,7 +353,7 @@ func 
Test_galleryQueryBuilder_Update(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -510,7 +510,7 @@ func Test_galleryQueryBuilder_UpdatePartial(t *testing.T) { }, } for _, tt := range tests { - qb := db.Gallery + qb := db.GetRepo().Gallery runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -779,7 +779,7 @@ func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) { } for _, tt := range tests { - qb := db.Gallery + qb := db.GetRepo().Gallery runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -844,7 +844,7 @@ func Test_galleryQueryBuilder_Destroy(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -908,7 +908,7 @@ func Test_galleryQueryBuilder_Find(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -971,7 +971,7 @@ func Test_galleryQueryBuilder_FindMany(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1029,7 +1029,7 @@ func Test_galleryQueryBuilder_FindByChecksum(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1092,7 +1092,7 @@ func Test_galleryQueryBuilder_FindByChecksums(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1150,7 +1150,7 @@ func Test_galleryQueryBuilder_FindByPath(t *testing.T) { }, } - qb := 
db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1192,7 +1192,7 @@ func Test_galleryQueryBuilder_FindBySceneID(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1237,7 +1237,7 @@ func Test_galleryQueryBuilder_FindByImageID(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1279,7 +1279,7 @@ func Test_galleryQueryBuilder_CountByImageID(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1325,7 +1325,7 @@ func Test_galleryStore_FindByFileID(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1369,7 +1369,7 @@ func Test_galleryStore_FindByFolderID(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1409,7 +1409,7 @@ func TestGalleryQueryQ(t *testing.T) { } func galleryQueryQ(ctx context.Context, t *testing.T, q string, expectedGalleryIdx int) { - qb := db.Gallery + qb := db.GetRepo().Gallery filter := models.FindFilterType{ Q: &q, @@ -1484,7 +1484,7 @@ func TestGalleryQueryPath(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1511,7 +1511,7 @@ func verifyGalleriesPath(ctx context.Context, t *testing.T, pathCriterion models Path: &pathCriterion, } - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleries, _, err := sqb.Query(ctx, &galleryFilter, nil) if 
err != nil { t.Errorf("Error querying gallery: %s", err.Error()) @@ -1545,7 +1545,7 @@ func TestGalleryQueryPathOr(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil) @@ -1581,7 +1581,7 @@ func TestGalleryQueryPathAndRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil) @@ -1621,7 +1621,7 @@ func TestGalleryQueryPathNotRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleries := queryGallery(ctx, t, sqb, &galleryFilter, nil) @@ -1654,7 +1654,7 @@ func TestGalleryIllegalQuery(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery _, _, err := sqb.Query(ctx, galleryFilter, nil) assert.NotNil(err) @@ -1720,7 +1720,7 @@ func TestGalleryQueryURL(t *testing.T) { func verifyGalleryQuery(t *testing.T, filter models.GalleryFilterType, verifyFn func(s *models.Gallery)) { withTxn(func(ctx context.Context) error { t.Helper() - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleries := queryGallery(ctx, t, sqb, &filter, nil) @@ -1768,7 +1768,7 @@ func TestGalleryQueryRating100(t *testing.T) { func verifyGalleriesRating100(t *testing.T, ratingCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleryFilter := models.GalleryFilterType{ Rating100: &ratingCriterion, } @@ -1788,7 +1788,7 @@ func verifyGalleriesRating100(t *testing.T, ratingCriterion models.IntCriterionI func TestGalleryQueryIsMissingScene(t *testing.T) { withTxn(func(ctx context.Context) error { - qb := db.Gallery + qb := db.GetRepo().Gallery isMissing := "scenes" galleryFilter := models.GalleryFilterType{ IsMissing: &isMissing, @@ -1832,7 +1832,7 @@ func queryGallery(ctx 
context.Context, t *testing.T, sqb models.GalleryReader, g func TestGalleryQueryIsMissingStudio(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery isMissing := "studio" galleryFilter := models.GalleryFilterType{ IsMissing: &isMissing, @@ -1861,7 +1861,7 @@ func TestGalleryQueryIsMissingStudio(t *testing.T) { func TestGalleryQueryIsMissingPerformers(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery isMissing := "performers" galleryFilter := models.GalleryFilterType{ IsMissing: &isMissing, @@ -1892,7 +1892,7 @@ func TestGalleryQueryIsMissingPerformers(t *testing.T) { func TestGalleryQueryIsMissingTags(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery isMissing := "tags" galleryFilter := models.GalleryFilterType{ IsMissing: &isMissing, @@ -1918,7 +1918,7 @@ func TestGalleryQueryIsMissingTags(t *testing.T) { func TestGalleryQueryIsMissingDate(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery isMissing := "date" galleryFilter := models.GalleryFilterType{ IsMissing: &isMissing, @@ -2051,7 +2051,7 @@ func TestGalleryQueryPerformers(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, _, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ + results, _, err := db.GetRepo().Gallery.Query(ctx, &models.GalleryFilterType{ Performers: &tt.filter, }, nil) if (err != nil) != tt.wantErr { @@ -2187,7 +2187,7 @@ func TestGalleryQueryTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, _, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ + results, _, err := db.GetRepo().Gallery.Query(ctx, &models.GalleryFilterType{ Tags: &tt.filter, }, nil) if (err != nil) != tt.wantErr { @@ -2280,7 +2280,7 @@ func 
TestGalleryQueryStudio(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2306,7 +2306,7 @@ func TestGalleryQueryStudio(t *testing.T) { func TestGalleryQueryStudioDepth(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery depth := 2 studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ @@ -2539,7 +2539,7 @@ func TestGalleryQueryPerformerTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, _, err := db.Gallery.Query(ctx, tt.filter, tt.findFilter) + results, _, err := db.GetRepo().Gallery.Query(ctx, tt.filter, tt.findFilter) if (err != nil) != tt.wantErr { t.Errorf("ImageStore.Query() error = %v, wantErr %v", err, tt.wantErr) return @@ -2581,7 +2581,7 @@ func TestGalleryQueryTagCount(t *testing.T) { func verifyGalleriesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleryFilter := models.GalleryFilterType{ TagCount: &tagCountCriterion, } @@ -2622,7 +2622,7 @@ func TestGalleryQueryPerformerCount(t *testing.T) { func verifyGalleriesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleryFilter := models.GalleryFilterType{ PerformerCount: &performerCountCriterion, } @@ -2645,7 +2645,7 @@ func verifyGalleriesPerformerCount(t *testing.T, performerCountCriterion models. 
func TestGalleryQueryAverageResolution(t *testing.T) { withTxn(func(ctx context.Context) error { - qb := db.Gallery + qb := db.GetRepo().Gallery resolution := models.ResolutionEnumLow galleryFilter := models.GalleryFilterType{ AverageResolution: &models.ResolutionCriterionInput{ @@ -2683,7 +2683,7 @@ func TestGalleryQueryImageCount(t *testing.T) { func verifyGalleriesImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery galleryFilter := models.GalleryFilterType{ ImageCount: &imageCountCriterion, } @@ -2694,7 +2694,7 @@ func verifyGalleriesImageCount(t *testing.T, imageCountCriterion models.IntCrite for _, gallery := range galleries { pp := 0 - result, err := db.Image.Query(ctx, models.ImageQueryOptions{ + result, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: &models.FindFilterType{ PerPage: &pp, @@ -2749,7 +2749,7 @@ func TestGalleryQuerySorting(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2835,7 +2835,7 @@ func TestGalleryStore_AddImages(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2914,7 +2914,7 @@ func TestGalleryStore_RemoveImages(t *testing.T) { }, } - qb := db.Gallery + qb := db.GetRepo().Gallery for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2944,7 +2944,7 @@ func TestGalleryStore_RemoveImages(t *testing.T) { func TestGalleryQueryHasChapters(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery hasChapters := "true" galleryFilter := models.GalleryFilterType{ HasChapters: &hasChapters, @@ -2975,25 +2975,25 @@ func 
TestGalleryQueryHasChapters(t *testing.T) { func TestGallerySetAndResetCover(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Gallery + sqb := db.GetRepo().Gallery imagePath2 := getFilePath(folderIdxWithImageFiles, getImageBasename(imageIdx2WithGallery)) - result, err := db.Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) + result, err := db.GetRepo().Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) assert.Nil(t, err) assert.Nil(t, result) err = sqb.SetCover(ctx, galleryIDs[galleryIdxWithTwoImages], imageIDs[imageIdx2WithGallery]) assert.Nil(t, err) - result, err = db.Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) + result, err = db.GetRepo().Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) assert.Nil(t, err) assert.Equal(t, result.Path, imagePath2) err = sqb.ResetCover(ctx, galleryIDs[galleryIdxWithTwoImages]) assert.Nil(t, err) - result, err = db.Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) + result, err = db.GetRepo().Image.CoverByGalleryID(ctx, galleryIDs[galleryIdxWithTwoImages]) assert.Nil(t, err) assert.Nil(t, result) diff --git a/pkg/sqlite/group_test.go b/pkg/sqlite/group_test.go index 1d3637c8611..4b8fe97ae03 100644 --- a/pkg/sqlite/group_test.go +++ b/pkg/sqlite/group_test.go @@ -20,22 +20,22 @@ import ( func loadGroupRelationships(ctx context.Context, expected models.Group, actual *models.Group) error { if expected.URLs.Loaded() { - if err := actual.LoadURLs(ctx, db.Group); err != nil { + if err := actual.LoadURLs(ctx, db.GetRepo().Group); err != nil { return err } } if expected.TagIDs.Loaded() { - if err := actual.LoadTagIDs(ctx, db.Group); err != nil { + if err := actual.LoadTagIDs(ctx, db.GetRepo().Group); err != nil { return err } } if expected.ContainingGroups.Loaded() { - if err := actual.LoadContainingGroupIDs(ctx, db.Group); err != nil { + if err := actual.LoadContainingGroupIDs(ctx, db.GetRepo().Group); err != nil { return err } } if 
expected.SubGroups.Loaded() { - if err := actual.LoadSubGroupIDs(ctx, db.Group); err != nil { + if err := actual.LoadSubGroupIDs(ctx, db.GetRepo().Group); err != nil { return err } } @@ -114,7 +114,7 @@ func Test_GroupStore_Create(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -276,7 +276,7 @@ func Test_groupQueryBuilder_Update(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -526,7 +526,7 @@ func Test_groupQueryBuilder_UpdatePartial(t *testing.T) { }, } for _, tt := range tests { - qb := db.Group + qb := db.GetRepo().Group runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -567,7 +567,7 @@ func Test_groupQueryBuilder_UpdatePartial(t *testing.T) { func TestGroupFindByName(t *testing.T) { withTxn(func(ctx context.Context) error { - mqb := db.Group + mqb := db.GetRepo().Group name := groupNames[groupIdxWithScene] // find a group by name @@ -600,7 +600,7 @@ func TestGroupFindByNames(t *testing.T) { withTxn(func(ctx context.Context) error { var names []string - mqb := db.Group + mqb := db.GetRepo().Group names = append(names, groupNames[groupIdxWithScene]) // find groups by names @@ -674,7 +674,7 @@ func TestGroupQuery(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, _, err := db.Group.Query(ctx, tt.filter, tt.findFilter) + results, _, err := db.GetRepo().Group.Query(ctx, tt.filter, tt.findFilter) if (err != nil) != tt.wantErr { t.Errorf("GroupQueryBuilder.Query() error = %v, wantErr %v", err, tt.wantErr) return @@ -696,7 +696,7 @@ func TestGroupQuery(t *testing.T) { func TestGroupQueryStudio(t *testing.T) { withTxn(func(ctx context.Context) error { - mqb := db.Group + mqb := 
db.GetRepo().Group studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithGroup]), @@ -787,7 +787,7 @@ func TestGroupQueryURL(t *testing.T) { func TestGroupQueryURLExcludes(t *testing.T) { withRollbackTxn(func(ctx context.Context) error { - mqb := db.Group + mqb := db.GetRepo().Group // create group with two URLs group := models.Group{ @@ -838,7 +838,7 @@ func TestGroupQueryURLExcludes(t *testing.T) { func verifyGroupQuery(t *testing.T, filter models.GroupFilterType, verifyFn func(s *models.Group)) { withTxn(func(ctx context.Context) error { t.Helper() - sqb := db.Group + sqb := db.GetRepo().Group groups := queryGroups(ctx, t, &filter, nil) @@ -860,7 +860,7 @@ func verifyGroupQuery(t *testing.T, filter models.GroupFilterType, verifyFn func } func queryGroups(ctx context.Context, t *testing.T, groupFilter *models.GroupFilterType, findFilter *models.FindFilterType) []*models.Group { - sqb := db.Group + sqb := db.GetRepo().Group groups, _, err := sqb.Query(ctx, groupFilter, findFilter) if err != nil { t.Errorf("Error querying group: %s", err.Error()) @@ -945,7 +945,7 @@ func TestGroupQueryTagCount(t *testing.T) { func verifyGroupsTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Group + sqb := db.GetRepo().Group groupFilter := models.GroupFilterType{ TagCount: &tagCountCriterion, } @@ -1010,12 +1010,12 @@ func TestGroupQuerySortOrderIndex(t *testing.T) { withTxn(func(ctx context.Context) error { // just ensure there are no errors - _, _, err := db.Group.Query(ctx, &groupFilter, &findFilter) + _, _, err := db.GetRepo().Group.Query(ctx, &groupFilter, &findFilter) if err != nil { t.Errorf("Error querying group: %s", err.Error()) } - _, _, err = db.Group.Query(ctx, nil, &findFilter) + _, _, err = db.GetRepo().Group.Query(ctx, nil, &findFilter) if err != nil { t.Errorf("Error querying group: %s", err.Error()) } @@ -1026,7 +1026,7 @@ func 
TestGroupQuerySortOrderIndex(t *testing.T) { func TestGroupUpdateFrontImage(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Group + qb := db.GetRepo().Group // create group to test against const name = "TestGroupUpdateGroupImages" @@ -1046,7 +1046,7 @@ func TestGroupUpdateFrontImage(t *testing.T) { func TestGroupUpdateBackImage(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Group + qb := db.GetRepo().Group // create group to test against const name = "TestGroupUpdateGroupImages" @@ -1141,7 +1141,7 @@ func TestGroupQueryContainingGroups(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { valueIDs := indexesToIDs(groupIDs, tt.c.valueIdxs) @@ -1254,7 +1254,7 @@ func TestGroupQuerySubGroups(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { valueIDs := indexesToIDs(groupIDs, tt.c.valueIdxs) @@ -1330,7 +1330,7 @@ func TestGroupQueryContainingGroupCount(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { expectedIDs := indexesToIDs(groupIDs, tt.expectedIdxs) @@ -1401,7 +1401,7 @@ func TestGroupQuerySubGroupCount(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { expectedIDs := indexesToIDs(groupIDs, tt.expectedIdxs) @@ -1459,7 +1459,7 @@ func TestGroupFindInAncestors(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { ancestorIDs := indexesToIDs(groupIDs, tt.ancestorIdxs) @@ -1555,7 +1555,7 @@ func TestGroupReorderSubGroups(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1665,7 +1665,7 @@ func TestGroupAddSubGroups(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { 
@@ -1780,7 +1780,7 @@ func TestGroupRemoveSubGroups(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1868,7 +1868,7 @@ func TestGroupFindSubGroupIDs(t *testing.T) { }, } - qb := db.Group + qb := db.GetRepo().Group for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { diff --git a/pkg/sqlite/image_test.go b/pkg/sqlite/image_test.go index aa4ed3b99ad..a823f8fc146 100644 --- a/pkg/sqlite/image_test.go +++ b/pkg/sqlite/image_test.go @@ -16,27 +16,27 @@ import ( func loadImageRelationships(ctx context.Context, expected models.Image, actual *models.Image) error { if expected.URLs.Loaded() { - if err := actual.LoadURLs(ctx, db.Image); err != nil { + if err := actual.LoadURLs(ctx, db.GetRepo().Image); err != nil { return err } } if expected.GalleryIDs.Loaded() { - if err := actual.LoadGalleryIDs(ctx, db.Image); err != nil { + if err := actual.LoadGalleryIDs(ctx, db.GetRepo().Image); err != nil { return err } } if expected.TagIDs.Loaded() { - if err := actual.LoadTagIDs(ctx, db.Image); err != nil { + if err := actual.LoadTagIDs(ctx, db.GetRepo().Image); err != nil { return err } } if expected.PerformerIDs.Loaded() { - if err := actual.LoadPerformerIDs(ctx, db.Image); err != nil { + if err := actual.LoadPerformerIDs(ctx, db.GetRepo().Image); err != nil { return err } } if expected.Files.Loaded() { - if err := actual.LoadFiles(ctx, db.Image); err != nil { + if err := actual.LoadFiles(ctx, db.GetRepo().Image); err != nil { return err } } @@ -153,7 +153,7 @@ func Test_imageQueryBuilder_Create(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -360,7 +360,7 @@ func Test_imageQueryBuilder_Update(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { 
runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -511,7 +511,7 @@ func Test_imageQueryBuilder_UpdatePartial(t *testing.T) { }, } for _, tt := range tests { - qb := db.Image + qb := db.GetRepo().Image runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -785,7 +785,7 @@ func Test_imageQueryBuilder_UpdatePartialRelationships(t *testing.T) { } for _, tt := range tests { - qb := db.Image + qb := db.GetRepo().Image runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -853,7 +853,7 @@ func Test_imageQueryBuilder_IncrementOCounter(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -896,7 +896,7 @@ func Test_imageQueryBuilder_DecrementOCounter(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -939,7 +939,7 @@ func Test_imageQueryBuilder_ResetOCounter(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -973,7 +973,7 @@ func Test_imageQueryBuilder_Destroy(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1034,7 +1034,7 @@ func Test_imageQueryBuilder_Find(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1097,7 +1097,7 @@ func Test_imageQueryBuilder_FindMany(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1156,7 +1156,7 @@ func 
Test_imageQueryBuilder_FindByChecksum(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1234,7 +1234,7 @@ func Test_imageQueryBuilder_FindByFingerprints(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1276,7 +1276,7 @@ func Test_imageQueryBuilder_FindByGalleryID(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1319,7 +1319,7 @@ func Test_imageQueryBuilder_CountByGalleryID(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1365,7 +1365,7 @@ func Test_imageStore_FindByFileID(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1420,7 +1420,7 @@ func Test_imageStore_FindByFolderID(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1469,7 +1469,7 @@ func Test_imageStore_FindByZipFileID(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1503,7 +1503,7 @@ func TestImageQueryQ(t *testing.T) { q := getImageStringValue(imageIdx, titleField) - sqb := db.Image + sqb := db.GetRepo().Image imageQueryQ(ctx, t, sqb, q, imageIdx) @@ -1558,7 +1558,7 @@ func verifyImageQuery(t *testing.T, filter models.ImageFilterType, verifyFn func t.Helper() withTxn(func(ctx context.Context) error { t.Helper() - sqb := db.Image + sqb := db.GetRepo().Image images := queryImages(ctx, 
t, sqb, &filter, nil) @@ -1587,7 +1587,7 @@ func TestImageQueryURL(t *testing.T) { verifyFn := func(ctx context.Context, o *models.Image) { t.Helper() - if err := o.LoadURLs(ctx, db.Image); err != nil { + if err := o.LoadURLs(ctx, db.GetRepo().Image); err != nil { t.Errorf("Error loading scene URLs: %v", err) } @@ -1639,7 +1639,7 @@ func TestImageQueryPath(t *testing.T) { func verifyImagePath(t *testing.T, pathCriterion models.StringCriterionInput, expected int) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ Path: &pathCriterion, } @@ -1679,7 +1679,7 @@ func TestImageQueryPathOr(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image images := queryImages(ctx, t, sqb, &imageFilter, nil) @@ -1715,7 +1715,7 @@ func TestImageQueryPathAndRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image images := queryImages(ctx, t, sqb, &imageFilter, nil) @@ -1755,7 +1755,7 @@ func TestImageQueryPathNotRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image images := queryImages(ctx, t, sqb, &imageFilter, nil) @@ -1788,7 +1788,7 @@ func TestImageIllegalQuery(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image _, _, err := queryImagesWithCount(ctx, sqb, imageFilter, nil) assert.NotNil(err) @@ -1834,7 +1834,7 @@ func TestImageQueryRating100(t *testing.T) { func verifyImagesRating100(t *testing.T, ratingCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ Rating100: &ratingCriterion, } @@ -1873,7 +1873,7 @@ func TestImageQueryOCounter(t *testing.T) { func verifyImagesOCounter(t *testing.T, oCounterCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - 
sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ OCounter: &oCounterCriterion, } @@ -1902,7 +1902,7 @@ func TestImageQueryResolution(t *testing.T) { func verifyImagesResolution(t *testing.T, resolution models.ResolutionEnum) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ Resolution: &models.ResolutionCriterionInput{ Value: resolution, @@ -1916,7 +1916,7 @@ func verifyImagesResolution(t *testing.T, resolution models.ResolutionEnum) { } for _, image := range images { - if err := image.LoadPrimaryFile(ctx, db.File); err != nil { + if err := image.LoadPrimaryFile(ctx, db.GetRepo().File); err != nil { t.Errorf("Error loading primary file: %s", err.Error()) return nil } @@ -1955,7 +1955,7 @@ func verifyImageResolution(t *testing.T, height int, resolution models.Resolutio func TestImageQueryIsMissingGalleries(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image isMissing := "galleries" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -1992,7 +1992,7 @@ func TestImageQueryIsMissingGalleries(t *testing.T) { func TestImageQueryIsMissingStudio(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image isMissing := "studio" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -2027,7 +2027,7 @@ func TestImageQueryIsMissingStudio(t *testing.T) { func TestImageQueryIsMissingPerformers(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image isMissing := "performers" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -2064,7 +2064,7 @@ func TestImageQueryIsMissingPerformers(t *testing.T) { func TestImageQueryIsMissingTags(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image isMissing := "tags" imageFilter := models.ImageFilterType{ 
IsMissing: &isMissing, @@ -2096,7 +2096,7 @@ func TestImageQueryIsMissingTags(t *testing.T) { func TestImageQueryIsMissingRating(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image isMissing := "rating" imageFilter := models.ImageFilterType{ IsMissing: &isMissing, @@ -2120,7 +2120,7 @@ func TestImageQueryIsMissingRating(t *testing.T) { func TestImageQueryGallery(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image galleryCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(galleryIDs[galleryIdxWithImage]), @@ -2289,7 +2289,7 @@ func TestImageQueryPerformers(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Image.Query(ctx, models.ImageQueryOptions{ + results, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ ImageFilter: &models.ImageFilterType{ Performers: &tt.filter, }, @@ -2425,7 +2425,7 @@ func TestImageQueryTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Image.Query(ctx, models.ImageQueryOptions{ + results, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ ImageFilter: &models.ImageFilterType{ Tags: &tt.filter, }, @@ -2518,7 +2518,7 @@ func TestImageQueryStudio(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2544,7 +2544,7 @@ func TestImageQueryStudio(t *testing.T) { func TestImageQueryStudioDepth(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image depth := 2 studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ @@ -2786,7 +2786,7 @@ func TestImageQueryPerformerTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { 
assert := assert.New(t) - results, err := db.Image.Query(ctx, models.ImageQueryOptions{ + results, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ ImageFilter: tt.filter, QueryOptions: models.QueryOptions{ FindFilter: tt.findFilter, @@ -2831,7 +2831,7 @@ func TestImageQueryTagCount(t *testing.T) { func verifyImagesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ TagCount: &tagCountCriterion, } @@ -2872,7 +2872,7 @@ func TestImageQueryPerformerCount(t *testing.T) { func verifyImagesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Image + sqb := db.GetRepo().Image imageFilter := models.ImageFilterType{ PerformerCount: &performerCountCriterion, } @@ -2930,7 +2930,7 @@ func TestImageQuerySorting(t *testing.T) { }, } - qb := db.Image + qb := db.GetRepo().Image for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2982,7 +2982,7 @@ func TestImageQueryPagination(t *testing.T) { PerPage: &perPage, } - sqb := db.Image + sqb := db.GetRepo().Image images, _, err := queryImagesWithCount(ctx, sqb, nil, &findFilter) if err != nil { t.Errorf("Error querying image: %s", err.Error()) diff --git a/pkg/sqlite/migrate.go b/pkg/sqlite/migrate.go index 0dc60b3bf60..c3ed895ada9 100644 --- a/pkg/sqlite/migrate.go +++ b/pkg/sqlite/migrate.go @@ -17,12 +17,12 @@ func (db *Database) needsMigration() bool { } type Migrator struct { - db *Database + db DBInterface conn *sqlx.DB m *migrate.Migrate } -func NewMigrator(db *Database) (*Migrator, error) { +func NewMigrator(db DBInterface) (*Migrator, error) { m := &Migrator{ db: db, } @@ -131,7 +131,8 @@ func (m *Migrator) RunMigration(ctx context.Context, newVersion uint) error { } // update the schema version - m.db.schemaVersion, _, _ = m.m.Version() + 
schemaVersion, _, _ := m.m.Version() + m.db.SetSchemaVersion(schemaVersion) return nil } diff --git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index e0294f3e442..e03ee42063e 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -18,22 +18,22 @@ import ( func loadPerformerRelationships(ctx context.Context, expected models.Performer, actual *models.Performer) error { if expected.Aliases.Loaded() { - if err := actual.LoadAliases(ctx, db.Performer); err != nil { + if err := actual.LoadAliases(ctx, db.GetRepo().Performer); err != nil { return err } } if expected.URLs.Loaded() { - if err := actual.LoadURLs(ctx, db.Performer); err != nil { + if err := actual.LoadURLs(ctx, db.GetRepo().Performer); err != nil { return err } } if expected.TagIDs.Loaded() { - if err := actual.LoadTagIDs(ctx, db.Performer); err != nil { + if err := actual.LoadTagIDs(ctx, db.GetRepo().Performer); err != nil { return err } } if expected.StashIDs.Loaded() { - if err := actual.LoadStashIDs(ctx, db.Performer); err != nil { + if err := actual.LoadStashIDs(ctx, db.GetRepo().Performer); err != nil { return err } } @@ -137,7 +137,7 @@ func Test_PerformerStore_Create(t *testing.T) { }, } - qb := db.Performer + qb := db.GetRepo().Performer for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -304,7 +304,7 @@ func Test_PerformerStore_Update(t *testing.T) { }, } - qb := db.Performer + qb := db.GetRepo().Performer for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -535,7 +535,7 @@ func Test_PerformerStore_UpdatePartial(t *testing.T) { }, } for _, tt := range tests { - qb := db.Performer + qb := db.GetRepo().Performer runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -575,7 +575,7 @@ func Test_PerformerStore_UpdatePartial(t *testing.T) { func TestPerformerFindBySceneID(t *testing.T) { 
withTxn(func(ctx context.Context) error { - pqb := db.Performer + pqb := db.GetRepo().Performer sceneID := sceneIDs[sceneIdxWithPerformer] performers, err := pqb.FindBySceneID(ctx, sceneID) @@ -606,7 +606,7 @@ func TestPerformerFindBySceneID(t *testing.T) { func TestPerformerFindByImageID(t *testing.T) { withTxn(func(ctx context.Context) error { - pqb := db.Performer + pqb := db.GetRepo().Performer imageID := imageIDs[imageIdxWithPerformer] performers, err := pqb.FindByImageID(ctx, imageID) @@ -637,7 +637,7 @@ func TestPerformerFindByImageID(t *testing.T) { func TestPerformerFindByGalleryID(t *testing.T) { withTxn(func(ctx context.Context) error { - pqb := db.Performer + pqb := db.GetRepo().Performer galleryID := galleryIDs[galleryIdxWithPerformer] performers, err := pqb.FindByGalleryID(ctx, galleryID) @@ -678,7 +678,7 @@ func TestPerformerFindByNames(t *testing.T) { withTxn(func(ctx context.Context) error { var names []string - pqb := db.Performer + pqb := db.GetRepo().Performer names = append(names, performerNames[performerIdxWithScene]) // find performers by names @@ -893,7 +893,7 @@ func TestPerformerIllegalQuery(t *testing.T) { }, } - sqb := db.Performer + sqb := db.GetRepo().Performer for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1022,7 +1022,7 @@ func TestPerformerQuery(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - performers, _, err := db.Performer.Query(ctx, tt.filter, tt.findFilter) + performers, _, err := db.GetRepo().Performer.Query(ctx, tt.filter, tt.findFilter) if (err != nil) != tt.wantErr { t.Errorf("PerformerStore.Query() error = %v, wantErr %v", err, tt.wantErr) return @@ -1105,7 +1105,7 @@ func TestPerformerQueryPenisLength(t *testing.T) { }, } - performers, _, err := db.Performer.Query(ctx, filter, nil) + performers, _, err := db.GetRepo().Performer.Query(ctx, filter, nil) if err != nil { t.Errorf("PerformerStore.Query() 
error = %v", err) return @@ -1145,7 +1145,7 @@ func verifyFloat(t *testing.T, value *float64, criterion models.FloatCriterionIn func TestPerformerQueryForAutoTag(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Performer + tqb := db.GetRepo().Performer name := performerNames[performerIdx1WithScene] // find a performer by name @@ -1165,7 +1165,7 @@ func TestPerformerQueryForAutoTag(t *testing.T) { func TestPerformerUpdatePerformerImage(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Performer + qb := db.GetRepo().Performer // create performer to test against const name = "TestPerformerUpdatePerformerImage" @@ -1204,7 +1204,7 @@ func TestPerformerQueryAge(t *testing.T) { func verifyPerformerAge(t *testing.T, ageCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Performer + qb := db.GetRepo().Performer performerFilter := models.PerformerFilterType{ Age: &ageCriterion, } @@ -1259,7 +1259,7 @@ func TestPerformerQueryCareerLength(t *testing.T) { func verifyPerformerCareerLength(t *testing.T, criterion models.StringCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Performer + qb := db.GetRepo().Performer performerFilter := models.PerformerFilterType{ CareerLength: &criterion, } @@ -1329,7 +1329,7 @@ func verifyPerformerQuery(t *testing.T, filter models.PerformerFilterType, verif performers := queryPerformers(ctx, t, &filter, nil) for _, performer := range performers { - if err := performer.LoadURLs(ctx, db.Performer); err != nil { + if err := performer.LoadURLs(ctx, db.GetRepo().Performer); err != nil { t.Errorf("Error loading url relationships: %v", err) } } @@ -1347,7 +1347,7 @@ func verifyPerformerQuery(t *testing.T, filter models.PerformerFilterType, verif func queryPerformers(ctx context.Context, t *testing.T, performerFilter *models.PerformerFilterType, findFilter *models.FindFilterType) []*models.Performer { t.Helper() - performers, _, err := 
db.Performer.Query(ctx, performerFilter, findFilter) + performers, _, err := db.GetRepo().Performer.Query(ctx, performerFilter, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -1429,7 +1429,7 @@ func TestPerformerQueryTagCount(t *testing.T) { func verifyPerformersTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Performer + sqb := db.GetRepo().Performer performerFilter := models.PerformerFilterType{ TagCount: &tagCountCriterion, } @@ -1478,7 +1478,7 @@ func verifyPerformersSceneCount(t *testing.T, sceneCountCriterion models.IntCrit assert.Greater(t, len(performers), 0) for _, performer := range performers { - ids, err := db.Scene.FindByPerformerID(ctx, performer.ID) + ids, err := db.GetRepo().Scene.FindByPerformerID(ctx, performer.ID) if err != nil { return err } @@ -1520,7 +1520,7 @@ func verifyPerformersImageCount(t *testing.T, imageCountCriterion models.IntCrit for _, performer := range performers { pp := 0 - result, err := db.Image.Query(ctx, models.ImageQueryOptions{ + result, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ FindFilter: &models.FindFilterType{ PerPage: &pp, @@ -1575,7 +1575,7 @@ func verifyPerformersGalleryCount(t *testing.T, galleryCountCriterion models.Int for _, performer := range performers { pp := 0 - _, count, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ + _, count, err := db.GetRepo().Gallery.Query(ctx, &models.GalleryFilterType{ Performers: &models.MultiCriterionInput{ Value: []string{strconv.Itoa(performer.ID)}, Modifier: models.CriterionModifierIncludes, @@ -1673,7 +1673,7 @@ func TestPerformerQueryStudio(t *testing.T) { func TestPerformerStashIDs(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Performer + qb := db.GetRepo().Performer // create scene to test against const name = "TestPerformerStashIDs" @@ -1707,7 +1707,7 @@ 
func testPerformerStashIDs(ctx context.Context, t *testing.T, s *models.Performe Endpoint: endpoint, } - qb := db.Performer + qb := db.GetRepo().Performer // update stash ids and ensure was updated var err error @@ -1817,7 +1817,7 @@ func TestPerformerQueryIsMissingImage(t *testing.T) { assert.True(t, len(performers) > 0) for _, performer := range performers { - img, err := db.Performer.GetImage(ctx, performer.ID) + img, err := db.GetRepo().Performer.GetImage(ctx, performer.ID) if err != nil { t.Errorf("error getting performer image: %s", err.Error()) } @@ -1835,7 +1835,7 @@ func TestPerformerQueryIsMissingAlias(t *testing.T) { assert.True(t, len(performers) > 0) for _, performer := range performers { - a, err := db.Performer.GetAliases(ctx, performer.ID) + a, err := db.GetRepo().Performer.GetAliases(ctx, performer.ID) if err != nil { t.Errorf("error getting performer aliases: %s", err.Error()) } @@ -1856,7 +1856,7 @@ func TestPerformerQuerySortScenesCount(t *testing.T) { withTxn(func(ctx context.Context) error { // just ensure it queries without error - performers, _, err := db.Performer.Query(ctx, nil, findFilter) + performers, _, err := db.GetRepo().Performer.Query(ctx, nil, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -1871,7 +1871,7 @@ func TestPerformerQuerySortScenesCount(t *testing.T) { // sort in ascending order direction = models.SortDirectionEnumAsc - performers, _, err = db.Performer.Query(ctx, nil, findFilter) + performers, _, err = db.GetRepo().Performer.Query(ctx, nil, findFilter) if err != nil { t.Errorf("Error querying performers: %s", err.Error()) } @@ -1887,7 +1887,7 @@ func TestPerformerQuerySortScenesCount(t *testing.T) { func TestPerformerCountByTagID(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Performer + sqb := db.GetRepo().Performer count, err := sqb.CountByTagID(ctx, tagIDs[tagIdxWithPerformer]) if err != nil { @@ -1910,7 +1910,7 @@ func TestPerformerCountByTagID(t 
*testing.T) { func TestPerformerCount(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Performer + sqb := db.GetRepo().Performer count, err := sqb.Count(ctx) if err != nil { @@ -1925,7 +1925,7 @@ func TestPerformerCount(t *testing.T) { func TestPerformerAll(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Performer + sqb := db.GetRepo().Performer all, err := sqb.All(ctx) if err != nil { @@ -1974,7 +1974,7 @@ func TestPerformerStore_FindByStashID(t *testing.T) { }, } - qb := db.Performer + qb := db.GetRepo().Performer for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2025,7 +2025,7 @@ func TestPerformerStore_FindByStashIDStatus(t *testing.T) { }, } - qb := db.Performer + qb := db.GetRepo().Performer for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { diff --git a/pkg/sqlite/saved_filter_test.go b/pkg/sqlite/saved_filter_test.go index 60592a923df..dc37b5a8d22 100644 --- a/pkg/sqlite/saved_filter_test.go +++ b/pkg/sqlite/saved_filter_test.go @@ -13,7 +13,7 @@ import ( func TestSavedFilterFind(t *testing.T) { withTxn(func(ctx context.Context) error { - savedFilter, err := db.SavedFilter.Find(ctx, savedFilterIDs[savedFilterIdxImage]) + savedFilter, err := db.GetRepo().SavedFilter.Find(ctx, savedFilterIDs[savedFilterIdxImage]) if err != nil { t.Errorf("Error finding saved filter: %s", err.Error()) @@ -27,7 +27,7 @@ func TestSavedFilterFind(t *testing.T) { func TestSavedFilterFindByMode(t *testing.T) { withTxn(func(ctx context.Context) error { - savedFilters, err := db.SavedFilter.FindByMode(ctx, models.FilterModeScenes) + savedFilters, err := db.GetRepo().SavedFilter.FindByMode(ctx, models.FilterModeScenes) if err != nil { t.Errorf("Error finding saved filters: %s", err.Error()) @@ -72,7 +72,7 @@ func TestSavedFilterDestroy(t *testing.T) { ObjectFilter: objectFilter, UIOptions: uiOptions, } - err := db.SavedFilter.Create(ctx, 
&newFilter) + err := db.GetRepo().SavedFilter.Create(ctx, &newFilter) if err == nil { id = newFilter.ID @@ -82,12 +82,12 @@ func TestSavedFilterDestroy(t *testing.T) { }) withTxn(func(ctx context.Context) error { - return db.SavedFilter.Destroy(ctx, id) + return db.GetRepo().SavedFilter.Destroy(ctx, id) }) // now try to find it withTxn(func(ctx context.Context) error { - found, err := db.SavedFilter.Find(ctx, id) + found, err := db.GetRepo().SavedFilter.Find(ctx, id) if err == nil { assert.Nil(t, found) } diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index ccdf6d5d9ae..e4da8777578 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -40,7 +40,7 @@ const ( ) var findExactDuplicateQuery = ` -SELECT string_agg(DISTINCT scene_id) as ids +SELECT GROUP_CONCAT(DISTINCT scene_id) as ids FROM ( SELECT scenes.id as scene_id , video_files.duration as file_duration diff --git a/pkg/sqlite/scene_marker_test.go b/pkg/sqlite/scene_marker_test.go index 0a8343a8bfc..1be14ba8275 100644 --- a/pkg/sqlite/scene_marker_test.go +++ b/pkg/sqlite/scene_marker_test.go @@ -16,7 +16,7 @@ import ( func TestMarkerFindBySceneID(t *testing.T) { withTxn(func(ctx context.Context) error { - mqb := db.SceneMarker + mqb := db.GetRepo().SceneMarker sceneID := sceneIDs[sceneIdxWithMarkers] markers, err := mqb.FindBySceneID(ctx, sceneID) @@ -44,7 +44,7 @@ func TestMarkerFindBySceneID(t *testing.T) { func TestMarkerCountByTagID(t *testing.T) { withTxn(func(ctx context.Context) error { - mqb := db.SceneMarker + mqb := db.GetRepo().SceneMarker markerCount, err := mqb.CountByTagID(ctx, tagIDs[tagIdxWithPrimaryMarkers]) @@ -77,7 +77,7 @@ func TestMarkerCountByTagID(t *testing.T) { func TestMarkerQueryQ(t *testing.T) { withTxn(func(ctx context.Context) error { q := getSceneTitle(sceneIdxWithMarkers) - m, _, err := db.SceneMarker.Query(ctx, nil, &models.FindFilterType{ + m, _, err := db.GetRepo().SceneMarker.Query(ctx, nil, &models.FindFilterType{ Q: &q, }) @@ -98,7 +98,7 @@ func 
TestMarkerQueryQ(t *testing.T) { func TestMarkerQuerySortBySceneUpdated(t *testing.T) { withTxn(func(ctx context.Context) error { sort := "scenes_updated_at" - _, _, err := db.SceneMarker.Query(ctx, nil, &models.FindFilterType{ + _, _, err := db.GetRepo().SceneMarker.Query(ctx, nil, &models.FindFilterType{ Sort: &sort, }) @@ -153,7 +153,7 @@ func TestMarkerQueryTags(t *testing.T) { withTxn(func(ctx context.Context) error { testTags := func(t *testing.T, m *models.SceneMarker, markerFilter *models.SceneMarkerFilterType) { - tagIDs, err := db.SceneMarker.GetTagIDs(ctx, m.ID) + tagIDs, err := db.GetRepo().SceneMarker.GetTagIDs(ctx, m.ID) if err != nil { t.Errorf("error getting marker tag ids: %v", err) } @@ -255,7 +255,7 @@ func TestMarkerQueryTags(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - markers := queryMarkers(ctx, t, db.SceneMarker, tc.markerFilter, tc.findFilter) + markers := queryMarkers(ctx, t, db.GetRepo().SceneMarker, tc.markerFilter, tc.findFilter) assert.Greater(t, len(markers), 0) for _, m := range markers { testTags(t, m, tc.markerFilter) @@ -276,13 +276,13 @@ func TestMarkerQuerySceneTags(t *testing.T) { withTxn(func(ctx context.Context) error { testTags := func(t *testing.T, m *models.SceneMarker, markerFilter *models.SceneMarkerFilterType) { - s, err := db.Scene.Find(ctx, m.SceneID) + s, err := db.GetRepo().Scene.Find(ctx, m.SceneID) if err != nil { t.Errorf("error getting marker tag ids: %v", err) return } - if err := s.LoadTagIDs(ctx, db.Scene); err != nil { + if err := s.LoadTagIDs(ctx, db.GetRepo().Scene); err != nil { t.Errorf("error getting marker tag ids: %v", err) return } @@ -379,7 +379,7 @@ func TestMarkerQuerySceneTags(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - markers := queryMarkers(ctx, t, db.SceneMarker, tc.markerFilter, tc.findFilter) + markers := queryMarkers(ctx, t, db.GetRepo().SceneMarker, tc.markerFilter, tc.findFilter) assert.Greater(t, len(markers), 0) 
for _, m := range markers { testTags(t, m, tc.markerFilter) diff --git a/pkg/sqlite/scene_test.go b/pkg/sqlite/scene_test.go index a3174d7278d..97a9f8bea02 100644 --- a/pkg/sqlite/scene_test.go +++ b/pkg/sqlite/scene_test.go @@ -22,38 +22,38 @@ import ( func loadSceneRelationships(ctx context.Context, expected models.Scene, actual *models.Scene) error { if expected.URLs.Loaded() { - if err := actual.LoadURLs(ctx, db.Scene); err != nil { + if err := actual.LoadURLs(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.GalleryIDs.Loaded() { - if err := actual.LoadGalleryIDs(ctx, db.Scene); err != nil { + if err := actual.LoadGalleryIDs(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.TagIDs.Loaded() { - if err := actual.LoadTagIDs(ctx, db.Scene); err != nil { + if err := actual.LoadTagIDs(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.PerformerIDs.Loaded() { - if err := actual.LoadPerformerIDs(ctx, db.Scene); err != nil { + if err := actual.LoadPerformerIDs(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.Groups.Loaded() { - if err := actual.LoadGroups(ctx, db.Scene); err != nil { + if err := actual.LoadGroups(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.StashIDs.Loaded() { - if err := actual.LoadStashIDs(ctx, db.Scene); err != nil { + if err := actual.LoadStashIDs(ctx, db.GetRepo().Scene); err != nil { return err } } if expected.Files.Loaded() { - if err := actual.LoadFiles(ctx, db.Scene); err != nil { + if err := actual.LoadFiles(ctx, db.GetRepo().Scene); err != nil { return err } } @@ -233,7 +233,7 @@ func Test_sceneQueryBuilder_Create(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -466,7 +466,7 @@ func Test_sceneQueryBuilder_Update(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t 
*testing.T, ctx context.Context) { assert := assert.New(t) @@ -675,7 +675,7 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) { }, } for _, tt := range tests { - qb := db.Scene + qb := db.GetRepo().Scene runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -1215,7 +1215,7 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) { } for _, tt := range tests { - qb := db.Scene + qb := db.GetRepo().Scene runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) @@ -1291,7 +1291,7 @@ func Test_sceneQueryBuilder_AddO(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1328,7 +1328,7 @@ func Test_sceneQueryBuilder_DeleteO(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1365,7 +1365,7 @@ func Test_sceneQueryBuilder_ResetO(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1403,7 +1403,7 @@ func Test_sceneQueryBuilder_Destroy(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1475,7 +1475,7 @@ func Test_sceneQueryBuilder_Find(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1546,7 +1546,7 @@ func Test_sceneQueryBuilder_FindMany(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1616,7 +1616,7 @@ func Test_sceneQueryBuilder_FindByChecksum(t *testing.T) { }, } - qb := 
db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1686,7 +1686,7 @@ func Test_sceneQueryBuilder_FindByOSHash(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1757,7 +1757,7 @@ func Test_sceneQueryBuilder_FindByPath(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1799,7 +1799,7 @@ func Test_sceneQueryBuilder_FindByGalleryID(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1823,7 +1823,7 @@ func Test_sceneQueryBuilder_FindByGalleryID(t *testing.T) { func TestSceneCountByPerformerID(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene count, err := sqb.CountByPerformerID(ctx, performerIDs[performerIdxWithScene]) if err != nil { @@ -1874,7 +1874,7 @@ func Test_sceneStore_FindByFileID(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1920,7 +1920,7 @@ func Test_sceneStore_CountByFileID(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1947,7 +1947,7 @@ func Test_sceneStore_CountMissingChecksum(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -1974,7 +1974,7 @@ func Test_sceneStore_CountMissingOshash(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t 
*testing.T, ctx context.Context) { @@ -1992,7 +1992,7 @@ func Test_sceneStore_CountMissingOshash(t *testing.T) { func TestSceneWall(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene const sceneIdx = 2 wallQuery := getSceneStringValue(sceneIdx, "Details") @@ -2029,7 +2029,7 @@ func TestSceneQueryQ(t *testing.T) { q := getSceneStringValue(sceneIdx, titleField) withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneQueryQ(ctx, t, sqb, q, sceneIdx) @@ -2211,7 +2211,7 @@ func TestSceneQuery(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Scene.Query(ctx, models.SceneQueryOptions{ + results, err := db.GetRepo().Scene.Query(ctx, models.SceneQueryOptions{ SceneFilter: tt.filter, QueryOptions: models.QueryOptions{ FindFilter: tt.findFilter, @@ -2324,7 +2324,7 @@ func TestSceneQueryPath(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -2423,7 +2423,7 @@ func TestSceneQueryPathOr(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) @@ -2458,7 +2458,7 @@ func TestSceneQueryPathAndRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) @@ -2497,7 +2497,7 @@ func TestSceneQueryPathNotRating(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) @@ -2530,7 +2530,7 @@ func TestSceneIllegalQuery(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene queryOptions := models.SceneQueryOptions{ SceneFilter: sceneFilter, @@ -2557,7 
+2557,7 @@ func verifySceneQuery(t *testing.T, filter models.SceneFilterType, verifyFn func t.Helper() withTxn(func(ctx context.Context) error { t.Helper() - sqb := db.Scene + sqb := db.GetRepo().Scene scenes := queryScene(ctx, t, sqb, &filter, nil) @@ -2580,7 +2580,7 @@ func verifySceneQuery(t *testing.T, filter models.SceneFilterType, verifyFn func func verifyScenesPath(t *testing.T, pathCriterion models.StringCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ Path: &pathCriterion, } @@ -2674,7 +2674,7 @@ func TestSceneQueryRating100(t *testing.T) { func verifyScenesRating100(t *testing.T, ratingCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ Rating100: &ratingCriterion, } @@ -2733,7 +2733,7 @@ func TestSceneQueryOCounter(t *testing.T) { func verifyScenesOCounter(t *testing.T, oCounterCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ OCounter: &oCounterCriterion, } @@ -2798,7 +2798,7 @@ func TestSceneQueryDuration(t *testing.T) { func verifyScenesDuration(t *testing.T, durationCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ Duration: &durationCriterion, } @@ -2806,7 +2806,7 @@ func verifyScenesDuration(t *testing.T, durationCriterion models.IntCriterionInp scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) for _, scene := range scenes { - if err := scene.LoadPrimaryFile(ctx, db.File); err != nil { + if err := scene.LoadPrimaryFile(ctx, db.GetRepo().File); err != nil { t.Errorf("Error querying scene files: %v", err) return nil } @@ -2870,7 +2870,7 @@ func TestSceneQueryResolution(t *testing.T) { func verifyScenesResolution(t 
*testing.T, resolution models.ResolutionEnum) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ Resolution: &models.ResolutionCriterionInput{ Value: resolution, @@ -2881,7 +2881,7 @@ func verifyScenesResolution(t *testing.T, resolution models.ResolutionEnum) { scenes := queryScene(ctx, t, sqb, &sceneFilter, nil) for _, scene := range scenes { - if err := scene.LoadPrimaryFile(ctx, db.File); err != nil { + if err := scene.LoadPrimaryFile(ctx, db.GetRepo().File); err != nil { t.Errorf("Error querying scene files: %v", err) return nil } @@ -2933,7 +2933,7 @@ func TestAllResolutionsHaveResolutionRange(t *testing.T) { func TestSceneQueryResolutionModifiers(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene sceneNoResolution, _ := createScene(ctx, 0, 0) firstScene540P, _ := createScene(ctx, 960, 540) secondScene540P, _ := createScene(ctx, 1280, 719) @@ -2994,13 +2994,13 @@ func createScene(ctx context.Context, width int, height int) (*models.Scene, err Height: height, } - if err := db.File.Create(ctx, sceneFile); err != nil { + if err := db.GetRepo().File.Create(ctx, sceneFile); err != nil { return nil, err } scene := &models.Scene{} - if err := db.Scene.Create(ctx, scene, []models.FileID{sceneFile.ID}); err != nil { + if err := db.GetRepo().Scene.Create(ctx, scene, []models.FileID{sceneFile.ID}); err != nil { return nil, err } @@ -3009,7 +3009,7 @@ func createScene(ctx context.Context, width int, height int) (*models.Scene, err func TestSceneQueryHasMarkers(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene hasMarkers := "true" sceneFilter := models.SceneFilterType{ HasMarkers: &hasMarkers, @@ -3045,7 +3045,7 @@ func TestSceneQueryHasMarkers(t *testing.T) { func TestSceneQueryIsMissingGallery(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb 
:= db.GetRepo().Scene isMissing := "galleries" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3074,7 +3074,7 @@ func TestSceneQueryIsMissingGallery(t *testing.T) { func TestSceneQueryIsMissingStudio(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "studio" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3103,7 +3103,7 @@ func TestSceneQueryIsMissingStudio(t *testing.T) { func TestSceneQueryIsMissingMovies(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "movie" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3132,7 +3132,7 @@ func TestSceneQueryIsMissingMovies(t *testing.T) { func TestSceneQueryIsMissingPerformers(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "performers" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3163,7 +3163,7 @@ func TestSceneQueryIsMissingPerformers(t *testing.T) { func TestSceneQueryIsMissingDate(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "date" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3185,7 +3185,7 @@ func TestSceneQueryIsMissingDate(t *testing.T) { func TestSceneQueryIsMissingTags(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "tags" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3211,7 +3211,7 @@ func TestSceneQueryIsMissingTags(t *testing.T) { func TestSceneQueryIsMissingRating(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "rating" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3232,7 +3232,7 @@ func TestSceneQueryIsMissingRating(t *testing.T) { func TestSceneQueryIsMissingPhash(t 
*testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene isMissing := "phash" sceneFilter := models.SceneFilterType{ IsMissing: &isMissing, @@ -3363,7 +3363,7 @@ func TestSceneQueryPerformers(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Scene.Query(ctx, models.SceneQueryOptions{ + results, err := db.GetRepo().Scene.Query(ctx, models.SceneQueryOptions{ SceneFilter: &models.SceneFilterType{ Performers: &tt.filter, }, @@ -3499,7 +3499,7 @@ func TestSceneQueryTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Scene.Query(ctx, models.SceneQueryOptions{ + results, err := db.GetRepo().Scene.Query(ctx, models.SceneQueryOptions{ SceneFilter: &models.SceneFilterType{ Tags: &tt.filter, }, @@ -3696,7 +3696,7 @@ func TestSceneQueryPerformerTags(t *testing.T) { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { assert := assert.New(t) - results, err := db.Scene.Query(ctx, models.SceneQueryOptions{ + results, err := db.GetRepo().Scene.Query(ctx, models.SceneQueryOptions{ SceneFilter: tt.filter, QueryOptions: models.QueryOptions{ FindFilter: tt.findFilter, @@ -3790,7 +3790,7 @@ func TestSceneQueryStudio(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -3816,7 +3816,7 @@ func TestSceneQueryStudio(t *testing.T) { func TestSceneQueryStudioDepth(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene depth := 2 studioCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ @@ -3945,7 +3945,7 @@ func TestSceneGroups(t *testing.T) { findFilter.Q = &tt.q } - results, err := db.Scene.Query(ctx, models.SceneQueryOptions{ + results, err := db.GetRepo().Scene.Query(ctx, 
models.SceneQueryOptions{ SceneFilter: sceneFilter, QueryOptions: models.QueryOptions{ FindFilter: findFilter, @@ -3970,7 +3970,7 @@ func TestSceneGroups(t *testing.T) { func TestSceneQueryMovies(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene movieCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(groupIDs[groupIdxWithScene]), @@ -4010,7 +4010,7 @@ func TestSceneQueryMovies(t *testing.T) { func TestSceneQueryPhashDuplicated(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene duplicated := true phashCriterion := models.PHashDuplicationCriterionInput{ Duplicated: &duplicated, @@ -4121,7 +4121,7 @@ func TestSceneQuerySorting(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) { @@ -4173,7 +4173,7 @@ func TestSceneQueryPagination(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene scenes := queryScene(ctx, t, sqb, nil, &findFilter) assert.Len(t, scenes, 1) @@ -4221,7 +4221,7 @@ func TestSceneQueryTagCount(t *testing.T) { func verifyScenesTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ TagCount: &tagCountCriterion, } @@ -4262,7 +4262,7 @@ func TestSceneQueryPerformerCount(t *testing.T) { func verifyScenesPerformerCount(t *testing.T, performerCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene sceneFilter := models.SceneFilterType{ PerformerCount: &performerCountCriterion, } @@ -4285,7 +4285,7 @@ func verifyScenesPerformerCount(t *testing.T, performerCountCriterion models.Int func TestFindByMovieID(t *testing.T) { withTxn(func(ctx context.Context) error { - 
sqb := db.Scene + sqb := db.GetRepo().Scene scenes, err := sqb.FindByGroupID(ctx, groupIDs[groupIdxWithScene]) @@ -4310,7 +4310,7 @@ func TestFindByMovieID(t *testing.T) { func TestFindByPerformerID(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Scene + sqb := db.GetRepo().Scene scenes, err := sqb.FindByPerformerID(ctx, performerIDs[performerIdxWithScene]) @@ -4335,7 +4335,7 @@ func TestFindByPerformerID(t *testing.T) { func TestSceneUpdateSceneCover(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene sceneID := sceneIDs[sceneIdxWithGallery] @@ -4347,7 +4347,7 @@ func TestSceneUpdateSceneCover(t *testing.T) { func TestSceneStashIDs(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene // create scene to test against const name = "TestSceneStashIDs" @@ -4381,7 +4381,7 @@ func testSceneStashIDs(ctx context.Context, t *testing.T, s *models.Scene) { Endpoint: endpoint, } - qb := db.Scene + qb := db.GetRepo().Scene // update stash ids and ensure was updated var err error @@ -4423,7 +4423,7 @@ func testSceneStashIDs(ctx context.Context, t *testing.T, s *models.Scene) { func TestSceneQueryQTrim(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene expectedID := sceneIDs[sceneIdxWithSpacedName] @@ -4465,7 +4465,7 @@ func TestSceneQueryQTrim(t *testing.T) { } func TestSceneStore_All(t *testing.T) { - qb := db.Scene + qb := db.GetRepo().Scene withRollbackTxn(func(ctx context.Context) error { got, err := qb.All(ctx) @@ -4482,7 +4482,7 @@ func TestSceneStore_All(t *testing.T) { } func TestSceneStore_FindDuplicates(t *testing.T) { - qb := db.Scene + qb := db.GetRepo().Scene withRollbackTxn(func(ctx context.Context) error { distance := 0 @@ -4536,7 +4536,7 @@ func TestSceneStore_AssignFiles(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { @@ -4572,7 +4572,7 @@ func TestSceneStore_AddView(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -4663,7 +4663,7 @@ func TestSceneStore_SaveActivity(t *testing.T) { }, } - qb := db.Scene + qb := db.GetRepo().Scene for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -4715,7 +4715,7 @@ func TestSceneStore_SaveActivity(t *testing.T) { // TODO - this should be in history_test and generalised func TestSceneStore_CountAllViews(t *testing.T) { withRollbackTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene sceneID := sceneIDs[sceneIdx1WithPerformer] @@ -4748,7 +4748,7 @@ func TestSceneStore_CountAllViews(t *testing.T) { func TestSceneStore_CountUniqueViews(t *testing.T) { withRollbackTxn(func(ctx context.Context) error { - qb := db.Scene + qb := db.GetRepo().Scene sceneID := sceneIDs[sceneIdx1WithPerformer] diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 8e0d8e0000a..f057394f7f0 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -584,7 +584,7 @@ func indexFromID(ids []int, id int) int { return -1 } -var db *sqlite.Database +var db sqlite.DBInterface func TestMain(m *testing.M) { // initialise empty config - needed by some migrations @@ -640,7 +640,14 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() sqlite.RegisterSqliteDialect() - db = sqlite.NewSQLiteDatabase(databaseFile) + + dbUrl, valid := os.LookupEnv("PGSQL_TEST") + if valid { + db = sqlite.NewPostgresDatabase(dbUrl) + } else { + db = sqlite.NewSQLiteDatabase(databaseFile) + } + db.SetBlobStoreOptions(sqlite.BlobStoreOptions{ UseDatabase: true, // don't use filesystem @@ -674,11 +681,11 @@ func populateDB() error { // TODO - link folders to zip files - if err := createTags(ctx, db.Tag, tagsNameCase, tagsNameNoCase); err != nil { + if err := createTags(ctx, db.GetRepo().Tag, tagsNameCase, 
tagsNameNoCase); err != nil { return fmt.Errorf("error creating tags: %s", err.Error()) } - if err := createGroups(ctx, db.Group, groupsNameCase, groupsNameNoCase); err != nil { + if err := createGroups(ctx, db.GetRepo().Group, groupsNameCase, groupsNameNoCase); err != nil { return fmt.Errorf("error creating groups: %s", err.Error()) } @@ -702,15 +709,15 @@ func populateDB() error { return fmt.Errorf("error creating images: %s", err.Error()) } - if err := addTagImage(ctx, db.Tag, tagIdxWithCoverImage); err != nil { + if err := addTagImage(ctx, db.GetRepo().Tag, tagIdxWithCoverImage); err != nil { return fmt.Errorf("error adding tag image: %s", err.Error()) } - if err := createSavedFilters(ctx, db.SavedFilter, totalSavedFilters); err != nil { + if err := createSavedFilters(ctx, db.GetRepo().SavedFilter, totalSavedFilters); err != nil { return fmt.Errorf("error creating saved filters: %s", err.Error()) } - if err := linkGroupStudios(ctx, db.Group); err != nil { + if err := linkGroupStudios(ctx, db.GetRepo().Group); err != nil { return fmt.Errorf("error linking group studios: %s", err.Error()) } @@ -718,21 +725,21 @@ func populateDB() error { return fmt.Errorf("error linking studios parent: %s", err.Error()) } - if err := linkTagsParent(ctx, db.Tag); err != nil { + if err := linkTagsParent(ctx, db.GetRepo().Tag); err != nil { return fmt.Errorf("error linking tags parent: %s", err.Error()) } - if err := linkGroupsParent(ctx, db.Group); err != nil { + if err := linkGroupsParent(ctx, db.GetRepo().Group); err != nil { return fmt.Errorf("error linking tags parent: %s", err.Error()) } for _, ms := range markerSpecs { - if err := createMarker(ctx, db.SceneMarker, ms); err != nil { + if err := createMarker(ctx, db.GetRepo().SceneMarker, ms); err != nil { return fmt.Errorf("error creating scene marker: %s", err.Error()) } } for _, cs := range chapterSpecs { - if err := createChapter(ctx, db.GalleryChapter, cs); err != nil { + if err := createChapter(ctx, 
db.GetRepo().GalleryChapter, cs); err != nil { return fmt.Errorf("error creating gallery chapter: %s", err.Error()) } } @@ -779,7 +786,7 @@ func makeFolder(i int) models.Folder { } func createFolders(ctx context.Context) error { - qb := db.Folder + qb := db.GetRepo().Folder for i := 0; i < totalFolders; i++ { folder := makeFolder(i) @@ -882,7 +889,7 @@ func makeFile(i int) models.File { } func createFiles(ctx context.Context) error { - qb := db.File + qb := db.GetRepo().File for i := 0; i < totalFiles; i++ { file := makeFile(i) @@ -1131,8 +1138,8 @@ func makeScene(i int) *models.Scene { } func createScenes(ctx context.Context, n int) error { - sqb := db.Scene - fqb := db.File + sqb := db.GetRepo().Scene + fqb := db.GetRepo().File for i := 0; i < n; i++ { f := makeSceneFile(i) @@ -1220,8 +1227,8 @@ func makeImage(i int) *models.Image { } func createImages(ctx context.Context, n int) error { - qb := db.Image - fqb := db.File + qb := db.GetRepo().Image + fqb := db.GetRepo().File for i := 0; i < n; i++ { f := makeImageFile(i) @@ -1317,8 +1324,8 @@ func makeGallery(i int, includeScenes bool) *models.Gallery { } func createGalleries(ctx context.Context, n int) error { - gqb := db.Gallery - fqb := db.File + gqb := db.GetRepo().Gallery + fqb := db.GetRepo().File for i := 0; i < n; i++ { var fileIDs []models.FileID @@ -1509,7 +1516,7 @@ func performerAliases(i int) []string { // createPerformers creates n performers with plain Name and o performers with camel cased NaMe included func createPerformers(ctx context.Context, n int, o int) error { - pqb := db.Performer + pqb := db.GetRepo().Performer const namePlain = "Name" const nameNoCase = "NaMe" @@ -1714,7 +1721,7 @@ func getStudioBoolValue(index int) bool { // createStudios creates n studios with plain Name and o studios with camel cased NaMe included func createStudios(ctx context.Context, n int, o int) error { - sqb := db.Studio + sqb := db.GetRepo().Studio const namePlain = "Name" const nameNoCase = "NaMe" @@ -1883,7 
+1890,7 @@ func linkGroupStudios(ctx context.Context, mqb models.GroupWriter) error { } func linkStudiosParent(ctx context.Context) error { - qb := db.Studio + qb := db.GetRepo().Studio return doLinks(studioParentLinks, func(parentIndex, childIndex int) error { input := &models.StudioPartial{ ID: studioIDs[childIndex], diff --git a/pkg/sqlite/studio_test.go b/pkg/sqlite/studio_test.go index a61dadc245f..b5a25314c4c 100644 --- a/pkg/sqlite/studio_test.go +++ b/pkg/sqlite/studio_test.go @@ -18,7 +18,7 @@ import ( func TestStudioFindByName(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio name := studioNames[studioIdxWithScene] // find a studio by name @@ -70,7 +70,7 @@ func TestStudioQueryNameOr(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studios := queryStudio(ctx, t, sqb, &studioFilter, nil) @@ -103,7 +103,7 @@ func TestStudioQueryNameAndUrl(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studios := queryStudio(ctx, t, sqb, &studioFilter, nil) @@ -140,7 +140,7 @@ func TestStudioQueryNameNotUrl(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studios := queryStudio(ctx, t, sqb, &studioFilter, nil) @@ -173,7 +173,7 @@ func TestStudioIllegalQuery(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio _, _, err := sqb.Query(ctx, studioFilter, nil) assert.NotNil(err) @@ -199,7 +199,7 @@ func TestStudioQueryIgnoreAutoTag(t *testing.T) { IgnoreAutoTag: &ignoreAutoTag, } - sqb := db.Studio + sqb := db.GetRepo().Studio studios := queryStudio(ctx, t, sqb, &studioFilter, nil) @@ -214,7 +214,7 @@ func TestStudioQueryIgnoreAutoTag(t *testing.T) { func TestStudioQueryForAutoTag(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Studio + tqb := db.GetRepo().Studio name := 
studioNames[studioIdxWithGroup] // find a studio by name @@ -242,7 +242,7 @@ func TestStudioQueryForAutoTag(t *testing.T) { func TestStudioQueryParent(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioCriterion := models.MultiCriterionInput{ Value: []string{ strconv.Itoa(studioIDs[studioIdxWithChildStudio]), @@ -292,18 +292,18 @@ func TestStudioDestroyParent(t *testing.T) { // create parent and child studios if err := withTxn(func(ctx context.Context) error { - createdParent, err := createStudio(ctx, db.Studio, parentName, nil) + createdParent, err := createStudio(ctx, db.GetRepo().Studio, parentName, nil) if err != nil { return fmt.Errorf("Error creating parent studio: %s", err.Error()) } parentID := createdParent.ID - createdChild, err := createStudio(ctx, db.Studio, childName, &parentID) + createdChild, err := createStudio(ctx, db.GetRepo().Studio, childName, &parentID) if err != nil { return fmt.Errorf("Error creating child studio: %s", err.Error()) } - sqb := db.Studio + sqb := db.GetRepo().Studio // destroy the parent err = sqb.Destroy(ctx, createdParent.ID) @@ -325,7 +325,7 @@ func TestStudioDestroyParent(t *testing.T) { func TestStudioFindChildren(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studios, err := sqb.FindChildren(ctx, studioIDs[studioIdxWithChildStudio]) @@ -354,18 +354,18 @@ func TestStudioUpdateClearParent(t *testing.T) { // create parent and child studios if err := withTxn(func(ctx context.Context) error { - createdParent, err := createStudio(ctx, db.Studio, parentName, nil) + createdParent, err := createStudio(ctx, db.GetRepo().Studio, parentName, nil) if err != nil { return fmt.Errorf("Error creating parent studio: %s", err.Error()) } parentID := createdParent.ID - createdChild, err := createStudio(ctx, db.Studio, childName, &parentID) + createdChild, err := createStudio(ctx, db.GetRepo().Studio, childName, &parentID) if 
err != nil { return fmt.Errorf("Error creating child studio: %s", err.Error()) } - sqb := db.Studio + sqb := db.GetRepo().Studio // clear the parent id from the child input := models.StudioPartial{ @@ -391,11 +391,11 @@ func TestStudioUpdateClearParent(t *testing.T) { func TestStudioUpdateStudioImage(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Studio + qb := db.GetRepo().Studio // create studio to test against const name = "TestStudioUpdateStudioImage" - created, err := createStudio(ctx, db.Studio, name, nil) + created, err := createStudio(ctx, db.GetRepo().Studio, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } @@ -427,7 +427,7 @@ func TestStudioQuerySceneCount(t *testing.T) { func verifyStudiosSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioFilter := models.StudioFilterType{ SceneCount: &sceneCountCriterion, } @@ -436,7 +436,7 @@ func verifyStudiosSceneCount(t *testing.T, sceneCountCriterion models.IntCriteri assert.Greater(t, len(studios), 0) for _, studio := range studios { - sceneCount, err := db.Scene.CountByStudioID(ctx, studio.ID) + sceneCount, err := db.GetRepo().Scene.CountByStudioID(ctx, studio.ID) if err != nil { return err } @@ -468,7 +468,7 @@ func TestStudioQueryImageCount(t *testing.T) { func verifyStudiosImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioFilter := models.StudioFilterType{ ImageCount: &imageCountCriterion, } @@ -479,7 +479,7 @@ func verifyStudiosImageCount(t *testing.T, imageCountCriterion models.IntCriteri for _, studio := range studios { pp := 0 - result, err := db.Image.Query(ctx, models.ImageQueryOptions{ + result, err := db.GetRepo().Image.Query(ctx, models.ImageQueryOptions{ QueryOptions: models.QueryOptions{ 
FindFilter: &models.FindFilterType{ PerPage: &pp, @@ -524,7 +524,7 @@ func TestStudioQueryGalleryCount(t *testing.T) { func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioFilter := models.StudioFilterType{ GalleryCount: &galleryCountCriterion, } @@ -535,7 +535,7 @@ func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCri for _, studio := range studios { pp := 0 - _, count, err := db.Gallery.Query(ctx, &models.GalleryFilterType{ + _, count, err := db.GetRepo().Gallery.Query(ctx, &models.GalleryFilterType{ Studios: &models.HierarchicalMultiCriterionInput{ Value: []string{strconv.Itoa(studio.ID)}, Modifier: models.CriterionModifierIncludes, @@ -555,11 +555,11 @@ func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCri func TestStudioStashIDs(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Studio + qb := db.GetRepo().Studio // create studio to test against const name = "TestStudioStashIDs" - created, err := createStudio(ctx, db.Studio, name, nil) + created, err := createStudio(ctx, db.GetRepo().Studio, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } @@ -581,7 +581,7 @@ func TestStudioStashIDs(t *testing.T) { } func testStudioStashIDs(ctx context.Context, t *testing.T, s *models.Studio) { - qb := db.Studio + qb := db.GetRepo().Studio if err := s.LoadStashIDs(ctx, qb); err != nil { t.Error(err.Error()) @@ -706,7 +706,7 @@ func TestStudioQueryRating(t *testing.T) { func queryStudios(ctx context.Context, t *testing.T, studioFilter *models.StudioFilterType, findFilter *models.FindFilterType) []*models.Studio { t.Helper() - studios, _, err := db.Studio.Query(ctx, studioFilter, findFilter) + studios, _, err := db.GetRepo().Studio.Query(ctx, studioFilter, findFilter) if err != nil { t.Errorf("Error querying studio: 
%s", err.Error()) } @@ -788,7 +788,7 @@ func TestStudioQueryTagCount(t *testing.T) { func verifyStudiosTagCount(t *testing.T, tagCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioFilter := models.StudioFilterType{ TagCount: &tagCountCriterion, } @@ -811,7 +811,7 @@ func verifyStudiosTagCount(t *testing.T, tagCountCriterion models.IntCriterionIn func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn func(ctx context.Context, s *models.Studio)) { withTxn(func(ctx context.Context) error { t.Helper() - sqb := db.Studio + sqb := db.GetRepo().Studio studios := queryStudio(ctx, t, sqb, &filter, nil) @@ -828,7 +828,7 @@ func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn fu func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio studioFilter := models.StudioFilterType{ Rating100: &ratingCriterion, } @@ -849,7 +849,7 @@ func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput) func TestStudioQueryIsMissingRating(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio isMissing := "rating" studioFilter := models.StudioFilterType{ IsMissing: &isMissing, @@ -925,7 +925,7 @@ func TestStudioQueryAlias(t *testing.T) { verifyFn := func(ctx context.Context, studio *models.Studio) { t.Helper() - aliases, err := db.Studio.GetAliases(ctx, studio.ID) + aliases, err := db.GetRepo().Studio.GetAliases(ctx, studio.ID) if err != nil { t.Errorf("Error querying studios: %s", err.Error()) } @@ -960,11 +960,11 @@ func TestStudioQueryAlias(t *testing.T) { func TestStudioAlias(t *testing.T) { if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Studio + qb := db.GetRepo().Studio // create studio to test against const name = "TestStudioAlias" - created, err 
:= createStudio(ctx, db.Studio, name, nil) + created, err := createStudio(ctx, db.GetRepo().Studio, name, nil) if err != nil { return fmt.Errorf("Error creating studio: %s", err.Error()) } @@ -986,7 +986,7 @@ func TestStudioAlias(t *testing.T) { } func testStudioAlias(ctx context.Context, t *testing.T, s *models.Studio) { - qb := db.Studio + qb := db.GetRepo().Studio if err := s.LoadAliases(ctx, qb); err != nil { t.Error(err.Error()) return @@ -1108,7 +1108,7 @@ func TestStudioQueryFast(t *testing.T) { } withTxn(func(ctx context.Context) error { - sqb := db.Studio + sqb := db.GetRepo().Studio for _, f := range filters { for _, ff := range findFilters { _, _, err := sqb.Query(ctx, &f, &ff) diff --git a/pkg/sqlite/tag_test.go b/pkg/sqlite/tag_test.go index 5359be78517..e9efcd0aa07 100644 --- a/pkg/sqlite/tag_test.go +++ b/pkg/sqlite/tag_test.go @@ -17,7 +17,7 @@ import ( func TestMarkerFindBySceneMarkerID(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Tag + tqb := db.GetRepo().Tag markerID := markerIDs[markerIdxWithTag] @@ -44,7 +44,7 @@ func TestMarkerFindBySceneMarkerID(t *testing.T) { func TestTagFindByGroupID(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Tag + tqb := db.GetRepo().Tag groupID := groupIDs[groupIdxWithTag] @@ -71,7 +71,7 @@ func TestTagFindByGroupID(t *testing.T) { func TestTagFindByName(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Tag + tqb := db.GetRepo().Tag name := tagNames[tagIdxWithScene] // find a tag by name @@ -107,7 +107,7 @@ func TestTagQueryIgnoreAutoTag(t *testing.T) { IgnoreAutoTag: &ignoreAutoTag, } - sqb := db.Tag + sqb := db.GetRepo().Tag tags := queryTags(ctx, t, sqb, &tagFilter, nil) @@ -122,7 +122,7 @@ func TestTagQueryIgnoreAutoTag(t *testing.T) { func TestTagQueryForAutoTag(t *testing.T) { withTxn(func(ctx context.Context) error { - tqb := db.Tag + tqb := db.GetRepo().Tag name := tagNames[tagIdx1WithScene] // find a tag by name @@ -156,7 +156,7 @@ func 
TestTagFindByNames(t *testing.T) { var names []string withTxn(func(ctx context.Context) error { - tqb := db.Tag + tqb := db.GetRepo().Tag names = append(names, tagNames[tagIdxWithScene]) // find tags by names @@ -201,7 +201,7 @@ func TestTagFindByNames(t *testing.T) { func TestTagQuerySort(t *testing.T) { withTxn(func(ctx context.Context) error { - sqb := db.Tag + sqb := db.GetRepo().Tag sortBy := "scenes_count" dir := models.SortDirectionEnumDesc @@ -286,7 +286,7 @@ func TestTagQueryAlias(t *testing.T) { } verifyFn := func(ctx context.Context, tag *models.Tag) { - aliases, err := db.Tag.GetAliases(ctx, tag.ID) + aliases, err := db.GetRepo().Tag.GetAliases(ctx, tag.ID) if err != nil { t.Errorf("Error querying tags: %s", err.Error()) } @@ -321,7 +321,7 @@ func TestTagQueryAlias(t *testing.T) { func verifyTagQuery(t *testing.T, tagFilter *models.TagFilterType, findFilter *models.FindFilterType, verifyFn func(ctx context.Context, t *models.Tag)) { withTxn(func(ctx context.Context) error { - sqb := db.Tag + sqb := db.GetRepo().Tag tags := queryTags(ctx, t, sqb, tagFilter, findFilter) @@ -345,7 +345,7 @@ func queryTags(ctx context.Context, t *testing.T, qb models.TagReader, tagFilter func TestTagQueryIsMissingImage(t *testing.T) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag isMissing := "image" tagFilter := models.TagFilterType{ IsMissing: &isMissing, @@ -399,7 +399,7 @@ func TestTagQuerySceneCount(t *testing.T) { func verifyTagSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ SceneCount: &sceneCountCriterion, } @@ -438,7 +438,7 @@ func TestTagQueryMarkerCount(t *testing.T) { func verifyTagMarkerCount(t *testing.T, markerCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ MarkerCount: 
&markerCountCriterion, } @@ -477,7 +477,7 @@ func TestTagQueryImageCount(t *testing.T) { func verifyTagImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ ImageCount: &imageCountCriterion, } @@ -516,7 +516,7 @@ func TestTagQueryGalleryCount(t *testing.T) { func verifyTagGalleryCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ GalleryCount: &imageCountCriterion, } @@ -555,7 +555,7 @@ func TestTagQueryPerformerCount(t *testing.T) { func verifyTagPerformerCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ PerformerCount: &imageCountCriterion, } @@ -594,7 +594,7 @@ func TestTagQueryStudioCount(t *testing.T) { func verifyTagStudioCount(t *testing.T, imageCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ StudioCount: &imageCountCriterion, } @@ -633,7 +633,7 @@ func TestTagQueryParentCount(t *testing.T) { func verifyTagParentCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ ParentCount: &sceneCountCriterion, } @@ -673,7 +673,7 @@ func TestTagQueryChildCount(t *testing.T) { func verifyTagChildCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) { withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag tagFilter := models.TagFilterType{ ChildCount: &sceneCountCriterion, } @@ -695,7 +695,7 @@ func verifyTagChildCount(t *testing.T, sceneCountCriterion models.IntCriterionIn func TestTagQueryParent(t 
*testing.T) { withTxn(func(ctx context.Context) error { const nameField = "Name" - sqb := db.Tag + sqb := db.GetRepo().Tag tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithChildTag]), @@ -773,7 +773,7 @@ func TestTagQueryChild(t *testing.T) { withTxn(func(ctx context.Context) error { const nameField = "Name" - sqb := db.Tag + sqb := db.GetRepo().Tag tagCriterion := models.HierarchicalMultiCriterionInput{ Value: []string{ strconv.Itoa(tagIDs[tagIdxWithParentTag]), @@ -849,7 +849,7 @@ func TestTagQueryChild(t *testing.T) { func TestTagUpdateTagImage(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag // create tag to test against const name = "TestTagUpdateTagImage" @@ -869,7 +869,7 @@ func TestTagUpdateTagImage(t *testing.T) { func TestTagUpdateAlias(t *testing.T) { if err := withTxn(func(ctx context.Context) error { - qb := db.Tag + qb := db.GetRepo().Tag // create tag to test against const name = "TestTagUpdateAlias" @@ -905,8 +905,8 @@ func TestTagMerge(t *testing.T) { // merge tests - perform these in a transaction that we'll rollback if err := withRollbackTxn(func(ctx context.Context) error { - qb := db.Tag - mqb := db.SceneMarker + qb := db.GetRepo().Tag + mqb := db.GetRepo().SceneMarker // try merging into same tag err := qb.Merge(ctx, []int{tagIDs[tagIdx1WithScene]}, tagIDs[tagIdx1WithScene]) @@ -962,11 +962,11 @@ func TestTagMerge(t *testing.T) { } // ensure scene points to new tag - s, err := db.Scene.Find(ctx, sceneIDs[sceneIdxWithTwoTags]) + s, err := db.GetRepo().Scene.Find(ctx, sceneIDs[sceneIdxWithTwoTags]) if err != nil { return err } - if err := s.LoadTagIDs(ctx, db.Scene); err != nil { + if err := s.LoadTagIDs(ctx, db.GetRepo().Scene); err != nil { return err } sceneTagIDs := s.TagIDs.List() @@ -989,19 +989,19 @@ func TestTagMerge(t *testing.T) { assert.Contains(markerTagIDs, destID) // ensure image points to new tag - imageTagIDs, err := 
db.Image.GetTagIDs(ctx, imageIDs[imageIdxWithTwoTags]) + imageTagIDs, err := db.GetRepo().Image.GetTagIDs(ctx, imageIDs[imageIdxWithTwoTags]) if err != nil { return err } assert.Contains(imageTagIDs, destID) - g, err := db.Gallery.Find(ctx, galleryIDs[galleryIdxWithTwoTags]) + g, err := db.GetRepo().Gallery.Find(ctx, galleryIDs[galleryIdxWithTwoTags]) if err != nil { return err } - if err := g.LoadTagIDs(ctx, db.Gallery); err != nil { + if err := g.LoadTagIDs(ctx, db.GetRepo().Gallery); err != nil { return err } @@ -1009,7 +1009,7 @@ func TestTagMerge(t *testing.T) { assert.Contains(g.TagIDs.List(), destID) // ensure performer points to new tag - performerTagIDs, err := db.Performer.GetTagIDs(ctx, performerIDs[performerIdxWithTwoTags]) + performerTagIDs, err := db.GetRepo().Performer.GetTagIDs(ctx, performerIDs[performerIdxWithTwoTags]) if err != nil { return err } @@ -1017,7 +1017,7 @@ func TestTagMerge(t *testing.T) { assert.Contains(performerTagIDs, destID) // ensure studio points to new tag - studioTagIDs, err := db.Studio.GetTagIDs(ctx, studioIDs[studioIdxWithTwoTags]) + studioTagIDs, err := db.GetRepo().Studio.GetTagIDs(ctx, studioIDs[studioIdxWithTwoTags]) if err != nil { return err } diff --git a/pkg/sqlite/transaction_test.go b/pkg/sqlite/transaction_test.go index 513a60a2065..8a1e809779c 100644 --- a/pkg/sqlite/transaction_test.go +++ b/pkg/sqlite/transaction_test.go @@ -36,11 +36,11 @@ import ( // Title: "test", // } -// if err := db.Scene.Create(ctx, scene, nil); err != nil { +// if err := db.GetRepo().Scene.Create(ctx, scene, nil); err != nil { // return err // } -// if err := db.Scene.Destroy(ctx, scene.ID); err != nil { +// if err := db.GetRepo().Scene.Destroy(ctx, scene.ID); err != nil { // return err // } // } @@ -94,7 +94,7 @@ func waitForOtherThread(c chan struct{}) error { // Title: "test", // } -// if err := db.Scene.Create(ctx, scene, nil); err != nil { +// if err := db.GetRepo().Scene.Create(ctx, scene, nil); err != nil { // return err // } 
@@ -106,7 +106,7 @@ func waitForOtherThread(c chan struct{}) error { // return err // } -// if err := db.Scene.Destroy(ctx, scene.ID); err != nil { +// if err := db.GetRepo().Scene.Destroy(ctx, scene.ID); err != nil { // return err // } @@ -139,7 +139,7 @@ func waitForOtherThread(c chan struct{}) error { // // expect error when we try to do this, as the other thread has already // // modified this table // // this takes time to fail, so we need to wait for it -// if err := db.Scene.Create(ctx, scene, nil); err != nil { +// if err := db.GetRepo().Scene.Create(ctx, scene, nil); err != nil { // if !db.IsLocked(err) { // t.Errorf("unexpected error: %v", err) // } @@ -169,7 +169,7 @@ func TestConcurrentExclusiveAndReadTxn(t *testing.T) { Title: "test", } - if err := db.Scene.Create(ctx, scene, nil); err != nil { + if err := db.GetRepo().Scene.Create(ctx, scene, nil); err != nil { return err } @@ -181,7 +181,7 @@ func TestConcurrentExclusiveAndReadTxn(t *testing.T) { return err } - if err := db.Scene.Destroy(ctx, scene.ID); err != nil { + if err := db.GetRepo().Scene.Destroy(ctx, scene.ID); err != nil { return err } @@ -207,7 +207,7 @@ func TestConcurrentExclusiveAndReadTxn(t *testing.T) { } }() - if _, err := db.Scene.Find(ctx, sceneIDs[sceneIdx1WithPerformer]); err != nil { + if _, err := db.GetRepo().Scene.Find(ctx, sceneIDs[sceneIdx1WithPerformer]); err != nil { t.Errorf("unexpected error: %v", err) return err } @@ -241,11 +241,11 @@ func TestConcurrentExclusiveAndReadTxn(t *testing.T) { // Title: "test", // } -// if err := db.Scene.Create(ctx, scene, nil); err != nil { +// if err := db.GetRepo().Scene.Create(ctx, scene, nil); err != nil { // return err // } -// if err := db.Scene.Destroy(ctx, scene.ID); err != nil { +// if err := db.GetRepo().Scene.Destroy(ctx, scene.ID); err != nil { // return err // } // } @@ -267,7 +267,7 @@ func TestConcurrentExclusiveAndReadTxn(t *testing.T) { // for l := 0; l < loops; l++ { // if err := txn.WithReadTxn(ctx, db, func(ctx 
context.Context) error { // for ll := 0; ll < innerLoops; ll++ { -// if _, err := db.Scene.Find(ctx, sceneIDs[ll%totalScenes]); err != nil { +// if _, err := db.GetRepo().Scene.Find(ctx, sceneIDs[ll%totalScenes]); err != nil { // return err // } // } From 688e3652bb32426a2868448b048136e61790cc50 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sun, 13 Oct 2024 22:46:41 +0200 Subject: [PATCH 33/85] Added distinctIDs back and obey group by rules (if postgresql) --- pkg/sqlite/criterion_handlers.go | 2 +- pkg/sqlite/file.go | 4 ++-- pkg/sqlite/gallery.go | 2 +- pkg/sqlite/group.go | 2 +- pkg/sqlite/image.go | 12 ++++++------ pkg/sqlite/performer.go | 2 +- pkg/sqlite/query.go | 26 +++++++++++++++++--------- pkg/sqlite/repository.go | 15 ++++++++------- pkg/sqlite/scene.go | 18 ++++++++++-------- pkg/sqlite/scene_marker.go | 2 +- pkg/sqlite/sql.go | 18 +++++++++++++++++- pkg/sqlite/studio.go | 2 +- pkg/sqlite/tag.go | 2 +- 13 files changed, 67 insertions(+), 40 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index ac209814165..0685f568343 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -674,7 +674,7 @@ WHERE id in {inBinding} {unionClause}) `, withClauseMap) - query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || string_agg('(' || root_id || ', ' || item_id || ')', ',') AS val FROM items", withClause) + query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || "+fixDBConcat("'(' || root_id || ', ' || item_id || ')'")+" AS val FROM items", withClause) var valuesClause sql.NullString err := dbWrapper.Get(ctx, &valuesClause, query, args...) 
diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 7626a8c53bb..bc6fbb8e814 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -849,7 +849,7 @@ func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) query := qb.newQuery() query.join(folderTable, "", "files.parent_folder_id = folders.id") - selectIDs(&query, fileTable) + distinctIDs(&query, fileTable) if q := findFilter.Q; q != nil && *q != "" { filepathColumn := "folders.path || '" + string(filepath.Separator) + "' || files.basename" @@ -898,7 +898,7 @@ func (qb *FileStore) queryGroupedFields(ctx context.Context, options models.File aggregateQuery := qb.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(temp.id) as total") + aggregateQuery.addColumn("COUNT(temp.id) as total", nil) } const includeSortPagination = false diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 008ab85f7f1..1624087c9e7 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -687,7 +687,7 @@ func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.Gal } query := galleryRepository.newQuery() - selectIDs(&query, galleryTable) + distinctIDs(&query, galleryTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index cd10461d3fb..ab19608a493 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -427,7 +427,7 @@ func (qb *GroupStore) makeQuery(ctx context.Context, groupFilter *models.GroupFi } query := groupRepository.newQuery() - selectIDs(&query, groupTable) + distinctIDs(&query, groupTable) if q := findFilter.Q; q != nil && *q != "" { searchColumns := []string{"groups.name", "groups.aliases"} diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index a7dd0d574bc..719d37e0132 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -778,7 +778,7 @@ func (qb *ImageStore) makeQuery(ctx context.Context, imageFilter *models.ImageFi } query := imageRepository.newQuery() - 
selectIDs(&query, imageTable) + distinctIDs(&query, imageTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( @@ -849,7 +849,7 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima aggregateQuery := imageRepository.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total") + aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total", nil) } if options.Megapixels { @@ -863,8 +863,8 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima onClause: "images_files.file_id = image_files.file_id", }, ) - query.addColumn("COALESCE(image_files.width, 0) * COALESCE(image_files.height, 0) as megapixels") - aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) / 1000000 as megapixels") + query.addColumn("COALESCE(image_files.width, 0) * COALESCE(image_files.height, 0) as megapixels", []string{"image_files.width", "image_files.height"}) + aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) / 1000000 as megapixels", nil) } if options.TotalSize { @@ -878,8 +878,8 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima onClause: "images_files.file_id = files.id", }, ) - query.addColumn("COALESCE(files.size, 0) as size") - aggregateQuery.addColumn("SUM(temp.size) as size") + query.addColumn("COALESCE(files.size, 0) as size", []string{"files.size"}) + aggregateQuery.addColumn("SUM(temp.size) as size", nil) } const includeSortPagination = false diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index ee05a4bcbc6..4c953629ad9 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -596,7 +596,7 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models } query := performerRepository.newQuery() - selectIDs(&query, performerTable) + distinctIDs(&query, performerTable) if q := findFilter.Q; q != nil && *q != "" { query.join(performersAliasesTable, "", "performer_aliases.performer_id = 
performers.id") diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 9c09d8beaed..6cfe7f10162 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -14,12 +14,13 @@ type queryBuilder struct { columns []string from string - joins joins - whereClauses []string - havingClauses []string - args []interface{} - withClauses []string - recursiveWith bool + joins joins + whereClauses []string + havingClauses []string + args []interface{} + withClauses []string + recursiveWith bool + groupByClauses []string sortAndPagination string } @@ -28,8 +29,15 @@ func (qb queryBuilder) body() string { return fmt.Sprintf("SELECT %s FROM %s%s", strings.Join(qb.columns, ", "), qb.from, qb.joins.toSQL()) } -func (qb *queryBuilder) addColumn(column string) { +func (qb *queryBuilder) addColumn(column string, nonaggregates []string) { qb.columns = append(qb.columns, column) + if len(nonaggregates) > 0 && dbWrapper.dbType == PostgresBackend { + qb.addGroupBy(nonaggregates) + } +} + +func (qb *queryBuilder) addGroupBy(aggregate []string) { + qb.groupByClauses = append(qb.groupByClauses, aggregate...) 
} func (qb queryBuilder) toSQL(includeSortPagination bool) string { @@ -44,7 +52,7 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) + body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) if includeSortPagination { body += qb.sortAndPagination } @@ -75,7 +83,7 @@ func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) + body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, nil) countQuery := withClause + qb.repository.buildCountQuery(body) return qb.repository.runCountQuery(ctx, countQuery, qb.args) } diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 0d49bcee812..b422d75ae9a 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -10,7 +10,6 @@ import ( "github.com/jmoiron/sqlx" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/sliceutil" ) const idColumn = "id" @@ -99,9 +98,6 @@ func (r *repository) runIdsQuery(ctx context.Context, query string, args []inter vsm[i] = v.Int } - // We removed distinctIDs for postgresql, but now we have duplicates - vsm = sliceutil.AppendUniques(nil, vsm) - return vsm, nil } @@ -174,12 +170,17 @@ func (r *repository) querySimple(ctx context.Context, query string, args []inter return nil } -func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string) string { +func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string, groupbyClauses []string) string { if len(whereClauses) > 0 { body = body + " WHERE " + strings.Join(whereClauses, " AND ") // TODO handle AND or OR } if 
len(havingClauses) > 0 { - body = body + " GROUP BY " + r.tableName + ".id " + groupbyClauses = append(groupbyClauses, r.tableName+".id") + } + if len(groupbyClauses) > 0 { + body = body + " GROUP BY " + strings.Join(groupbyClauses, ", ") + " " + } + if len(havingClauses) > 0 { body = body + " HAVING " + strings.Join(havingClauses, " AND ") // TODO handle AND or OR } @@ -187,7 +188,7 @@ func (r *repository) buildQueryBody(body string, whereClauses []string, havingCl } func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { - body = r.buildQueryBody(body, whereClauses, havingClauses) + body = r.buildQueryBody(body, whereClauses, havingClauses, nil) withClause := "" if len(withClauses) > 0 { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index e4da8777578..f831ba2e898 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -40,7 +40,7 @@ const ( ) var findExactDuplicateQuery = ` -SELECT GROUP_CONCAT(DISTINCT scene_id) as ids +SELECT %s as ids FROM ( SELECT scenes.id as scene_id , video_files.duration as file_duration @@ -915,7 +915,7 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi } query := sceneRepository.newQuery() - selectIDs(&query, sceneTable) + distinctIDs(&query, sceneTable) if q := findFilter.Q; q != nil && *q != "" { query.addJoins( @@ -991,7 +991,7 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce aggregateQuery := sceneRepository.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total") + aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total", nil) } if options.TotalDuration { @@ -1005,8 +1005,8 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce onClause: "scenes_files.file_id = video_files.file_id", }, ) - 
query.addColumn("COALESCE(video_files.duration, 0) as duration") - aggregateQuery.addColumn("SUM(temp.duration) as duration") + query.addColumn("COALESCE(video_files.duration, 0) as duration", []string{"video_files.duration"}) + aggregateQuery.addColumn("SUM(temp.duration) as duration", nil) } if options.TotalSize { @@ -1020,8 +1020,8 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce onClause: "scenes_files.file_id = files.id", }, ) - query.addColumn("COALESCE(files.size, 0) as size") - aggregateQuery.addColumn("SUM(temp.size) as size") + query.addColumn("COALESCE(files.size, 0) as size", []string{"files.size"}) + aggregateQuery.addColumn("SUM(temp.size) as size", nil) } const includeSortPagination = false @@ -1331,7 +1331,9 @@ func (qb *SceneStore) FindDuplicates(ctx context.Context, distance int, duration var dupeIds [][]int if distance == 0 { var ids []string - if err := dbWrapper.Select(ctx, &ids, findExactDuplicateQuery, durationDiff); err != nil { + + dbfix_findExactDuplicateQuery := fmt.Sprintf(findExactDuplicateQuery, fixDBConcat("DISTINCT scene_id")) + if err := dbWrapper.Select(ctx, &ids, dbfix_findExactDuplicateQuery, durationDiff); err != nil { return nil, err } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 2d3f59ec25c..87a849d2084 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -298,7 +298,7 @@ func (qb *SceneMarkerStore) makeQuery(ctx context.Context, sceneMarkerFilter *mo } query := sceneMarkerRepository.newQuery() - selectIDs(&query, sceneMarkerTable) + distinctIDs(&query, sceneMarkerTable) if q := findFilter.Q; q != nil && *q != "" { query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 229ff633287..f8dcb159529 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -16,11 +16,27 @@ func selectAll(tableName string) string { return "SELECT " + idColumn + " FROM " + tableName + " " } +func 
distinctIDs(qb *queryBuilder, tableName string) { + columnId := getColumn(tableName, "id") + qb.addColumn("DISTINCT "+columnId, []string{columnId}) + qb.from = tableName +} + func selectIDs(qb *queryBuilder, tableName string) { - qb.addColumn(getColumn(tableName, "id")) + columnId := getColumn(tableName, "id") + qb.addColumn(getColumn(tableName, "id"), []string{columnId}) qb.from = tableName } +func fixDBConcat(columnName string) string { + switch dbWrapper.dbType { + case PostgresBackend: + return "STRING_AGG(" + columnName + "::TEXT, ',')" + default: + return "GROUP_CONCAT(" + columnName + ")" + } +} + func getColumn(tableName string, columnName string) string { return tableName + "." + columnName } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index afe92976c58..26423e41778 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -522,7 +522,7 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi } query := studioRepository.newQuery() - selectIDs(&query, studioTable) + distinctIDs(&query, studioTable) if q := findFilter.Q; q != nil && *q != "" { query.join(studioAliasesTable, "", "studio_aliases.studio_id = studios.id") diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index c3725b15e5a..2f1c05f737e 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -599,7 +599,7 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, } query := tagRepository.newQuery() - selectIDs(&query, tagTable) + distinctIDs(&query, tagTable) if q := findFilter.Q; q != nil && *q != "" { query.join(tagAliasesTable, "", "tag_aliases.tag_id = tags.id") From 2260ab55d68b43be293860a33b2c326c5d1bfc0d Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:36:50 +0200 Subject: [PATCH 34/85] Some small cosmetic query changes --- pkg/sqlite/criterion_handlers.go | 2 +- pkg/sqlite/query.go | 4 ++++ pkg/sqlite/scene.go | 2 +- pkg/sqlite/sql.go | 4 ++-- 4 files 
changed, 8 insertions(+), 4 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index 0685f568343..ed83df9617e 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -674,7 +674,7 @@ WHERE id in {inBinding} {unionClause}) `, withClauseMap) - query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || "+fixDBConcat("'(' || root_id || ', ' || item_id || ')'")+" AS val FROM items", withClause) + query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || "+DBGroupConcat("'(' || root_id || ', ' || item_id || ')'")+" AS val FROM items", withClause) var valuesClause sql.NullString err := dbWrapper.Get(ctx, &valuesClause, query, args...) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 6cfe7f10162..0a92e44b16b 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -29,6 +29,10 @@ func (qb queryBuilder) body() string { return fmt.Sprintf("SELECT %s FROM %s%s", strings.Join(qb.columns, ", "), qb.from, qb.joins.toSQL()) } +/* + * Adds a column to select for the query + * Additionally allows doing group by on any non-aggregate columns (for pgsql) + */ func (qb *queryBuilder) addColumn(column string, nonaggregates []string) { qb.columns = append(qb.columns, column) if len(nonaggregates) > 0 && dbWrapper.dbType == PostgresBackend { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index f831ba2e898..00925bfa1d8 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1332,7 +1332,7 @@ func (qb *SceneStore) FindDuplicates(ctx context.Context, distance int, duration if distance == 0 { var ids []string - dbfix_findExactDuplicateQuery := fmt.Sprintf(findExactDuplicateQuery, fixDBConcat("DISTINCT scene_id")) + dbfix_findExactDuplicateQuery := fmt.Sprintf(findExactDuplicateQuery, DBGroupConcat("DISTINCT scene_id")) if err := dbWrapper.Select(ctx, &ids, dbfix_findExactDuplicateQuery, durationDiff); err != nil { return nil, err } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go 
index f8dcb159529..df051811460 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -24,11 +24,11 @@ func distinctIDs(qb *queryBuilder, tableName string) { func selectIDs(qb *queryBuilder, tableName string) { columnId := getColumn(tableName, "id") - qb.addColumn(getColumn(tableName, "id"), []string{columnId}) + qb.addColumn(columnId, []string{columnId}) qb.from = tableName } -func fixDBConcat(columnName string) string { +func DBGroupConcat(columnName string) string { switch dbWrapper.dbType { case PostgresBackend: return "STRING_AGG(" + columnName + "::TEXT, ',')" From 56fdfffb76f707439fc31235f1166893ed0f47c9 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:36:57 +0200 Subject: [PATCH 35/85] postgres backup system --- internal/manager/task/migrate.go | 2 +- pkg/sqlite/database.go | 23 ++++++++++++++ pkg/sqlite/database_postgres.go | 52 +++++++++++++++++++++++--------- pkg/sqlite/database_sqlite.go | 22 -------------- 4 files changed, 62 insertions(+), 37 deletions(-) diff --git a/internal/manager/task/migrate.go b/internal/manager/task/migrate.go index 609512b2f4b..8ce478ec80e 100644 --- a/internal/manager/task/migrate.go +++ b/internal/manager/task/migrate.go @@ -50,7 +50,7 @@ func (s *MigrateJob) Execute(ctx context.Context, progress *job.Progress) error // always backup so that we can roll back to the previous version if // migration fails backupPath := s.BackupPath - if backupPath == "" { + if backupPath == "" || s.Database.DatabaseType() == sqlite.PostgresBackend { backupPath = database.DatabaseBackupPath(s.Config.GetBackupDirectoryPath()) } else { // check if backup path is a filename or path diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 9f846c28658..aec6e681130 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -6,6 +6,7 @@ import ( "embed" "errors" "fmt" + "path/filepath" "strconv" "time" @@ -358,6 +359,28 @@ func (db *Database) Version() uint { 
return db.schemaVersion } +func (db *Database) Reset() error { + if err := db.Remove(); err != nil { + return err + } + + if err := db.Open(); err != nil { + return fmt.Errorf("[reset DB] unable to initialize: %w", err) + } + + return nil +} + +func (db *Database) AnonymousDatabasePath(backupDirectoryPath string) string { + fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.DatabasePath()), db.schemaVersion, time.Now().Format("20060102_150405")) + + if backupDirectoryPath != "" { + return filepath.Join(backupDirectoryPath, fn) + } + + return fn +} + func (db *Database) Optimise(ctx context.Context) error { logger.Info("Optimising database") diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 0bd601b4e1b..2f667f451d6 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -2,12 +2,12 @@ package sqlite import ( "fmt" + "time" "github.com/doug-martin/goqu/v9" _ "github.com/doug-martin/goqu/v9/dialect/postgres" _ "github.com/jackc/pgx/v5/stdlib" "github.com/jmoiron/sqlx" - "github.com/stashapp/stash/pkg/logger" ) type PostgresDB struct { @@ -71,27 +71,51 @@ func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.D return conn, nil } -func (db *PostgresDB) Remove() error { - logger.Warn("Postgres backend detected, ignoring Remove request") - return nil +func (db *PostgresDB) Remove() (err error) { + _, err = db.writeDB.Exec(` +DO $$ DECLARE + r RECORD; +BEGIN + -- Disable triggers to avoid foreign key constraint violations + EXECUTE 'SET session_replication_role = replica'; + + -- Drop all tables + FOR r IN (SELECT tablename FROM pg_tables WHERE schemaname = 'public') LOOP + EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; + END LOOP; + + -- Re-enable triggers + EXECUTE 'SET session_replication_role = DEFAULT'; +END $$; +`) + + return err } -func (db *PostgresDB) Reset() error { - logger.Warn("Postgres backend detected, ignoring Reset request") - return 
nil +// getDBCloneCommand returns the command to clone a database from a backup file +func getDBCloneCommand(backupPath string, dbname string) string { + return fmt.Sprintf(` +SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity +WHERE pg_stat_activity.datname = '%[2]s' AND pid <> pg_backend_pid(); +CREATE DATABASE %[1]s WITH TEMPLATE %[2]s; +`, backupPath, dbname) } +// Backup creates a backup of the database at the given path. func (db *PostgresDB) Backup(backupPath string) (err error) { - logger.Warn("Postgres backend detected, ignoring Backup request") - return nil + _, err = db.writeDB.Exec(getDBCloneCommand(backupPath, "stash")) + return err } -func (db *PostgresDB) RestoreFromBackup(backupPath string) error { - logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") - return nil +// RestoreFromBackup restores the database from a backup file at the given path. +func (db *PostgresDB) RestoreFromBackup(backupPath string) (err error) { + sqlcmd := "DROP DATABASE stash;\n" + getDBCloneCommand("stash", backupPath) + + _, err = db.writeDB.Exec(sqlcmd) + return err } +// DatabaseBackupPath returns the path to a database backup file for the given directory. func (db *PostgresDB) DatabaseBackupPath(backupDirectoryPath string) string { - logger.Warn("Postgres backend detected, ignoring DatabaseBackupPath request") - return "" + return fmt.Sprintf("stash_%d_%s", db.schemaVersion, time.Now().Format("20060102_150405")) } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 2a58d6c65ef..8d3451f7fb9 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -103,18 +103,6 @@ func (db *SQLiteDB) Remove() error { return nil } -func (db *SQLiteDB) Reset() error { - if err := db.Remove(); err != nil { - return err - } - - if err := db.Open(); err != nil { - return fmt.Errorf("[reset DB] unable to initialize: %w", err) - } - - return nil -} - // Backup the database. 
If db is nil, then uses the existing database // connection. func (db *SQLiteDB) Backup(backupPath string) (err error) { @@ -150,13 +138,3 @@ func (db *SQLiteDB) DatabaseBackupPath(backupDirectoryPath string) string { return fn } - -func (db *SQLiteDB) AnonymousDatabasePath(backupDirectoryPath string) string { - fn := fmt.Sprintf("%s.anonymous.%d.%s", filepath.Base(db.DatabasePath()), db.schemaVersion, time.Now().Format("20060102_150405")) - - if backupDirectoryPath != "" { - return filepath.Join(backupDirectoryPath, fn) - } - - return fn -} From 878c8e80878318c94a059e3f527fc83b8007e80a Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 12:57:15 +0200 Subject: [PATCH 36/85] Cleanup PGSQL test --- internal/autotag/integration_test.go | 22 +++++++++++++--------- pkg/sqlite/setup_test.go | 9 +-------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index d13cf60a6e8..a9796142aa5 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -49,6 +49,18 @@ func testTeardown(databaseFile string) { } } +func getNewDB(databaseFile string) sqlite.DBInterface { + dbUrl, valid := os.LookupEnv("PGSQL_TEST") + if valid { + db = sqlite.NewPostgresDatabase(dbUrl) + } else { + sqlite.RegisterSqliteDialect() + db = sqlite.NewSQLiteDatabase(databaseFile) + } + + return db +} + func runTests(m *testing.M) int { // create the database file f, err := os.CreateTemp("", "*.sqlite") @@ -57,16 +69,8 @@ func runTests(m *testing.M) int { } f.Close() - databaseFile := f.Name() - sqlite.RegisterSqliteDialect() - - dbUrl, valid := os.LookupEnv("PGSQL_TEST") - if valid { - db = sqlite.NewPostgresDatabase(dbUrl) - } else { - db = sqlite.NewSQLiteDatabase(databaseFile) - } + db = getNewDB(databaseFile) if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) diff 
--git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index f057394f7f0..2c5dd31322a 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -639,14 +639,7 @@ func runTests(m *testing.M) int { f.Close() databaseFile := f.Name() - sqlite.RegisterSqliteDialect() - - dbUrl, valid := os.LookupEnv("PGSQL_TEST") - if valid { - db = sqlite.NewPostgresDatabase(dbUrl) - } else { - db = sqlite.NewSQLiteDatabase(databaseFile) - } + db = getNewDB(databaseFile) db.SetBlobStoreOptions(sqlite.BlobStoreOptions{ UseDatabase: true, From 088dd14fe10dde0e06d8e335934d42a2b40b7193 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:00:50 +0200 Subject: [PATCH 37/85] groupby --- pkg/sqlite/file.go | 5 ++++- pkg/sqlite/gallery.go | 11 ++++++++-- pkg/sqlite/group.go | 13 ++++++++--- pkg/sqlite/image.go | 11 ++++++++-- pkg/sqlite/performer.go | 16 +++++++++----- pkg/sqlite/query.go | 13 ++++++----- pkg/sqlite/scene.go | 40 +++++++++++++++++++++++++-------- pkg/sqlite/scene_marker.go | 8 +++++-- pkg/sqlite/sql.go | 21 +++++++++++------- pkg/sqlite/studio.go | 16 +++++++++----- pkg/sqlite/tag.go | 45 +++++++++++++++++++++++++------------- 11 files changed, 142 insertions(+), 57 deletions(-) diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index bc6fbb8e814..c09a9fd4b02 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -941,8 +941,11 @@ func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFi case "path": // special handling for path query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) + query.addGroupBy([]string{"folders.path", "files.basename"}, true) default: - query.sortAndPagination += getSort(sort, direction, "files") + add, agg := getSort(sort, direction, "files") + query.sortAndPagination += add + query.addGroupBy(agg, true) } return nil diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 
1624087c9e7..ba4d91e494c 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -838,20 +838,27 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addGroupBy([]string{"folders.path", "file_folder.path", "files.basename"}, true) case "file_mod_time": sort = "mod_time" addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "title": addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction + query.addGroupBy([]string{"galleries.title", "files.basename", "folders.path", "file_folder.path"}, true) default: - query.sortAndPagination += getSort(sort, direction, "galleries") + add, agg := getSort(sort, direction, "galleries") + query.sortAndPagination += add + query.addGroupBy(agg, true) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(galleries.title, cast(galleries.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"galleries.title", "galleries.id"}, true) return nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index ab19608a493..09794db942d 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -513,23 +513,30 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF case "sub_group_order": // sub_group_order is a special sort that sorts by the order_index of the subgroups if query.hasJoin("groups_parents") { - query.sortAndPagination += getSort("order_index", direction, 
"groups_parents") + add, agg := getSort("order_index", direction, "groups_parents") + query.sortAndPagination += add + query.addGroupBy(agg, true) } else { // this will give unexpected results if the query is not filtered by a parent group and // the group has multiple parents and order indexes query.join(groupRelationsTable, "", "groups.id = groups_relations.sub_id") - query.sortAndPagination += getSort("order_index", direction, groupRelationsTable) + add, agg := getSort("order_index", direction, groupRelationsTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) } case "tag_count": query.sortAndPagination += getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction) case "scenes_count": // generic getSort won't work for this query.sortAndPagination += getCountSort(groupTable, groupsScenesTable, groupIDColumn, direction) default: - query.sortAndPagination += getSort(sort, direction, "groups") + add, agg := getSort(sort, direction, "groups") + query.sortAndPagination += add + query.addGroupBy(agg, true) } // Whatever the sorting, always use name/id as a final sort query.sortAndPagination += ", COALESCE(groups.name, cast(groups.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"groups.name", "groups.id"}, true) return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 719d37e0132..4a1e3581c1e 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -969,6 +969,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction + q.addGroupBy([]string{"folders.path", "files.basename"}, true) case "file_count": sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) case "tag_count": @@ -977,17 +978,23 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod sortClause = 
getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) case "mod_time", "filesize": addFilesJoin() - sortClause = getSort(sort, direction, "files") + add, agg := getSort(sort, direction, "files") + sortClause = add + q.addGroupBy(agg, true) case "title": addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + q.addGroupBy([]string{"images.title", "files.basename", "folders.path"}, true) default: - sortClause = getSort(sort, direction, "images") + add, agg := getSort(sort, direction, "images") + sortClause = add + q.addGroupBy(agg, true) } // Whatever the sorting, always use title/id as a final sort sortClause += ", COALESCE(images.title, cast(images.id as text)) COLLATE NATURAL_CI ASC" + q.addGroupBy([]string{"images.title", "images.id"}, true) } q.sortAndPagination = sortClause + getPagination(findFilter) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 4c953629ad9..f734115393b 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -613,11 +613,13 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models } var err error - query.sortAndPagination, err = qb.getPerformerSort(findFilter) + var agg []string + query.sortAndPagination, agg, err = qb.getPerformerSort(findFilter) if err != nil { return nil, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(agg, true) return &query, nil } @@ -731,7 +733,7 @@ var performerSortOptions = sortOptions{ "weight", } -func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, error) { +func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -744,9 +746,10 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s // CVE-2024-32231 - ensure 
sort is in the list of allowed sorts if err := performerSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } + var agg []string sortQuery := "" switch sort { case "tag_count": @@ -766,12 +769,15 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s case "last_o_at": sortQuery += qb.sortByLastOAt(direction) default: - sortQuery += getSort(sort, direction, "performers") + var add string + add, agg = getSort(sort, direction, "performers") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(performers.name, cast(performers.id as text)) COLLATE NATURAL_CI ASC" - return sortQuery, nil + agg = append(agg, "performers.name", "performers.id") + return sortQuery, agg, nil } func (qb *PerformerStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 0a92e44b16b..80327a4ef6b 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil" ) type queryBuilder struct { @@ -35,13 +36,15 @@ func (qb queryBuilder) body() string { */ func (qb *queryBuilder) addColumn(column string, nonaggregates []string) { qb.columns = append(qb.columns, column) - if len(nonaggregates) > 0 && dbWrapper.dbType == PostgresBackend { - qb.addGroupBy(nonaggregates) - } + qb.addGroupBy(nonaggregates, dbWrapper.dbType == PostgresBackend) } -func (qb *queryBuilder) addGroupBy(aggregate []string) { - qb.groupByClauses = append(qb.groupByClauses, aggregate...) 
+func (qb *queryBuilder) addGroupBy(aggregate []string, pgsqlfix bool) { + if !pgsqlfix || len(aggregate) == 0 { + return + } + + qb.groupByClauses = sliceutil.AppendUniques(qb.groupByClauses, aggregate) } func (qb queryBuilder) toSQL(includeSortPagination bool) string { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 00925bfa1d8..1c349fd4fa1 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1130,10 +1130,14 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF switch sort { case "movie_scene_number": query.join(groupsScenesTable, "", "scenes.id = groups_scenes.scene_id") - query.sortAndPagination += getSort("scene_index", direction, groupsScenesTable) + add, agg := getSort("scene_index", direction, groupsScenesTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "group_scene_number": query.join(groupsScenesTable, "scene_group", "scenes.id = scene_group.scene_id") - query.sortAndPagination += getSort("scene_index", direction, "scene_group") + add, agg := getSort("scene_index", direction, "scene_group") + query.sortAndPagination += add + query.addGroupBy(agg, true) case "tag_count": query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) case "performer_count": @@ -1145,6 +1149,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addGroupBy([]string{"folders.path", "files.basename"}, true) case "perceptual_similarity": // special handling for phash addFileTable() @@ -1157,31 +1162,45 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF ) query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" + query.addGroupBy([]string{"fingerprints_phash.fingerprint", 
"files.size"}, true) case "bitrate": sort = "bit_rate" addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "file_mod_time": sort = "mod_time" addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "framerate": sort = "frame_rate" addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "filesize": addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "duration": addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "interactive", "interactive_speed": addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "title": addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + query.addGroupBy([]string{"scenes.title", "files.basename", "folders.path"}, true) case "play_count": query.sortAndPagination += getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction) case "last_played_at": @@ -1191,11 +1210,14 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF case "o_counter": 
query.sortAndPagination += getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction) default: - query.sortAndPagination += getSort(sort, direction, "scenes") + add, agg := getSort(sort, direction, "scenes") + query.sortAndPagination += add + query.addGroupBy(agg, true) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(scenes.title, cast(scenes.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"scenes.title", "scenes.id"}, true) return nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 87a849d2084..b08ed31c5e8 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -375,12 +375,16 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * case "scenes_updated_at": sort = "updated_at" query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") - query.sortAndPagination += getSort(sort, direction, sceneTable) + add, agg := getSort(sort, direction, sceneTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction default: - query.sortAndPagination += getSort(sort, direction, sceneMarkerTable) + add, agg := getSort(sort, direction, sceneMarkerTable) + query.sortAndPagination += add + query.addGroupBy(agg, true) } query.sortAndPagination += ", scene_markers.scene_id ASC, scene_markers.seconds ASC" diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index df051811460..14c8f0e6eee 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -89,17 +89,20 @@ func getSortDirection(direction string) string { return direction } } -func getSort(sort string, direction string, tableName string) string { +func getSort(sort string, direction string, tableName string) (string, []string) { direction = 
getSortDirection(direction) + nonaggregates := []string{} switch { case strings.HasSuffix(sort, "_count"): var relationTableName = strings.TrimSuffix(sort, "_count") // TODO: pluralize? colName := getColumn(relationTableName, "id") - return " ORDER BY COUNT(distinct " + colName + ") " + direction + nonaggregates = append(nonaggregates, colName) + return " ORDER BY COUNT(distinct " + colName + ") " + direction, nonaggregates case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") - return " ORDER BY " + colName + " " + direction + nonaggregates = append(nonaggregates, colName) + return " ORDER BY " + colName + " " + direction, nonaggregates case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI seedStr := sort[len(randomSeedPrefix):] @@ -108,22 +111,24 @@ func getSort(sort string, direction string, tableName string) string { // fallback to a random seed seed = rand.Uint64() } - return getRandomSort(tableName, direction, seed) + return getRandomSort(tableName, direction, seed), nonaggregates case strings.Compare(sort, "random") == 0: - return getRandomSort(tableName, direction, rand.Uint64()) + return getRandomSort(tableName, direction, rand.Uint64()), nonaggregates default: colName := getColumn(tableName, sort) if strings.Contains(sort, ".") { colName = sort } + nonaggregates = append(nonaggregates, colName) + if strings.Compare(sort, "name") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, nonaggregates } if strings.Compare(sort, "title") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, nonaggregates } - return " ORDER BY " + colName + " " + direction + return " ORDER BY " + colName + " " + direction, nonaggregates } } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 26423e41778..154d24f0c26 100644 --- 
a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -539,11 +539,13 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi } var err error - query.sortAndPagination, err = qb.getStudioSort(findFilter) + var agg []string + query.sortAndPagination, agg, err = qb.getStudioSort(findFilter) if err != nil { return nil, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(agg, true) return &query, nil } @@ -589,7 +591,7 @@ var studioSortOptions = sortOptions{ "updated_at", } -func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, error) { +func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -602,9 +604,10 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := studioSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } + var agg []string sortQuery := "" switch sort { case "tag_count": @@ -618,12 +621,15 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, case "child_count": sortQuery += getCountSort(studioTable, studioTable, studioParentIDColumn, direction) default: - sortQuery += getSort(sort, direction, "studios") + var add string + add, agg = getSort(sort, direction, "studios") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(studios.name, cast(studios.id as text)) COLLATE NATURAL_CI ASC" - return sortQuery, nil + agg = append(agg, "studios.name", "studios.id") + return sortQuery, agg, nil } func (qb *StudioStore) GetImage(ctx context.Context, studioID int) ([]byte, error) { diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 2f1c05f737e..657cf5a7705 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -383,7 +383,8 @@ func (qb *TagStore) 
FindBySceneID(ctx context.Context, sceneID int) ([]*models.T WHERE scenes_join.scene_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{sceneID} return qb.queryTags(ctx, query, args) } @@ -395,7 +396,8 @@ func (qb *TagStore) FindByPerformerID(ctx context.Context, performerID int) ([]* WHERE performers_join.performer_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{performerID} return qb.queryTags(ctx, query, args) } @@ -407,7 +409,8 @@ func (qb *TagStore) FindByImageID(ctx context.Context, imageID int) ([]*models.T WHERE images_join.image_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{imageID} return qb.queryTags(ctx, query, args) } @@ -419,7 +422,8 @@ func (qb *TagStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mode WHERE galleries_join.gallery_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{galleryID} return qb.queryTags(ctx, query, args) } @@ -431,7 +435,8 @@ func (qb *TagStore) FindByGroupID(ctx context.Context, groupID int) ([]*models.T WHERE groups_join.group_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{groupID} return qb.queryTags(ctx, query, args) } @@ -443,7 +448,8 @@ func (qb *TagStore) FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) WHERE scene_markers_join.scene_marker_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{sceneMarkerID} return qb.queryTags(ctx, query, args) } @@ -455,7 +461,8 @@ func (qb *TagStore) FindByStudioID(ctx context.Context, studioID int) ([]*models WHERE studios_join.studio_id = ? 
GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{studioID} return qb.queryTags(ctx, query, args) } @@ -519,7 +526,8 @@ func (qb *TagStore) FindByParentTagID(ctx context.Context, parentID int) ([]*mod INNER JOIN tags_relations ON tags_relations.child_id = tags.id WHERE tags_relations.parent_id = ? ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -530,7 +538,8 @@ func (qb *TagStore) FindByChildTagID(ctx context.Context, parentID int) ([]*mode INNER JOIN tags_relations ON tags_relations.parent_id = tags.id WHERE tags_relations.child_id = ? ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -616,11 +625,13 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, } var err error - query.sortAndPagination, err = qb.getTagSort(&query, findFilter) + var agg []string + query.sortAndPagination, agg, err = qb.getTagSort(&query, findFilter) if err != nil { return nil, 0, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(agg, true) idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err @@ -650,11 +661,11 @@ var tagSortOptions = sortOptions{ "updated_at", } -func (qb *TagStore) getDefaultTagSort() string { +func (qb *TagStore) getDefaultTagSort() (string, []string) { return getSort("name", "ASC", "tags") } -func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, error) { +func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -667,10 +678,11 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte // 
CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := tagSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } sortQuery := "" + var agg []string switch sort { case "scenes_count": sortQuery += getCountSort(tagTable, scenesTagsTable, tagIDColumn, direction) @@ -687,12 +699,15 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte case "movies_count", "groups_count": sortQuery += getCountSort(tagTable, groupsTagsTable, tagIDColumn, direction) default: - sortQuery += getSort(sort, direction, "tags") + var add string + add, agg = getSort(sort, direction, "tags") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(tags.name, cast(tags.id as text)) COLLATE NATURAL_CI ASC" - return sortQuery, nil + agg = append(agg, "tags.name", "tags.id") + return sortQuery, agg, nil } func (qb *TagStore) queryTags(ctx context.Context, query string, args []interface{}) ([]*models.Tag, error) { From 235b2bc9fcf747644a83c9166743164bfce7e238 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:02:07 +0200 Subject: [PATCH 38/85] Distinct on --- pkg/sqlite/database.go | 22 +----------------- pkg/sqlite/database_sqlite.go | 20 +++++++++++++++++ pkg/sqlite/file.go | 6 ++--- pkg/sqlite/gallery.go | 20 ++++++++--------- pkg/sqlite/group.go | 14 ++++++------ pkg/sqlite/image.go | 7 +++--- pkg/sqlite/performer.go | 7 +++--- pkg/sqlite/query.go | 10 ++++++--- pkg/sqlite/repository.go | 4 ++-- pkg/sqlite/scene.go | 42 +++++++++++++++++------------------ pkg/sqlite/scene_marker.go | 10 ++++----- pkg/sqlite/sql.go | 26 ++++++++++++++++------ pkg/sqlite/studio.go | 7 +++--- pkg/sqlite/tag.go | 7 +++--- 14 files changed, 108 insertions(+), 94 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index aec6e681130..c0d5e56a410 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ 
-84,7 +84,7 @@ type storeRepository struct { type DatabaseType string const ( - PostgresBackend DatabaseType = "POSTGRES" + PostgresBackend DatabaseType = "POSTGRESQL" SqliteBackend DatabaseType = "SQLITE" ) @@ -195,26 +195,6 @@ func (db *Database) GetRepo() *storeRepository { return db.storeRepository } -// lock locks the database for writing. This method will block until the lock is acquired. -func (db *Database) lock() { - db.lockChan <- struct{}{} -} - -// unlock unlocks the database -func (db *Database) unlock() { - // will block the caller if the lock is not held, so check first - select { - case <-db.lockChan: - return - default: - panic("database is not locked") - } -} - -func (db *Database) AppSchemaVersion() uint { - return appSchemaVersion -} - func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 8d3451f7fb9..7bcfb7ca809 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -48,6 +48,26 @@ func (db *SQLiteDB) DatabasePath() string { return (db.dbConfig).(string) } +func (db *SQLiteDB) AppSchemaVersion() uint { + return appSchemaVersion +} + +// lock locks the database for writing. This method will block until the lock is acquired. 
+func (db *SQLiteDB) lock() { + db.lockChan <- struct{}{} +} + +// unlock unlocks the database +func (db *SQLiteDB) unlock() { + // will block the caller if the lock is not held, so check first + select { + case <-db.lockChan: + return + default: + panic("database is not locked") + } +} + func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { // https://github.com/mattn/go-sqlite3 url := "file:" + db.DatabasePath() + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index c09a9fd4b02..48cf1bf55f7 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -869,7 +869,7 @@ func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) if err := qb.setQuerySort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) result, err := qb.queryGroupedFields(ctx, options, query) if err != nil { @@ -940,11 +940,11 @@ func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFi switch sort { case "path": // special handling for path - query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) + query.addSort(fmt.Sprintf("folders.path %s, files.basename %[1]s", direction)) query.addGroupBy([]string{"folders.path", "files.basename"}, true) default: add, agg := getSort(sort, direction, "files") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index ba4d91e494c..d6e32963c8a 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -735,7 +735,7 @@ func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.Gal if err := qb.setGallerySort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + 
query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) return &query, nil } @@ -826,38 +826,38 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F switch sort { case "file_count": - query.sortAndPagination += getCountSort(galleryTable, galleriesFilesTable, galleryIDColumn, direction) + query.addSort(getCountSort(galleryTable, galleriesFilesTable, galleryIDColumn, direction)) case "images_count": - query.sortAndPagination += getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction) + query.addSort(getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction)) case "tag_count": - query.sortAndPagination += getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction) + query.addSort(getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction)) case "performer_count": - query.sortAndPagination += getCountSort(galleryTable, performersGalleriesTable, galleryIDColumn, direction) + query.addSort(getCountSort(galleryTable, performersGalleriesTable, galleryIDColumn, direction)) case "path": // special handling for path addFileTable() addFolderTable() - query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addSort(fmt.Sprintf("COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction)) query.addGroupBy([]string{"folders.path", "file_folder.path", "files.basename"}, true) case "file_mod_time": sort = "mod_time" addFileTable() add, agg := getSort(sort, direction, fileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "title": addFileTable() addFolderTable() - query.sortAndPagination += " ORDER BY COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", 
file_folder.path COLLATE NATURAL_CI " + direction + query.addSort("COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction) query.addGroupBy([]string{"galleries.title", "files.basename", "folders.path", "file_folder.path"}, true) default: add, agg := getSort(sort, direction, "galleries") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } // Whatever the sorting, always use title/id as a final sort - query.sortAndPagination += ", COALESCE(galleries.title, cast(galleries.id as text)) COLLATE NATURAL_CI ASC" + query.addSort("COALESCE(galleries.title, cast(galleries.id as text)) COLLATE NATURAL_CI ASC") query.addGroupBy([]string{"galleries.title", "galleries.id"}, true) return nil diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index 09794db942d..d8ecaf9d3d5 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -446,7 +446,7 @@ func (qb *GroupStore) makeQuery(ctx context.Context, groupFilter *models.GroupFi return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) return &query, nil } @@ -514,28 +514,28 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF // sub_group_order is a special sort that sorts by the order_index of the subgroups if query.hasJoin("groups_parents") { add, agg := getSort("order_index", direction, "groups_parents") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } else { // this will give unexpected results if the query is not filtered by a parent group and // the group has multiple parents and order indexes query.join(groupRelationsTable, "", "groups.id = groups_relations.sub_id") add, agg := getSort("order_index", direction, groupRelationsTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, 
true) } case "tag_count": - query.sortAndPagination += getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction) + query.addSort(getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction)) case "scenes_count": // generic getSort won't work for this - query.sortAndPagination += getCountSort(groupTable, groupsScenesTable, groupIDColumn, direction) + query.addSort(getCountSort(groupTable, groupsScenesTable, groupIDColumn, direction)) default: add, agg := getSort(sort, direction, "groups") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } // Whatever the sorting, always use name/id as a final sort - query.sortAndPagination += ", COALESCE(groups.name, cast(groups.id as text)) COLLATE NATURAL_CI ASC" + query.addSort("COALESCE(groups.name, cast(groups.id as text)) COLLATE NATURAL_CI ASC") query.addGroupBy([]string{"groups.name", "groups.id"}, true) return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 4a1e3581c1e..e2141868f56 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -968,7 +968,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod case "path": addFilesJoin() addFolderJoin() - sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction + sortClause = "COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction q.addGroupBy([]string{"folders.path", "files.basename"}, true) case "file_count": sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) @@ -984,7 +984,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod case "title": addFilesJoin() addFolderJoin() - sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + sortClause = "COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", 
folders.path COLLATE NATURAL_CI " + direction q.addGroupBy([]string{"images.title", "files.basename", "folders.path"}, true) default: add, agg := getSort(sort, direction, "images") @@ -997,7 +997,8 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod q.addGroupBy([]string{"images.title", "images.id"}, true) } - q.sortAndPagination = sortClause + getPagination(findFilter) + q.addSort(sortClause) + q.sortAndPagination[len(q.sortAndPagination)-1] += getPagination(findFilter) return nil } diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index f734115393b..9c810776a2a 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -612,13 +612,12 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models return nil, err } - var err error - var agg []string - query.sortAndPagination, agg, err = qb.getPerformerSort(findFilter) + add, agg, err := qb.getPerformerSort(findFilter) if err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.addSort(add) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) query.addGroupBy(agg, true) return &query, nil diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 80327a4ef6b..854f33273ee 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -23,7 +23,7 @@ type queryBuilder struct { recursiveWith bool groupByClauses []string - sortAndPagination string + sortAndPagination []string } func (qb queryBuilder) body() string { @@ -47,6 +47,10 @@ func (qb *queryBuilder) addGroupBy(aggregate []string, pgsqlfix bool) { qb.groupByClauses = sliceutil.AppendUniques(qb.groupByClauses, aggregate) } +func (qb *queryBuilder) addSort(sortby string) { + qb.sortAndPagination = append(qb.sortAndPagination, sortby) +} + func (qb queryBuilder) toSQL(includeSortPagination bool) string { body := qb.body() @@ -60,8 +64,8 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { } body = 
withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) - if includeSortPagination { - body += qb.sortAndPagination + if includeSortPagination && len(qb.sortAndPagination) > 0 { + body += " ORDER BY " + strings.Join(qb.sortAndPagination, ", ") + " " } return body diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index b422d75ae9a..eb5171582f5 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -187,7 +187,7 @@ func (r *repository) buildQueryBody(body string, whereClauses []string, havingCl return body } -func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { +func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination []string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { body = r.buildQueryBody(body, whereClauses, havingClauses, nil) withClause := "" @@ -200,7 +200,7 @@ func (r *repository) executeFindQuery(ctx context.Context, body string, args []i } countQuery := withClause + r.buildCountQuery(body) - idsQuery := withClause + body + sortAndPagination + idsQuery := withClause + body + " ORDER BY " + strings.Join(sortAndPagination, ", ") + " " // Perform query and fetch result var countResult int diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 1c349fd4fa1..483dc76700a 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -957,7 +957,7 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi if err := qb.setSceneSort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) return &query, nil } @@ -1131,24 +1131,24 @@ 
func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF case "movie_scene_number": query.join(groupsScenesTable, "", "scenes.id = groups_scenes.scene_id") add, agg := getSort("scene_index", direction, groupsScenesTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "group_scene_number": query.join(groupsScenesTable, "scene_group", "scenes.id = scene_group.scene_id") add, agg := getSort("scene_index", direction, "scene_group") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "tag_count": - query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) + query.addSort(getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction)) case "performer_count": - query.sortAndPagination += getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction) + query.addSort(getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction)) case "file_count": - query.sortAndPagination += getCountSort(sceneTable, scenesFilesTable, sceneIDColumn, direction) + query.addSort(getCountSort(sceneTable, scenesFilesTable, sceneIDColumn, direction)) case "path": // special handling for path addFileTable() addFolderTable() - query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addSort(fmt.Sprintf("COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction)) query.addGroupBy([]string{"folders.path", "files.basename"}, true) case "perceptual_similarity": // special handling for phash @@ -1161,62 +1161,62 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF }, ) - query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" + query.addSort("fingerprints_phash.fingerprint " + direction + ", files.size DESC") 
query.addGroupBy([]string{"fingerprints_phash.fingerprint", "files.size"}, true) case "bitrate": sort = "bit_rate" addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "file_mod_time": sort = "mod_time" addFileTable() add, agg := getSort(sort, direction, fileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "framerate": sort = "frame_rate" addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "filesize": addFileTable() add, agg := getSort(sort, direction, fileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "duration": addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "interactive", "interactive_speed": addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "title": addFileTable() addFolderTable() - query.sortAndPagination += " ORDER BY COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + query.addSort("COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction) query.addGroupBy([]string{"scenes.title", "files.basename", "folders.path"}, true) case "play_count": - query.sortAndPagination += getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction) + query.addSort(getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction)) case "last_played_at": - query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(view_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesViewDatesTable, 
sceneIDColumn, sceneTable, getSortDirection(direction)) + query.addSort(fmt.Sprintf("(SELECT MAX(view_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesViewDatesTable, sceneIDColumn, sceneTable, getSortDirection(direction))) case "last_o_at": - query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(o_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesODatesTable, sceneIDColumn, sceneTable, getSortDirection(direction)) + query.addSort(fmt.Sprintf("(SELECT MAX(o_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesODatesTable, sceneIDColumn, sceneTable, getSortDirection(direction))) case "o_counter": - query.sortAndPagination += getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction) + query.addSort(getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction)) default: add, agg := getSort(sort, direction, "scenes") - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } // Whatever the sorting, always use title/id as a final sort - query.sortAndPagination += ", COALESCE(scenes.title, cast(scenes.id as text)) COLLATE NATURAL_CI ASC" + query.addSort("COALESCE(scenes.title, cast(scenes.id as text)) COLLATE NATURAL_CI ASC") query.addGroupBy([]string{"scenes.title", "scenes.id"}, true) return nil diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index b08ed31c5e8..6914a2a21b7 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -318,7 +318,7 @@ func (qb *SceneMarkerStore) makeQuery(ctx context.Context, sceneMarkerFilter *mo if err := qb.setSceneMarkerSort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) return &query, nil } @@ -376,18 +376,18 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * sort = "updated_at" query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") add, agg 
:= getSort(sort, direction, sceneTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") - query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction + query.addSort("COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction) default: add, agg := getSort(sort, direction, sceneMarkerTable) - query.sortAndPagination += add + query.addSort(add) query.addGroupBy(agg, true) } - query.sortAndPagination += ", scene_markers.scene_id ASC, scene_markers.seconds ASC" + query.addSort("scene_markers.scene_id ASC, scene_markers.seconds ASC") return nil } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 14c8f0e6eee..c9b037ca69e 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -17,11 +17,23 @@ func selectAll(tableName string) string { } func distinctIDs(qb *queryBuilder, tableName string) { + if dbWrapper.dbType == PostgresBackend { + distinctOnIDs(qb, tableName) + return + } + columnId := getColumn(tableName, "id") qb.addColumn("DISTINCT "+columnId, []string{columnId}) qb.from = tableName } +func distinctOnIDs(qb *queryBuilder, tableName string) { + columnId := getColumn(tableName, "id") + qb.addColumn("DISTINCT ON ("+columnId+") "+columnId, nil) + qb.addSort(columnId) + qb.from = tableName +} + func selectIDs(qb *queryBuilder, tableName string) { columnId := getColumn(tableName, "id") qb.addColumn(columnId, []string{columnId}) @@ -98,11 +110,11 @@ func getSort(sort string, direction string, tableName string) (string, []string) var relationTableName = strings.TrimSuffix(sort, "_count") // TODO: pluralize? 
colName := getColumn(relationTableName, "id") nonaggregates = append(nonaggregates, colName) - return " ORDER BY COUNT(distinct " + colName + ") " + direction, nonaggregates + return "COUNT(distinct " + colName + ") " + direction, nonaggregates case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") nonaggregates = append(nonaggregates, colName) - return " ORDER BY " + colName + " " + direction, nonaggregates + return colName + " " + direction, nonaggregates case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI seedStr := sort[len(randomSeedPrefix):] @@ -122,13 +134,13 @@ func getSort(sort string, direction string, tableName string) (string, []string) nonaggregates = append(nonaggregates, colName) if strings.Compare(sort, "name") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, nonaggregates + return colName + " COLLATE NATURAL_CI " + direction, nonaggregates } if strings.Compare(sort, "title") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, nonaggregates + return colName + " COLLATE NATURAL_CI " + direction, nonaggregates } - return " ORDER BY " + colName + " " + direction, nonaggregates + return colName + " " + direction, nonaggregates } } @@ -146,11 +158,11 @@ func getRandomSort(tableName string, direction string, seed uint64) string { // ORDER BY ((n+seed)*(n+seed)*p1 + (n+seed)*p2) % p3 // since sqlite converts overflowing numbers to reals, a custom db function that uses uints with overflow should be faster, // however in practice the overhead of calling a custom function vastly outweighs the benefits - return fmt.Sprintf(" ORDER BY mod((%[1]s + %[2]d) * (%[1]s + %[2]d) * 52959209 + (%[1]s + %[2]d) * 1047483763, 2147483647) %[3]s", colName, seed, direction) + return fmt.Sprintf("mod((%[1]s + %[2]d) * (%[1]s + %[2]d) * 52959209 + (%[1]s + %[2]d) * 1047483763, 2147483647) %[3]s", colName, seed, direction) } func getCountSort(primaryTable, joinTable, 
primaryFK, direction string) string { - return fmt.Sprintf(" ORDER BY (SELECT COUNT(*) FROM %s AS sort WHERE sort.%s = %s.id) %s", joinTable, primaryFK, primaryTable, getSortDirection(direction)) + return fmt.Sprintf("(SELECT COUNT(*) FROM %s AS sort WHERE sort.%s = %s.id) %s", joinTable, primaryFK, primaryTable, getSortDirection(direction)) } func getStringSearchClause(columns []string, q string, not bool) sqlClause { diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 154d24f0c26..536bd1c4c6b 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -538,13 +538,12 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi return nil, err } - var err error - var agg []string - query.sortAndPagination, agg, err = qb.getStudioSort(findFilter) + add, agg, err := qb.getStudioSort(findFilter) if err != nil { return nil, err } - query.sortAndPagination += getPagination(findFilter) + query.addSort(add) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) query.addGroupBy(agg, true) return &query, nil diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 657cf5a7705..e0688508b8f 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -624,13 +624,12 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, return nil, 0, err } - var err error - var agg []string - query.sortAndPagination, agg, err = qb.getTagSort(&query, findFilter) + add, agg, err := qb.getTagSort(&query, findFilter) if err != nil { return nil, 0, err } - query.sortAndPagination += getPagination(findFilter) + query.addSort(add) + query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) query.addGroupBy(agg, true) idsResult, countResult, err := query.executeFind(ctx) if err != nil { From b019f3f9a1f95fa8f74511f13c69c693cc9be339 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:20:04 +0200 Subject: [PATCH 
39/85] Fix images pagination bug --- pkg/sqlite/query.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 854f33273ee..24c922d0f0d 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -48,7 +48,9 @@ func (qb *queryBuilder) addGroupBy(aggregate []string, pgsqlfix bool) { } func (qb *queryBuilder) addSort(sortby string) { - qb.sortAndPagination = append(qb.sortAndPagination, sortby) + if len(sortby) > 0 { + qb.sortAndPagination = append(qb.sortAndPagination, sortby) + } } func (qb queryBuilder) toSQL(includeSortPagination bool) string { From 4bf5d2ad55a16046379b8b0b053014d30280242e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 15:27:13 +0200 Subject: [PATCH 40/85] Git fix --- pkg/sqlite/database.go | 22 +++++++++++++++++++++- pkg/sqlite/database_sqlite.go | 20 -------------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index c0d5e56a410..aec6e681130 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -84,7 +84,7 @@ type storeRepository struct { type DatabaseType string const ( - PostgresBackend DatabaseType = "POSTGRESQL" + PostgresBackend DatabaseType = "POSTGRES" SqliteBackend DatabaseType = "SQLITE" ) @@ -195,6 +195,26 @@ func (db *Database) GetRepo() *storeRepository { return db.storeRepository } +// lock locks the database for writing. This method will block until the lock is acquired. 
+func (db *Database) lock() { + db.lockChan <- struct{}{} +} + +// unlock unlocks the database +func (db *Database) unlock() { + // will block the caller if the lock is not held, so check first + select { + case <-db.lockChan: + return + default: + panic("database is not locked") + } +} + +func (db *Database) AppSchemaVersion() uint { + return appSchemaVersion +} + func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 7bcfb7ca809..8d3451f7fb9 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -48,26 +48,6 @@ func (db *SQLiteDB) DatabasePath() string { return (db.dbConfig).(string) } -func (db *SQLiteDB) AppSchemaVersion() uint { - return appSchemaVersion -} - -// lock locks the database for writing. This method will block until the lock is acquired. -func (db *SQLiteDB) lock() { - db.lockChan <- struct{}{} -} - -// unlock unlocks the database -func (db *SQLiteDB) unlock() { - // will block the caller if the lock is not held, so check first - select { - case <-db.lockChan: - return - default: - panic("database is not locked") - } -} - func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { // https://github.com/mattn/go-sqlite3 url := "file:" + db.DatabasePath() + "?_journal=WAL&_sync=NORMAL&_busy_timeout=50" From f3c830be49bf066a758dfd0165c6117cc969320f Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:07:32 +0200 Subject: [PATCH 41/85] pagination and sort --- pkg/sqlite/database.go | 22 +-------------------- pkg/sqlite/database_sqlite.go | 20 +++++++++++++++++++ pkg/sqlite/file.go | 2 +- pkg/sqlite/gallery.go | 2 +- pkg/sqlite/group.go | 2 +- pkg/sqlite/image.go | 2 +- pkg/sqlite/performer.go | 10 +++++----- pkg/sqlite/query.go | 18 +++++++++++++----- pkg/sqlite/repository.go | 10 ++++++++-- 
pkg/sqlite/scene.go | 2 +- pkg/sqlite/scene_marker.go | 2 +- pkg/sqlite/setup_test.go | 12 ++++++++++++ pkg/sqlite/studio.go | 2 +- pkg/sqlite/tag.go | 36 ++++++++++++++--------------------- 14 files changed, 80 insertions(+), 62 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index aec6e681130..c0d5e56a410 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -84,7 +84,7 @@ type storeRepository struct { type DatabaseType string const ( - PostgresBackend DatabaseType = "POSTGRES" + PostgresBackend DatabaseType = "POSTGRESQL" SqliteBackend DatabaseType = "SQLITE" ) @@ -195,26 +195,6 @@ func (db *Database) GetRepo() *storeRepository { return db.storeRepository } -// lock locks the database for writing. This method will block until the lock is acquired. -func (db *Database) lock() { - db.lockChan <- struct{}{} -} - -// unlock unlocks the database -func (db *Database) unlock() { - // will block the caller if the lock is not held, so check first - select { - case <-db.lockChan: - return - default: - panic("database is not locked") - } -} - -func (db *Database) AppSchemaVersion() uint { - return appSchemaVersion -} - func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) { *db.Blobs = *NewBlobStore(options) } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 8d3451f7fb9..c8d9ebdda29 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -40,6 +40,26 @@ func NewSQLiteDatabase(dbPath string) *SQLiteDB { return db } +// lock locks the database for writing. This method will block until the lock is acquired. 
+func (db *SQLiteDB) lock() { + db.lockChan <- struct{}{} +} + +// unlock unlocks the database +func (db *SQLiteDB) unlock() { + // will block the caller if the lock is not held, so check first + select { + case <-db.lockChan: + return + default: + panic("database is not locked") + } +} + +func (db *SQLiteDB) AppSchemaVersion() uint { + return appSchemaVersion +} + func (db *SQLiteDB) DatabaseType() DatabaseType { return SqliteBackend } diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 48cf1bf55f7..73c6cfccbf5 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -869,7 +869,7 @@ func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) if err := qb.setQuerySort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) result, err := qb.queryGroupedFields(ctx, options, query) if err != nil { diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index d6e32963c8a..4e171026491 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -735,7 +735,7 @@ func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.Gal if err := qb.setGallerySort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) return &query, nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index d8ecaf9d3d5..6fcccf5d403 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -446,7 +446,7 @@ func (qb *GroupStore) makeQuery(ctx context.Context, groupFilter *models.GroupFi return nil, err } - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) return &query, nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index e2141868f56..94cc1c512eb 100644 --- 
a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -998,7 +998,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod } q.addSort(sortClause) - q.sortAndPagination[len(q.sortAndPagination)-1] += getPagination(findFilter) + q.addPagination(getPagination(findFilter)) return nil } diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 9c810776a2a..4410c5d7ff0 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -617,7 +617,7 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models return nil, err } query.addSort(add) - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) query.addGroupBy(agg, true) return &query, nil @@ -653,12 +653,12 @@ func (qb *PerformerStore) QueryCount(ctx context.Context, performerFilter *model func (qb *PerformerStore) sortByOCounter(direction string) string { // need to sum the o_counter from scenes and images - return " ORDER BY (" + selectPerformerOCountSQL + ") " + direction + return " (" + selectPerformerOCountSQL + ") " + direction } func (qb *PerformerStore) sortByPlayCount(direction string) string { // need to sum the o_counter from scenes and images - return " ORDER BY (" + selectPerformerPlayCountSQL + ") " + direction + return " (" + selectPerformerPlayCountSQL + ") " + direction } // used for sorting on performer last o_date @@ -682,7 +682,7 @@ var selectPerformerLastOAtSQL = utils.StrFormat( func (qb *PerformerStore) sortByLastOAt(direction string) string { // need to get the o_dates from scenes - return " ORDER BY (" + selectPerformerLastOAtSQL + ") " + direction + return " (" + selectPerformerLastOAtSQL + ") " + direction } // used for sorting on performer last view_date @@ -706,7 +706,7 @@ var selectPerformerLastPlayedAtSQL = utils.StrFormat( func (qb *PerformerStore) sortByLastPlayedAt(direction string) string { // need to get the view_dates from scenes - 
return " ORDER BY (" + selectPerformerLastPlayedAtSQL + ") " + direction + return " (" + selectPerformerLastPlayedAtSQL + ") " + direction } var performerSortOptions = sortOptions{ diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 24c922d0f0d..cb66d083951 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -23,7 +23,8 @@ type queryBuilder struct { recursiveWith bool groupByClauses []string - sortAndPagination []string + sort []string + pagination string } func (qb queryBuilder) body() string { @@ -49,10 +50,14 @@ func (qb *queryBuilder) addGroupBy(aggregate []string, pgsqlfix bool) { func (qb *queryBuilder) addSort(sortby string) { if len(sortby) > 0 { - qb.sortAndPagination = append(qb.sortAndPagination, sortby) + qb.sort = append(qb.sort, sortby) } } +func (qb *queryBuilder) addPagination(pag string) { + qb.pagination += pag +} + func (qb queryBuilder) toSQL(includeSortPagination bool) string { body := qb.body() @@ -66,8 +71,11 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { } body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) - if includeSortPagination && len(qb.sortAndPagination) > 0 { - body += " ORDER BY " + strings.Join(qb.sortAndPagination, ", ") + " " + if includeSortPagination { + if len(qb.sort) > 0 { + body += " ORDER BY " + strings.Join(qb.sort, ", ") + " " + } + body += qb.pagination } return body @@ -81,7 +89,7 @@ func (qb queryBuilder) findIDs(ctx context.Context) ([]int, error) { func (qb queryBuilder) executeFind(ctx context.Context) ([]int, int, error) { body := qb.body() - return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) + return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sort, qb.pagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) } func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { diff 
--git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index eb5171582f5..30ece35fbce 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -187,7 +187,7 @@ func (r *repository) buildQueryBody(body string, whereClauses []string, havingCl return body } -func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination []string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { +func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sort []string, pagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { body = r.buildQueryBody(body, whereClauses, havingClauses, nil) withClause := "" @@ -200,7 +200,13 @@ func (r *repository) executeFindQuery(ctx context.Context, body string, args []i } countQuery := withClause + r.buildCountQuery(body) - idsQuery := withClause + body + " ORDER BY " + strings.Join(sortAndPagination, ", ") + " " + idsQuery := withClause + body + if len(sort) > 0 { + idsQuery += " ORDER BY " + strings.Join(sort, ", ") + " " + } + if len(pagination) > 0 { + idsQuery += pagination + } // Perform query and fetch result var countResult int diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 483dc76700a..40e0dffc7b3 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -957,7 +957,7 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi if err := qb.setSceneSort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) return &query, nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 6914a2a21b7..d75a660e775 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -318,7 +318,7 @@ func (qb 
*SceneMarkerStore) makeQuery(ctx context.Context, sceneMarkerFilter *mo if err := qb.setSceneMarkerSort(&query, findFilter); err != nil { return nil, err } - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) return &query, nil } diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 2c5dd31322a..80685540464 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -630,6 +630,18 @@ func testTeardown(databaseFile string) { } } +func getNewDB(databaseFile string) sqlite.DBInterface { + dbUrl, valid := os.LookupEnv("PGSQL_TEST") + if valid { + db = sqlite.NewPostgresDatabase(dbUrl) + } else { + sqlite.RegisterSqliteDialect() + db = sqlite.NewSQLiteDatabase(databaseFile) + } + + return db +} + func runTests(m *testing.M) int { // create the database file f, err := os.CreateTemp("", "*.sqlite") diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 536bd1c4c6b..b4870f20a35 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -543,7 +543,7 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi return nil, err } query.addSort(add) - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) query.addGroupBy(agg, true) return &query, nil diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index e0688508b8f..16c93901eab 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -383,8 +383,7 @@ func (qb *TagStore) FindBySceneID(ctx context.Context, sceneID int) ([]*models.T WHERE scenes_join.scene_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{sceneID} return qb.queryTags(ctx, query, args) } @@ -396,8 +395,7 @@ func (qb *TagStore) FindByPerformerID(ctx context.Context, performerID int) ([]* WHERE performers_join.performer_id = ? 
GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{performerID} return qb.queryTags(ctx, query, args) } @@ -409,8 +407,7 @@ func (qb *TagStore) FindByImageID(ctx context.Context, imageID int) ([]*models.T WHERE images_join.image_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{imageID} return qb.queryTags(ctx, query, args) } @@ -422,8 +419,7 @@ func (qb *TagStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mode WHERE galleries_join.gallery_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{galleryID} return qb.queryTags(ctx, query, args) } @@ -435,8 +431,7 @@ func (qb *TagStore) FindByGroupID(ctx context.Context, groupID int) ([]*models.T WHERE groups_join.group_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{groupID} return qb.queryTags(ctx, query, args) } @@ -448,8 +443,7 @@ func (qb *TagStore) FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) WHERE scene_markers_join.scene_marker_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{sceneMarkerID} return qb.queryTags(ctx, query, args) } @@ -461,8 +455,7 @@ func (qb *TagStore) FindByStudioID(ctx context.Context, studioID int) ([]*models WHERE studios_join.studio_id = ? GROUP BY tags.id ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{studioID} return qb.queryTags(ctx, query, args) } @@ -526,8 +519,7 @@ func (qb *TagStore) FindByParentTagID(ctx context.Context, parentID int) ([]*mod INNER JOIN tags_relations ON tags_relations.child_id = tags.id WHERE tags_relations.parent_id = ? 
` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -538,8 +530,7 @@ func (qb *TagStore) FindByChildTagID(ctx context.Context, parentID int) ([]*mode INNER JOIN tags_relations ON tags_relations.parent_id = tags.id WHERE tags_relations.child_id = ? ` - add, _ := qb.getDefaultTagSort() - query += add + query += qb.getDefaultTagSort() args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -629,7 +620,7 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, return nil, 0, err } query.addSort(add) - query.sortAndPagination[len(query.sortAndPagination)-1] += getPagination(findFilter) + query.addPagination(getPagination(findFilter)) query.addGroupBy(agg, true) idsResult, countResult, err := query.executeFind(ctx) if err != nil { @@ -660,8 +651,9 @@ var tagSortOptions = sortOptions{ "updated_at", } -func (qb *TagStore) getDefaultTagSort() (string, []string) { - return getSort("name", "ASC", "tags") +func (qb *TagStore) getDefaultTagSort() string { + add, _ := getSort("name", "ASC", "tags") + return " ORDER BY " + add } func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, []string, error) { @@ -686,7 +678,7 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte case "scenes_count": sortQuery += getCountSort(tagTable, scenesTagsTable, tagIDColumn, direction) case "scene_markers_count": - sortQuery += fmt.Sprintf(" ORDER BY (SELECT COUNT(*) FROM scene_markers_tags WHERE tags.id = scene_markers_tags.tag_id)+(SELECT COUNT(*) FROM scene_markers WHERE tags.id = scene_markers.primary_tag_id) %s", getSortDirection(direction)) + sortQuery += fmt.Sprintf("(SELECT COUNT(*) FROM scene_markers_tags WHERE tags.id = scene_markers_tags.tag_id)+(SELECT COUNT(*) FROM scene_markers WHERE tags.id = scene_markers.primary_tag_id) %s", getSortDirection(direction)) case 
"images_count": sortQuery += getCountSort(tagTable, imagesTagsTable, tagIDColumn, direction) case "galleries_count": From 1c537b821fa56a0e6d372e565d77511358e0adcf Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 15 Oct 2024 10:22:40 +0200 Subject: [PATCH 42/85] Revert: postgres backup system --- internal/manager/task/migrate.go | 2 +- pkg/sqlite/database_postgres.go | 25 +++++++------------------ 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/internal/manager/task/migrate.go b/internal/manager/task/migrate.go index 8ce478ec80e..609512b2f4b 100644 --- a/internal/manager/task/migrate.go +++ b/internal/manager/task/migrate.go @@ -50,7 +50,7 @@ func (s *MigrateJob) Execute(ctx context.Context, progress *job.Progress) error // always backup so that we can roll back to the previous version if // migration fails backupPath := s.BackupPath - if backupPath == "" || s.Database.DatabaseType() == sqlite.PostgresBackend { + if backupPath == "" { backupPath = database.DatabaseBackupPath(s.Config.GetBackupDirectoryPath()) } else { // check if backup path is a filename or path diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 2f667f451d6..a5cba4a9309 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -2,12 +2,12 @@ package sqlite import ( "fmt" - "time" "github.com/doug-martin/goqu/v9" _ "github.com/doug-martin/goqu/v9/dialect/postgres" _ "github.com/jackc/pgx/v5/stdlib" "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/logger" ) type PostgresDB struct { @@ -92,30 +92,19 @@ END $$; return err } -// getDBCloneCommand returns the command to clone a database from a backup file -func getDBCloneCommand(backupPath string, dbname string) string { - return fmt.Sprintf(` -SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity -WHERE pg_stat_activity.datname = '%[2]s' AND pid <> pg_backend_pid(); -CREATE DATABASE %[1]s WITH 
TEMPLATE %[2]s; -`, backupPath, dbname) -} - -// Backup creates a backup of the database at the given path. func (db *PostgresDB) Backup(backupPath string) (err error) { - _, err = db.writeDB.Exec(getDBCloneCommand(backupPath, "stash")) - return err + logger.Warn("Postgres backend detected, ignoring Backup request") + return nil } // RestoreFromBackup restores the database from a backup file at the given path. func (db *PostgresDB) RestoreFromBackup(backupPath string) (err error) { - sqlcmd := "DROP DATABASE stash;\n" + getDBCloneCommand("stash", backupPath) - - _, err = db.writeDB.Exec(sqlcmd) - return err + logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") + return nil } // DatabaseBackupPath returns the path to a database backup file for the given directory. func (db *PostgresDB) DatabaseBackupPath(backupDirectoryPath string) string { - return fmt.Sprintf("stash_%d_%s", db.schemaVersion, time.Now().Format("20060102_150405")) + logger.Warn("Postgres backend detected, ignoring DatabaseBackupPath request") + return "" } From 992f481f919f7cf3e62929c8783cf23c93dad99e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 15 Oct 2024 16:07:04 +0200 Subject: [PATCH 43/85] Fix some tests for pgsql --- internal/autotag/integration_test.go | 7 +------ pkg/sqlite/migrationsPostgres/1_initial.up.sql | 2 +- pkg/sqlite/setup_test.go | 9 +++++++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index a9796142aa5..a35f7d0c0c5 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -37,16 +37,11 @@ var db sqlite.DBInterface var r models.Repository func testTeardown(databaseFile string) { - err := db.Close() + err := db.Remove() if err != nil { panic(err) } - - err = os.Remove(databaseFile) - if err != nil { - panic(err) - } } func getNewDB(databaseFile string) sqlite.DBInterface { 
diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index 958510d4cd9..a01808395d6 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -64,7 +64,7 @@ CREATE TABLE IF NOT EXISTS performers ( ignore_auto_tag boolean not null default FALSE, image_blob varchar(255) REFERENCES blobs(checksum), penis_length float, - circumcised varchar[10] + circumcised varchar(255) ); CREATE TABLE IF NOT EXISTS studios ( id serial not null primary key, diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 80685540464..f58deda2790 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -1039,7 +1039,7 @@ func getObjectDate(index int) *models.Date { func sceneStashID(i int) models.StashID { return models.StashID{ - StashID: getSceneStringValue(i, "stashid"), + StashID: getUUID("stashid"), Endpoint: getSceneStringValue(i, "endpoint"), } } @@ -1504,9 +1504,14 @@ func getIgnoreAutoTag(index int) bool { return index%5 == 0 } +func getUUID(_ string) string { + // TODO: Encode input string + return "00000000-0000-0000-0000-000000000000" +} + func performerStashID(i int) models.StashID { return models.StashID{ - StashID: getPerformerStringValue(i, "stashid"), + StashID: getUUID("stashid"), Endpoint: getPerformerStringValue(i, "endpoint"), } } From 390059de56b91c9e52d48611f405adf5f15ce5df Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:11:36 +0200 Subject: [PATCH 44/85] more test fixes for pgsql --- pkg/models/relationships.go | 5 +++++ pkg/sqlite/database.go | 26 -------------------------- pkg/sqlite/database_postgres.go | 21 +++++++++++++++++++-- pkg/sqlite/database_sqlite.go | 26 ++++++++++++++++++++++++++ pkg/sqlite/gallery_test.go | 20 ++++++++++++++++++++ 5 files changed, 70 insertions(+), 28 deletions(-) diff --git a/pkg/models/relationships.go 
b/pkg/models/relationships.go index 5495f858b17..a899490ec52 100644 --- a/pkg/models/relationships.go +++ b/pkg/models/relationships.go @@ -2,6 +2,7 @@ package models import ( "context" + "slices" "github.com/stashapp/stash/pkg/sliceutil" ) @@ -86,6 +87,10 @@ func (r RelatedIDs) Loaded() bool { return r.list != nil } +func (r RelatedIDs) Sort() { + slices.Sort(r.list) +} + func (r RelatedIDs) mustLoaded() { if !r.Loaded() { panic("list has not been loaded") diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index c0d5e56a410..e68a14aa209 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -299,32 +299,6 @@ func (db *Database) initialise() error { return nil } -func (db *Database) openReadDB() error { - const ( - disableForeignKeys = false - writable = false - ) - var err error - db.readDB, err = db.open(disableForeignKeys, writable) - db.readDB.SetMaxOpenConns(maxReadConnections) - db.readDB.SetMaxIdleConns(maxReadConnections) - db.readDB.SetConnMaxIdleTime(dbConnTimeout) - return err -} - -func (db *Database) openWriteDB() error { - const ( - disableForeignKeys = false - writable = true - ) - var err error - db.writeDB, err = db.open(disableForeignKeys, writable) - db.writeDB.SetMaxOpenConns(maxWriteConnections) - db.writeDB.SetMaxIdleConns(maxWriteConnections) - db.writeDB.SetConnMaxIdleTime(dbConnTimeout) - return err -} - func (db *Database) Anonymise(outPath string) error { anon, err := NewAnonymiser(db, outPath) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index a5cba4a9309..e4b3540bb2a 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -34,6 +34,25 @@ func NewPostgresDatabase(dbConnector string) *PostgresDB { func (db *PostgresDB) lock() {} func (db *PostgresDB) unlock() {} +func (db *PostgresDB) openReadDB() error { + const ( + disableForeignKeys = false + writable = true + ) + var err error + db.readDB, err = db.open(disableForeignKeys, writable) + 
db.readDB.SetConnMaxIdleTime(dbConnTimeout) + db.writeDB = db.readDB + return err +} + +func (db *PostgresDB) openWriteDB() error { + if db.writeDB == nil { + return db.openReadDB() + } + return nil +} + func (db *PostgresDB) DatabaseType() DatabaseType { return PostgresBackend } @@ -97,13 +116,11 @@ func (db *PostgresDB) Backup(backupPath string) (err error) { return nil } -// RestoreFromBackup restores the database from a backup file at the given path. func (db *PostgresDB) RestoreFromBackup(backupPath string) (err error) { logger.Warn("Postgres backend detected, ignoring RestoreFromBackup request") return nil } -// DatabaseBackupPath returns the path to a database backup file for the given directory. func (db *PostgresDB) DatabaseBackupPath(backupDirectoryPath string) string { logger.Warn("Postgres backend detected, ignoring DatabaseBackupPath request") return "" diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index c8d9ebdda29..4f302c10643 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -56,6 +56,32 @@ func (db *SQLiteDB) unlock() { } } +func (db *SQLiteDB) openReadDB() error { + const ( + disableForeignKeys = false + writable = false + ) + var err error + db.readDB, err = db.open(disableForeignKeys, writable) + db.readDB.SetMaxOpenConns(maxReadConnections) + db.readDB.SetMaxIdleConns(maxReadConnections) + db.readDB.SetConnMaxIdleTime(dbConnTimeout) + return err +} + +func (db *SQLiteDB) openWriteDB() error { + const ( + disableForeignKeys = false + writable = true + ) + var err error + db.writeDB, err = db.open(disableForeignKeys, writable) + db.writeDB.SetMaxOpenConns(maxWriteConnections) + db.writeDB.SetMaxIdleConns(maxWriteConnections) + db.writeDB.SetConnMaxIdleTime(dbConnTimeout) + return err +} + func (db *SQLiteDB) AppSchemaVersion() uint { return appSchemaVersion } diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index ee602ef05b6..9408ec1b40e 100644 --- 
a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -54,6 +54,13 @@ func loadGalleryRelationships(ctx context.Context, expected models.Gallery, actu return nil } +func sortGallery(copy *models.Gallery) { + // Ordering is not ensured + copy.SceneIDs.Sort() + copy.PerformerIDs.Sort() + copy.TagIDs.Sort() +} + func Test_galleryQueryBuilder_Create(t *testing.T) { var ( title = "title" @@ -180,6 +187,10 @@ func Test_galleryQueryBuilder_Create(t *testing.T) { return } + // Ordering is not ensured + sortGallery(copy) + sortGallery(s) + assert.Equal(copy, s) // ensure can find the scene @@ -380,6 +391,10 @@ func Test_galleryQueryBuilder_Update(t *testing.T) { return } + // Ordering is not ensured + sortGallery(copy) + sortGallery(s) + assert.Equal(copy, *s) return @@ -809,6 +824,11 @@ func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) { return } + // Ordering is not ensured + sortGallery(copy) + sortGallery(s) + sortGallery(got) + // only compare fields that were in the partial if tt.partial.PerformerIDs != nil { assert.ElementsMatch(tt.want.PerformerIDs.List(), got.PerformerIDs.List()) From 9cd1e5d624896a4f471706828f43da1fb4c2889d Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:11:52 +0200 Subject: [PATCH 45/85] for this compilation fix for tests for pgsql --- pkg/sqlite/gallery_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index 9408ec1b40e..00f4ccffac6 100644 --- a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -188,8 +188,8 @@ func Test_galleryQueryBuilder_Create(t *testing.T) { } // Ordering is not ensured - sortGallery(copy) - sortGallery(s) + sortGallery(©) + sortGallery(&s) assert.Equal(copy, s) @@ -392,7 +392,7 @@ func Test_galleryQueryBuilder_Update(t *testing.T) { } // Ordering is not ensured - sortGallery(copy) + sortGallery(©) sortGallery(s) 
assert.Equal(copy, *s) @@ -825,9 +825,9 @@ func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) { } // Ordering is not ensured - sortGallery(copy) sortGallery(s) sortGallery(got) + sortGallery(&tt.want) // only compare fields that were in the partial if tt.partial.PerformerIDs != nil { From 2edc21491c9675888068999be6f39ecc071b9214 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:14:19 +0200 Subject: [PATCH 46/85] tests for sqlite bugfix --- pkg/sqlite/gallery_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index 00f4ccffac6..9381db5d898 100644 --- a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -209,6 +209,9 @@ func Test_galleryQueryBuilder_Create(t *testing.T) { return } + sortGallery(©) + sortGallery(found) + assert.Equal(copy, *found) return From 8fa2b3811ea52da43010afa2397619015410e808 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 17 Oct 2024 13:56:44 +0200 Subject: [PATCH 47/85] pgsql fix tests (suffering) --- pkg/sqlite/database_postgres.go | 20 ++++++++------------ pkg/sqlite/gallery_filter.go | 6 +++++- pkg/sqlite/gallery_test.go | 13 +++++++++++++ pkg/sqlite/scene_filter.go | 6 +++++- 4 files changed, 31 insertions(+), 14 deletions(-) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index e4b3540bb2a..afa670c9326 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -92,20 +92,16 @@ func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.D func (db *PostgresDB) Remove() (err error) { _, err = db.writeDB.Exec(` -DO $$ DECLARE - r RECORD; +DO $$ +DECLARE + r record; BEGIN - -- Disable triggers to avoid foreign key constraint violations - EXECUTE 'SET session_replication_role = replica'; - - -- Drop all tables - FOR r IN (SELECT tablename FROM pg_tables 
WHERE schemaname = 'public') LOOP - EXECUTE 'DROP TABLE IF EXISTS ' || quote_ident(r.tablename) || ' CASCADE'; + FOR r IN SELECT quote_ident(tablename) AS tablename, quote_ident(schemaname) AS schemaname FROM pg_tables WHERE schemaname = 'public' + LOOP + RAISE INFO 'Dropping table %.%', r.schemaname, r.tablename; + EXECUTE format('DROP TABLE IF EXISTS %I.%I CASCADE', r.schemaname, r.tablename); END LOOP; - - -- Re-enable triggers - EXECUTE 'SET session_replication_role = DEFAULT'; -END $$; +END$$; `) return err diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index d3465eb6cf6..7a369836e33 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -267,7 +267,11 @@ func (qb *galleryFilterHandler) missingCriterionHandler(isMissing *string) crite galleryRepository.performers.join(f, "performers_join", "galleries.id") f.addWhere("performers_join.gallery_id IS NULL") case "date": - f.addWhere("galleries.date IS NULL OR galleries.date IS \"\"") + q := "galleries.date IS NULL" + if dbWrapper.dbType == SqliteBackend { + q += ` OR galleries.date IS ""` + } + f.addWhere(q) case "tags": galleryRepository.tags.join(f, "tags_join", "galleries.id") f.addWhere("tags_join.gallery_id IS NULL") diff --git a/pkg/sqlite/gallery_test.go b/pkg/sqlite/gallery_test.go index 9381db5d898..b94af647128 100644 --- a/pkg/sqlite/gallery_test.go +++ b/pkg/sqlite/gallery_test.go @@ -6,6 +6,7 @@ package sqlite_test import ( "context" "math" + "sort" "strconv" "testing" "time" @@ -61,6 +62,12 @@ func sortGallery(copy *models.Gallery) { copy.TagIDs.Sort() } +func sortByID[T any](list []T, getID func(T) int) { + sort.Slice(list, func(i, j int) bool { + return getID(list[i]) < getID(list[j]) + }) +} + func Test_galleryQueryBuilder_Create(t *testing.T) { var ( title = "title" @@ -1131,6 +1138,8 @@ func Test_galleryQueryBuilder_FindByChecksums(t *testing.T) { return } + sortByID(tt.want, func(g *models.Gallery) int { return g.ID }) + sortByID(got, func(g 
*models.Gallery) int { return g.ID }) assert.Equal(tt.want, got) }) } @@ -1231,6 +1240,8 @@ func Test_galleryQueryBuilder_FindBySceneID(t *testing.T) { return } + sortByID(tt.want, func(g *models.Gallery) int { return g.ID }) + sortByID(got, func(g *models.Gallery) int { return g.ID }) assert.Equal(tt.want, got) }) } @@ -1276,6 +1287,8 @@ func Test_galleryQueryBuilder_FindByImageID(t *testing.T) { return } + sortByID(tt.want, func(g *models.Gallery) int { return g.ID }) + sortByID(got, func(g *models.Gallery) int { return g.ID }) assert.Equal(tt.want, got) }) } diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 632152c8ebb..00ea8cb8d7f 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -326,7 +326,11 @@ func (qb *sceneFilterHandler) isMissingCriterionHandler(isMissing *string) crite sceneRepository.performers.join(f, "performers_join", "scenes.id") f.addWhere("performers_join.scene_id IS NULL") case "date": - f.addWhere(`scenes.date IS NULL OR scenes.date IS ""`) + q := "scenes.date IS NULL" + if dbWrapper.dbType == SqliteBackend { + q += ` OR scenes.date IS ""` + } + f.addWhere(q) case "tags": sceneRepository.tags.join(f, "tags_join", "scenes.id") f.addWhere("tags_join.scene_id IS NULL") From aa4f2575faa20f2d66741eeaee6de58eb8169a7b Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 18 Oct 2024 16:38:02 +0200 Subject: [PATCH 48/85] pgsql fix more tests i fix bugs, therefore i am --- pkg/sqlite/criterion_handlers.go | 39 +++++++++++++++++-------------- pkg/sqlite/database.go | 17 ++++++++++++++ pkg/sqlite/gallery_filter.go | 2 +- pkg/sqlite/performer_filter.go | 4 ++-- pkg/sqlite/performer_test.go | 12 +++++----- pkg/sqlite/scene_marker_filter.go | 12 +++++----- pkg/sqlite/scene_test.go | 16 ++++++------- 7 files changed, 61 insertions(+), 41 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index ed83df9617e..2cc0d91055b 
100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -283,7 +283,7 @@ func resolutionCriterionHandler(resolution *models.ResolutionCriterionInput, hei min := resolution.Value.GetMinResolution() max := resolution.Value.GetMaxResolution() - widthHeight := fmt.Sprintf("MIN(%s, %s)", widthColumn, heightColumn) + widthHeight := fmt.Sprintf("%s(%s, %s)", getDBMinFunc(), widthColumn, heightColumn) switch resolution.Modifier { case models.CriterionModifierEquals: @@ -596,7 +596,7 @@ type hierarchicalMultiCriterionHandlerBuilder struct { relationsTable string } -func getHierarchicalValues(ctx context.Context, values []string, table, relationsTable, parentFK string, childFK string, depth *int) (string, error) { +func getHierarchicalValues(ctx context.Context, values []string, table, relationsTable, parentFK string, childFK string, depth *int, parenthesis bool) (string, error) { var args []interface{} if parentFK == "" { @@ -627,7 +627,11 @@ func getHierarchicalValues(ctx context.Context, values []string, table, relation } if valid { - return "VALUES" + strings.Join(valuesClauses, ","), nil + values := "VALUES" + strings.Join(valuesClauses, ",") + if parenthesis { + values = "(" + values + ")" + getDBValuesFix() + } + return values, nil } } @@ -690,6 +694,10 @@ WHERE id in {inBinding} valuesClause.String = "VALUES" + strings.Join(values, ",") } + if parenthesis { + valuesClause.String = "(" + valuesClause.String + ")" + getDBValuesFix() + } + return valuesClause.String, nil } @@ -742,13 +750,8 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hierarchica criterion.Value = nil } - var pgsql_fix string - if dbWrapper.dbType == PostgresBackend { - pgsql_fix = " AS v(column1, column2)" - } - if len(criterion.Value) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, 
criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth, true) if err != nil { f.setError(err) return @@ -756,21 +759,21 @@ func (m *hierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hierarchica switch criterion.Modifier { case models.CriterionModifierIncludes: - f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s)%s)", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) + f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM %s)", m.primaryTable, m.foreignFK, valuesClause)) case models.CriterionModifierIncludesAll: - f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM (%s)%s)", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) + f.addWhere(fmt.Sprintf("%s.%s IN (SELECT column2 FROM %s)", m.primaryTable, m.foreignFK, valuesClause)) f.addHaving(fmt.Sprintf("count(distinct %s.%s) = %d", m.primaryTable, m.foreignFK, len(criterion.Value))) } } if len(criterion.Excludes) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth, true) if err != nil { f.setError(err) return } - f.addWhere(fmt.Sprintf("%s.%s NOT IN (SELECT column2 FROM (%s)%s) OR %[1]s.%[2]s IS NULL", m.primaryTable, m.foreignFK, valuesClause, pgsql_fix)) + f.addWhere(fmt.Sprintf("%s.%s NOT IN (SELECT column2 FROM %s) OR %[1]s.%[2]s IS NULL", m.primaryTable, m.foreignFK, valuesClause)) } } } @@ -859,7 +862,7 @@ func (m *joinedHierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hiera } if len(criterion.Value) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, criterion.Value, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth, false) if err != 
nil { f.setError(err) return @@ -881,7 +884,7 @@ func (m *joinedHierarchicalMultiCriterionHandlerBuilder) handler(c *models.Hiera } if len(criterion.Excludes) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, m.foreignTable, m.relationsTable, m.parentFK, m.childFK, criterion.Depth, false) if err != nil { f.setError(err) return @@ -959,7 +962,7 @@ func (h *joinedPerformerTagsHandler) handle(ctx context.Context, f *filterBuilde } if len(criterion.Value) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Value, tagTable, "tags_relations", "", "", criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, criterion.Value, tagTable, "tags_relations", "", "", criterion.Depth, false) if err != nil { f.setError(err) return @@ -977,13 +980,13 @@ INNER JOIN (`+valuesClause+`) t ON t.column2 = pt.tag_id } if len(criterion.Excludes) > 0 { - valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, tagTable, "tags_relations", "", "", criterion.Depth) + valuesClause, err := getHierarchicalValues(ctx, criterion.Excludes, tagTable, "tags_relations", "", "", criterion.Depth, true) if err != nil { f.setError(err) return } - clause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{joinPrimaryKey} FROM {joinTable} INNER JOIN performers_tags ON {joinTable}.performer_id = performers_tags.performer_id WHERE performers_tags.tag_id IN (SELECT column2 FROM (%s)))", strFormatMap) + clause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{joinPrimaryKey} FROM {joinTable} INNER JOIN performers_tags ON {joinTable}.performer_id = performers_tags.performer_id WHERE performers_tags.tag_id IN (SELECT column2 FROM %s))", strFormatMap) f.addWhere(fmt.Sprintf(clause, valuesClause)) } } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 
e68a14aa209..b85451aabeb 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -187,6 +187,23 @@ func getDBBoolean(val bool) string { } } +func getDBValuesFix() (val string) { + if dbWrapper.dbType == PostgresBackend { + val = " AS v(column1, column2)" + } + + return val +} + +func getDBMinFunc() string { + switch dbWrapper.dbType { + case PostgresBackend: + return "LEAST" + default: + return "MIN" + } +} + func (db *Database) SetSchemaVersion(version uint) { db.schemaVersion = version } diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index 7a369836e33..d4483ea3547 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -421,7 +421,7 @@ func (qb *galleryFilterHandler) averageResolutionCriterionHandler(resolution *mo min := resolution.Value.GetMinResolution() max := resolution.Value.GetMaxResolution() - const widthHeight = "avg(MIN(image_files.width, image_files.height))" + var widthHeight = "avg(" + getDBMinFunc() + "(image_files.width, image_files.height))" switch resolution.Modifier { case models.CriterionModifierEquals: diff --git a/pkg/sqlite/performer_filter.go b/pkg/sqlite/performer_filter.go index 8d532835478..f92777245ea 100644 --- a/pkg/sqlite/performer_filter.go +++ b/pkg/sqlite/performer_filter.go @@ -456,12 +456,12 @@ func (qb *performerFilterHandler) studiosCriterionHandler(studios *models.Hierar } const derivedPerformerStudioTable = "performer_studio" - valuesClause, err := getHierarchicalValues(ctx, studios.Value, studioTable, "", "parent_id", "child_id", studios.Depth) + valuesClause, err := getHierarchicalValues(ctx, studios.Value, studioTable, "", "parent_id", "child_id", studios.Depth, true) if err != nil { f.setError(err) return } - f.addWith("studio(root_id, item_id) AS (" + valuesClause + ")") + f.addWith("studio(root_id, item_id) AS " + valuesClause) templStr := `SELECT performer_id FROM {primaryTable} INNER JOIN {joinTable} ON {primaryTable}.id = {joinTable}.{primaryFK} diff 
--git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index e03ee42063e..d900eed9f15 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -70,8 +70,8 @@ func Test_PerformerStore_Create(t *testing.T) { favorite = true endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) @@ -217,8 +217,8 @@ func Test_PerformerStore_Update(t *testing.T) { favorite = true endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) @@ -398,8 +398,8 @@ func Test_PerformerStore_UpdatePartial(t *testing.T) { favorite = true endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") createdAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) updatedAt = time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC) diff --git a/pkg/sqlite/scene_marker_filter.go b/pkg/sqlite/scene_marker_filter.go index c001a09f6b9..85c125dcd4f 100644 --- a/pkg/sqlite/scene_marker_filter.go +++ b/pkg/sqlite/scene_marker_filter.go @@ -95,7 +95,7 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera } if len(tags.Value) > 0 { - valuesClause, err := getHierarchicalValues(ctx, tags.Value, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth) + valuesClause, err := getHierarchicalValues(ctx, tags.Value, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth, true) if err != nil { f.setError(err) return @@ -103,10 +103,10 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera 
f.addWith(`marker_tags AS ( SELECT mt.scene_marker_id, t.column1 AS root_tag_id FROM scene_markers_tags mt - INNER JOIN (` + valuesClause + `) t ON t.column2 = mt.tag_id + INNER JOIN ` + valuesClause + ` t ON t.column2 = mt.tag_id UNION SELECT m.id, t.column1 FROM scene_markers m - INNER JOIN (` + valuesClause + `) t ON t.column2 = m.primary_tag_id + INNER JOIN ` + valuesClause + ` t ON t.column2 = m.primary_tag_id )`) f.addLeftJoin("marker_tags", "", "marker_tags.scene_marker_id = scene_markers.id") @@ -127,16 +127,16 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera } if len(criterion.Excludes) > 0 { - valuesClause, err := getHierarchicalValues(ctx, tags.Excludes, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth) + valuesClause, err := getHierarchicalValues(ctx, tags.Excludes, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth, true) if err != nil { f.setError(err) return } - clause := "scene_markers.id NOT IN (SELECT scene_markers_tags.scene_marker_id FROM scene_markers_tags WHERE scene_markers_tags.tag_id IN (SELECT column2 FROM (%s)))" + clause := "scene_markers.id NOT IN (SELECT scene_markers_tags.scene_marker_id FROM scene_markers_tags WHERE scene_markers_tags.tag_id IN (SELECT column2 FROM %s))" f.addWhere(fmt.Sprintf(clause, valuesClause)) - f.addWhere(fmt.Sprintf("scene_markers.primary_tag_id NOT IN (SELECT column2 FROM (%s))", valuesClause)) + f.addWhere(fmt.Sprintf("scene_markers.primary_tag_id NOT IN (SELECT column2 FROM %s)", valuesClause)) } } } diff --git a/pkg/sqlite/scene_test.go b/pkg/sqlite/scene_test.go index 97a9f8bea02..7faba071992 100644 --- a/pkg/sqlite/scene_test.go +++ b/pkg/sqlite/scene_test.go @@ -91,8 +91,8 @@ func Test_sceneQueryBuilder_Create(t *testing.T) { sceneIndex2 = 234 endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") date, _ = 
models.ParseDate("2003-02-01") @@ -321,8 +321,8 @@ func Test_sceneQueryBuilder_Update(t *testing.T) { sceneIndex2 = 234 endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") date, _ = models.ParseDate("2003-02-01") ) @@ -531,8 +531,8 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) { sceneIndex2 = 234 endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") date, _ = models.ParseDate("2003-02-01") ) @@ -725,8 +725,8 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) { sceneIndex2 = 234 endpoint1 = "endpoint1" endpoint2 = "endpoint2" - stashID1 = "stashid1" - stashID2 = "stashid2" + stashID1 = getUUID("stashid1") + stashID2 = getUUID("stashid2") groupScenes = []models.GroupsScenes{ { From df48fc709cf4a7feaa165b4231492afe6c1e122d Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sat, 19 Oct 2024 19:06:32 +0200 Subject: [PATCH 49/85] pgsql more test fixes --- internal/autotag/integration_test.go | 4 ++-- internal/manager/init.go | 6 +++--- pkg/sqlite/anonymise.go | 6 +++++- pkg/sqlite/database_postgres.go | 9 +++++---- pkg/sqlite/database_sqlite.go | 10 ++++++---- pkg/sqlite/setup_test.go | 4 ++-- 6 files changed, 23 insertions(+), 16 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index a35f7d0c0c5..a79428652b6 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -47,10 +47,10 @@ func testTeardown(databaseFile string) { func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { - db = sqlite.NewPostgresDatabase(dbUrl) + db = sqlite.NewPostgresDatabase(dbUrl, true) } else { sqlite.RegisterSqliteDialect() - db = 
sqlite.NewSQLiteDatabase(databaseFile) + db = sqlite.NewSQLiteDatabase(databaseFile, true) } return db diff --git a/internal/manager/init.go b/internal/manager/init.go index fab76b60d5c..f89178443e2 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -41,12 +41,12 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { upperUrl := strings.ToUpper(dbUrl) switch { case strings.HasPrefix(upperUrl, string(sqlite.PostgresBackend)+":"): - db = sqlite.NewPostgresDatabase(dbUrl) + db = sqlite.NewPostgresDatabase(dbUrl, true) case strings.HasPrefix(upperUrl, string(sqlite.SqliteBackend)+":"): - db = sqlite.NewSQLiteDatabase(dbUrl[len(sqlite.SqliteBackend)+1:]) + db = sqlite.NewSQLiteDatabase(dbUrl[len(sqlite.SqliteBackend)+1:], true) default: // Assume it's the path to a SQLite database - for backwards compat - db = sqlite.NewSQLiteDatabase(dbUrl) + db = sqlite.NewSQLiteDatabase(dbUrl, true) } repo := db.Repository() diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index 7ef4ae2588a..e8eb072f9ee 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -28,11 +28,15 @@ type Anonymiser struct { } func NewAnonymiser(db DBInterface, outPath string) (*Anonymiser, error) { + if dbWrapper.dbType == PostgresBackend { + return nil, fmt.Errorf("anonymise is not yet implemented for postgres backend") + } + if _, err := db.GetWriteDB().Exec(fmt.Sprintf(`VACUUM INTO "%s"`, outPath)); err != nil { return nil, fmt.Errorf("vacuuming into %s: %w", outPath, err) } - newDB := NewSQLiteDatabase(outPath) + newDB := NewSQLiteDatabase(outPath, false) if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index afa670c9326..601cf302919 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -14,9 +14,7 @@ type PostgresDB struct { Database } -func NewPostgresDatabase(dbConnector string) 
*PostgresDB { - dialect = goqu.Dialect("postgres") - +func NewPostgresDatabase(dbConnector string, init bool) *PostgresDB { db := &PostgresDB{ Database: Database{ storeRepository: newDatabase(), @@ -25,7 +23,10 @@ func NewPostgresDatabase(dbConnector string) *PostgresDB { } db.DBInterface = db - dbWrapper.dbType = PostgresBackend + if init { + dialect = goqu.Dialect("postgres") + dbWrapper.dbType = PostgresBackend + } return db } diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 4f302c10643..2a0c7c65439 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -21,11 +21,10 @@ func RegisterSqliteDialect() { opts := sqlite3.DialectOptions() opts.SupportsReturn = true goqu.RegisterDialect("sqlite3new", opts) -} -func NewSQLiteDatabase(dbPath string) *SQLiteDB { - dialect = goqu.Dialect("sqlite3new") +} +func NewSQLiteDatabase(dbPath string, init bool) *SQLiteDB { db := &SQLiteDB{ Database: Database{ storeRepository: newDatabase(), @@ -35,7 +34,10 @@ func NewSQLiteDatabase(dbPath string) *SQLiteDB { } db.DBInterface = db - dbWrapper.dbType = SqliteBackend + if init { + dialect = goqu.Dialect("sqlite3new") + dbWrapper.dbType = SqliteBackend + } return db } diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index f58deda2790..e9d166091e4 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -633,10 +633,10 @@ func testTeardown(databaseFile string) { func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { - db = sqlite.NewPostgresDatabase(dbUrl) + db = sqlite.NewPostgresDatabase(dbUrl, true) } else { sqlite.RegisterSqliteDialect() - db = sqlite.NewSQLiteDatabase(databaseFile) + db = sqlite.NewSQLiteDatabase(databaseFile, true) } return db From 45bbb76bd6a3b3f10789776bf9335d0423bfdadb Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sun, 20 Oct 2024 21:16:23 +0200 Subject: [PATCH 50/85] 
pgsql: more work on passing tests --- pkg/sqlite/database.go | 9 +++++ pkg/sqlite/file.go | 11 +++--- pkg/sqlite/gallery.go | 27 ++++++-------- pkg/sqlite/group.go | 21 ++++------- pkg/sqlite/image.go | 30 ++++++---------- pkg/sqlite/performer.go | 21 +++++------ pkg/sqlite/query.go | 54 +++++++--------------------- pkg/sqlite/repository.go | 21 +++-------- pkg/sqlite/scene.go | 74 ++++++++++++++------------------------ pkg/sqlite/scene_marker.go | 14 +++----- pkg/sqlite/sql.go | 47 +++++++++--------------- pkg/sqlite/studio.go | 21 +++++------ pkg/sqlite/tag.go | 26 ++++++-------- 13 files changed, 133 insertions(+), 243 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index b85451aabeb..955b4104668 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -204,6 +204,15 @@ func getDBMinFunc() string { } } +func DBGroupConcat(columnName string) string { + switch dbWrapper.dbType { + case PostgresBackend: + return "STRING_AGG(" + columnName + "::TEXT, ',')" + default: + return "GROUP_CONCAT(" + columnName + ")" + } +} + func (db *Database) SetSchemaVersion(version uint) { db.schemaVersion = version } diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 73c6cfccbf5..d462db4d862 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -869,7 +869,7 @@ func (qb *FileStore) Query(ctx context.Context, options models.FileQueryOptions) if err := qb.setQuerySort(&query, findFilter); err != nil { return nil, err } - query.addPagination(getPagination(findFilter)) + query.sortAndPagination += getPagination(findFilter) result, err := qb.queryGroupedFields(ctx, options, query) if err != nil { @@ -898,7 +898,7 @@ func (qb *FileStore) queryGroupedFields(ctx context.Context, options models.File aggregateQuery := qb.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(temp.id) as total", nil) + aggregateQuery.addColumn("COUNT(temp.id) as total") } const includeSortPagination = false @@ -940,12 +940,9 @@ func (qb *FileStore) 
setQuerySort(query *queryBuilder, findFilter *models.FindFi switch sort { case "path": // special handling for path - query.addSort(fmt.Sprintf("folders.path %s, files.basename %[1]s", direction)) - query.addGroupBy([]string{"folders.path", "files.basename"}, true) + query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) default: - add, agg := getSort(sort, direction, "files") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, "files") } return nil diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 4e171026491..f439325ff98 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -735,7 +735,7 @@ func (qb *GalleryStore) makeQuery(ctx context.Context, galleryFilter *models.Gal if err := qb.setGallerySort(&query, findFilter); err != nil { return nil, err } - query.addPagination(getPagination(findFilter)) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -826,39 +826,32 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F switch sort { case "file_count": - query.addSort(getCountSort(galleryTable, galleriesFilesTable, galleryIDColumn, direction)) + query.sortAndPagination += getCountSort(galleryTable, galleriesFilesTable, galleryIDColumn, direction) case "images_count": - query.addSort(getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction)) + query.sortAndPagination += getCountSort(galleryTable, galleriesImagesTable, galleryIDColumn, direction) case "tag_count": - query.addSort(getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction)) + query.sortAndPagination += getCountSort(galleryTable, galleriesTagsTable, galleryIDColumn, direction) case "performer_count": - query.addSort(getCountSort(galleryTable, performersGalleriesTable, galleryIDColumn, direction)) + query.sortAndPagination += getCountSort(galleryTable, performersGalleriesTable, 
galleryIDColumn, direction) case "path": // special handling for path addFileTable() addFolderTable() - query.addSort(fmt.Sprintf("COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction)) - query.addGroupBy([]string{"folders.path", "file_folder.path", "files.basename"}, true) + query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) case "file_mod_time": sort = "mod_time" addFileTable() - add, agg := getSort(sort, direction, fileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, fileTable) case "title": addFileTable() addFolderTable() - query.addSort("COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction) - query.addGroupBy([]string{"galleries.title", "files.basename", "folders.path", "file_folder.path"}, true) + query.sortAndPagination += " ORDER BY COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction default: - add, agg := getSort(sort, direction, "galleries") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, "galleries") } // Whatever the sorting, always use title/id as a final sort - query.addSort("COALESCE(galleries.title, cast(galleries.id as text)) COLLATE NATURAL_CI ASC") - query.addGroupBy([]string{"galleries.title", "galleries.id"}, true) + query.sortAndPagination += ", COALESCE(galleries.title, CAST(galleries.id as text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index 6fcccf5d403..a5b919ff7bb 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -446,7 +446,7 @@ func 
(qb *GroupStore) makeQuery(ctx context.Context, groupFilter *models.GroupFi return nil, err } - query.addPagination(getPagination(findFilter)) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -513,30 +513,23 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF case "sub_group_order": // sub_group_order is a special sort that sorts by the order_index of the subgroups if query.hasJoin("groups_parents") { - add, agg := getSort("order_index", direction, "groups_parents") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort("order_index", direction, "groups_parents") } else { // this will give unexpected results if the query is not filtered by a parent group and // the group has multiple parents and order indexes query.join(groupRelationsTable, "", "groups.id = groups_relations.sub_id") - add, agg := getSort("order_index", direction, groupRelationsTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort("order_index", direction, groupRelationsTable) } case "tag_count": - query.addSort(getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction)) + query.sortAndPagination += getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction) case "scenes_count": // generic getSort won't work for this - query.addSort(getCountSort(groupTable, groupsScenesTable, groupIDColumn, direction)) + query.sortAndPagination += getCountSort(groupTable, groupsScenesTable, groupIDColumn, direction) default: - add, agg := getSort(sort, direction, "groups") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, "groups") } // Whatever the sorting, always use name/id as a final sort - query.addSort("COALESCE(groups.name, cast(groups.id as text)) COLLATE NATURAL_CI ASC") - query.addGroupBy([]string{"groups.name", "groups.id"}, true) + query.sortAndPagination += ", COALESCE(groups.name, CAST(groups.id as 
text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 94cc1c512eb..aafe740fb0d 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -849,7 +849,7 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima aggregateQuery := imageRepository.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total", nil) + aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total") } if options.Megapixels { @@ -863,8 +863,8 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima onClause: "images_files.file_id = image_files.file_id", }, ) - query.addColumn("COALESCE(image_files.width, 0) * COALESCE(image_files.height, 0) as megapixels", []string{"image_files.width", "image_files.height"}) - aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) / 1000000 as megapixels", nil) + query.addColumn("COALESCE(image_files.width, 0) * COALESCE(image_files.height, 0) as megapixels") + aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) / 1000000 as megapixels") } if options.TotalSize { @@ -878,8 +878,8 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima onClause: "images_files.file_id = files.id", }, ) - query.addColumn("COALESCE(files.size, 0) as size", []string{"files.size"}) - aggregateQuery.addColumn("SUM(temp.size) as size", nil) + query.addColumn("COALESCE(files.size, 0) as size") + aggregateQuery.addColumn("SUM(temp.size) as size") } const includeSortPagination = false @@ -968,8 +968,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod case "path": addFilesJoin() addFolderJoin() - sortClause = "COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction - q.addGroupBy([]string{"folders.path", "files.basename"}, true) + sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction 
case "file_count": sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) case "tag_count": @@ -978,27 +977,20 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) case "mod_time", "filesize": addFilesJoin() - add, agg := getSort(sort, direction, "files") - sortClause = add - q.addGroupBy(agg, true) + sortClause = getSort(sort, direction, "files") case "title": addFilesJoin() addFolderJoin() - sortClause = "COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction - q.addGroupBy([]string{"images.title", "files.basename", "folders.path"}, true) + sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction default: - add, agg := getSort(sort, direction, "images") - sortClause = add - q.addGroupBy(agg, true) + sortClause = getSort(sort, direction, "images") } // Whatever the sorting, always use title/id as a final sort - sortClause += ", COALESCE(images.title, cast(images.id as text)) COLLATE NATURAL_CI ASC" - q.addGroupBy([]string{"images.title", "images.id"}, true) + sortClause += ", COALESCE(images.title, CAST(images.id as text)) COLLATE NATURAL_CI ASC" } - q.addSort(sortClause) - q.addPagination(getPagination(findFilter)) + q.sortAndPagination = sortClause + getPagination(findFilter) return nil } diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 4410c5d7ff0..d97217fe3ef 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -612,13 +612,12 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models return nil, err } - add, agg, err := qb.getPerformerSort(findFilter) + var err error + query.sortAndPagination, err = qb.getPerformerSort(findFilter) if err != nil { return nil, err } - query.addSort(add) - 
query.addPagination(getPagination(findFilter)) - query.addGroupBy(agg, true) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -732,7 +731,7 @@ var performerSortOptions = sortOptions{ "weight", } -func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, []string, error) { +func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, error) { var sort string var direction string if findFilter == nil { @@ -745,10 +744,9 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := performerSortOptions.validateSort(sort); err != nil { - return "", nil, err + return "", err } - var agg []string sortQuery := "" switch sort { case "tag_count": @@ -768,15 +766,12 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s case "last_o_at": sortQuery += qb.sortByLastOAt(direction) default: - var add string - add, agg = getSort(sort, direction, "performers") - sortQuery += add + sortQuery += getSort(sort, direction, "performers") } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(performers.name, cast(performers.id as text)) COLLATE NATURAL_CI ASC" - agg = append(agg, "performers.name", "performers.id") - return sortQuery, agg, nil + sortQuery += ", COALESCE(performers.name, CAST(performers.id as text)) COLLATE NATURAL_CI ASC" + return sortQuery, nil } func (qb *PerformerStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index cb66d083951..4cf24bbdd30 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/stashapp/stash/pkg/models" - "github.com/stashapp/stash/pkg/sliceutil" ) type queryBuilder struct { @@ -15,47 +14,22 @@ type queryBuilder struct { columns []string from string - joins joins - whereClauses []string - 
havingClauses []string - args []interface{} - withClauses []string - recursiveWith bool - groupByClauses []string + joins joins + whereClauses []string + havingClauses []string + args []interface{} + withClauses []string + recursiveWith bool - sort []string - pagination string + sortAndPagination string } func (qb queryBuilder) body() string { return fmt.Sprintf("SELECT %s FROM %s%s", strings.Join(qb.columns, ", "), qb.from, qb.joins.toSQL()) } -/* - * Adds a column to select for the query - * Additionally allows doing group by on any non-aggregate columns (for pgsql) - */ -func (qb *queryBuilder) addColumn(column string, nonaggregates []string) { +func (qb *queryBuilder) addColumn(column string) { qb.columns = append(qb.columns, column) - qb.addGroupBy(nonaggregates, dbWrapper.dbType == PostgresBackend) -} - -func (qb *queryBuilder) addGroupBy(aggregate []string, pgsqlfix bool) { - if !pgsqlfix || len(aggregate) == 0 { - return - } - - qb.groupByClauses = sliceutil.AppendUniques(qb.groupByClauses, aggregate) -} - -func (qb *queryBuilder) addSort(sortby string) { - if len(sortby) > 0 { - qb.sort = append(qb.sort, sortby) - } -} - -func (qb *queryBuilder) addPagination(pag string) { - qb.pagination += pag } func (qb queryBuilder) toSQL(includeSortPagination bool) string { @@ -70,12 +44,10 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) + body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) + if includeSortPagination { - if len(qb.sort) > 0 { - body += " ORDER BY " + strings.Join(qb.sort, ", ") + " " - } - body += qb.pagination + body += qb.sortAndPagination } return body @@ -89,7 +61,7 @@ func (qb queryBuilder) findIDs(ctx context.Context) ([]int, error) { func (qb queryBuilder) executeFind(ctx context.Context) ([]int, int, 
error) { body := qb.body() - return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sort, qb.pagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) + return qb.repository.executeFindQuery(ctx, body, qb.from, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) } func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { @@ -104,7 +76,7 @@ func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, nil) + body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) countQuery := withClause + qb.repository.buildCountQuery(body) return qb.repository.runCountQuery(ctx, countQuery, qb.args) } diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index 30ece35fbce..da2bba69b9f 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -170,25 +170,20 @@ func (r *repository) querySimple(ctx context.Context, query string, args []inter return nil } -func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string, groupbyClauses []string) string { +func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string) string { if len(whereClauses) > 0 { body = body + " WHERE " + strings.Join(whereClauses, " AND ") // TODO handle AND or OR } if len(havingClauses) > 0 { - groupbyClauses = append(groupbyClauses, r.tableName+".id") - } - if len(groupbyClauses) > 0 { - body = body + " GROUP BY " + strings.Join(groupbyClauses, ", ") + " " - } - if len(havingClauses) > 0 { + body = body + " GROUP BY " + r.tableName + ".id " body = body + " HAVING " + strings.Join(havingClauses, " AND ") // TODO handle AND or OR } return body } -func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sort 
[]string, pagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { - body = r.buildQueryBody(body, whereClauses, havingClauses, nil) +func (r *repository) executeFindQuery(ctx context.Context, body string, table string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { + body = r.buildQueryBody(body, whereClauses, havingClauses) withClause := "" if len(withClauses) > 0 { @@ -200,13 +195,7 @@ func (r *repository) executeFindQuery(ctx context.Context, body string, args []i } countQuery := withClause + r.buildCountQuery(body) - idsQuery := withClause + body - if len(sort) > 0 { - idsQuery += " ORDER BY " + strings.Join(sort, ", ") + " " - } - if len(pagination) > 0 { - idsQuery += pagination - } + idsQuery := withClause + body + sortAndPagination // Perform query and fetch result var countResult int diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 40e0dffc7b3..be6dc5259b6 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -957,7 +957,7 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi if err := qb.setSceneSort(&query, findFilter); err != nil { return nil, err } - query.addPagination(getPagination(findFilter)) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -991,7 +991,7 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce aggregateQuery := sceneRepository.newQuery() if options.Count { - aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total", nil) + aggregateQuery.addColumn("COUNT(DISTINCT temp.id) as total") } if options.TotalDuration { @@ -1005,8 +1005,8 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce onClause: "scenes_files.file_id = video_files.file_id", }, ) - query.addColumn("COALESCE(video_files.duration, 0) as duration", 
[]string{"video_files.duration"}) - aggregateQuery.addColumn("SUM(temp.duration) as duration", nil) + query.addColumn("COALESCE(video_files.duration, 0) as duration") + aggregateQuery.addColumn("SUM(temp.duration) as duration") } if options.TotalSize { @@ -1020,8 +1020,8 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce onClause: "scenes_files.file_id = files.id", }, ) - query.addColumn("COALESCE(files.size, 0) as size", []string{"files.size"}) - aggregateQuery.addColumn("SUM(temp.size) as size", nil) + query.addColumn("COALESCE(files.size, 0) as size") + aggregateQuery.addColumn("SUM(temp.size) as size") } const includeSortPagination = false @@ -1130,26 +1130,21 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF switch sort { case "movie_scene_number": query.join(groupsScenesTable, "", "scenes.id = groups_scenes.scene_id") - add, agg := getSort("scene_index", direction, groupsScenesTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort("scene_index", direction, groupsScenesTable) case "group_scene_number": query.join(groupsScenesTable, "scene_group", "scenes.id = scene_group.scene_id") - add, agg := getSort("scene_index", direction, "scene_group") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort("scene_index", direction, "scene_group") case "tag_count": - query.addSort(getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction)) + query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) case "performer_count": - query.addSort(getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction)) + query.sortAndPagination += getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction) case "file_count": - query.addSort(getCountSort(sceneTable, scenesFilesTable, sceneIDColumn, direction)) + query.sortAndPagination += getCountSort(sceneTable, scenesFilesTable, 
sceneIDColumn, direction) case "path": // special handling for path addFileTable() addFolderTable() - query.addSort(fmt.Sprintf("COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction)) - query.addGroupBy([]string{"folders.path", "files.basename"}, true) + query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) case "perceptual_similarity": // special handling for phash addFileTable() @@ -1161,63 +1156,46 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF }, ) - query.addSort("fingerprints_phash.fingerprint " + direction + ", files.size DESC") - query.addGroupBy([]string{"fingerprints_phash.fingerprint", "files.size"}, true) + query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" case "bitrate": sort = "bit_rate" addVideoFileTable() - add, agg := getSort(sort, direction, videoFileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, videoFileTable) case "file_mod_time": sort = "mod_time" addFileTable() - add, agg := getSort(sort, direction, fileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, fileTable) case "framerate": sort = "frame_rate" addVideoFileTable() - add, agg := getSort(sort, direction, videoFileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, videoFileTable) case "filesize": addFileTable() - add, agg := getSort(sort, direction, fileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, fileTable) case "duration": addVideoFileTable() - add, agg := getSort(sort, direction, videoFileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, videoFileTable) case 
"interactive", "interactive_speed": addVideoFileTable() - add, agg := getSort(sort, direction, videoFileTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, videoFileTable) case "title": addFileTable() addFolderTable() - query.addSort("COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction) - query.addGroupBy([]string{"scenes.title", "files.basename", "folders.path"}, true) + query.sortAndPagination += " ORDER BY COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction case "play_count": - query.addSort(getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction)) + query.sortAndPagination += getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction) case "last_played_at": - query.addSort(fmt.Sprintf("(SELECT MAX(view_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesViewDatesTable, sceneIDColumn, sceneTable, getSortDirection(direction))) + query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(view_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesViewDatesTable, sceneIDColumn, sceneTable, getSortDirection(direction)) case "last_o_at": - query.addSort(fmt.Sprintf("(SELECT MAX(o_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesODatesTable, sceneIDColumn, sceneTable, getSortDirection(direction))) + query.sortAndPagination += fmt.Sprintf(" ORDER BY (SELECT MAX(o_date) FROM %s AS sort WHERE sort.%s = %s.id) %s", scenesODatesTable, sceneIDColumn, sceneTable, getSortDirection(direction)) case "o_counter": - query.addSort(getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction)) + query.sortAndPagination += getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction) default: - add, agg := getSort(sort, direction, "scenes") - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += 
getSort(sort, direction, "scenes") } // Whatever the sorting, always use title/id as a final sort - query.addSort("COALESCE(scenes.title, cast(scenes.id as text)) COLLATE NATURAL_CI ASC") - query.addGroupBy([]string{"scenes.title", "scenes.id"}, true) + query.sortAndPagination += ", COALESCE(scenes.title, CAST(scenes.id as text)) COLLATE NATURAL_CI ASC" return nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index d75a660e775..87a849d2084 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -318,7 +318,7 @@ func (qb *SceneMarkerStore) makeQuery(ctx context.Context, sceneMarkerFilter *mo if err := qb.setSceneMarkerSort(&query, findFilter); err != nil { return nil, err } - query.addPagination(getPagination(findFilter)) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -375,19 +375,15 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * case "scenes_updated_at": sort = "updated_at" query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") - add, agg := getSort(sort, direction, sceneTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, sceneTable) case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") - query.addSort("COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction) + query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction default: - add, agg := getSort(sort, direction, sceneMarkerTable) - query.addSort(add) - query.addGroupBy(agg, true) + query.sortAndPagination += getSort(sort, direction, sceneMarkerTable) } - query.addSort("scene_markers.scene_id ASC, scene_markers.seconds ASC") + query.sortAndPagination += ", scene_markers.scene_id ASC, scene_markers.seconds ASC" return nil } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index c9b037ca69e..d4bda4ba5e7 100644 --- 
a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -18,37 +18,29 @@ func selectAll(tableName string) string { func distinctIDs(qb *queryBuilder, tableName string) { if dbWrapper.dbType == PostgresBackend { - distinctOnIDs(qb, tableName) + distinctPGIDs(qb, tableName) return } columnId := getColumn(tableName, "id") - qb.addColumn("DISTINCT "+columnId, []string{columnId}) + qb.addColumn("DISTINCT " + columnId) qb.from = tableName } -func distinctOnIDs(qb *queryBuilder, tableName string) { +func distinctPGIDs(qb *queryBuilder, tableName string) { columnId := getColumn(tableName, "id") - qb.addColumn("DISTINCT ON ("+columnId+") "+columnId, nil) - qb.addSort(columnId) + + qb.addWhere("(" + columnId + " IN (SELECT DISTINCT " + columnId + " FROM " + tableName + "))") + qb.addColumn(columnId) qb.from = tableName } func selectIDs(qb *queryBuilder, tableName string) { columnId := getColumn(tableName, "id") - qb.addColumn(columnId, []string{columnId}) + qb.addColumn(columnId) qb.from = tableName } -func DBGroupConcat(columnName string) string { - switch dbWrapper.dbType { - case PostgresBackend: - return "STRING_AGG(" + columnName + "::TEXT, ',')" - default: - return "GROUP_CONCAT(" + columnName + ")" - } -} - func getColumn(tableName string, columnName string) string { return tableName + "." + columnName } @@ -101,20 +93,17 @@ func getSortDirection(direction string) string { return direction } } -func getSort(sort string, direction string, tableName string) (string, []string) { +func getSort(sort string, direction string, tableName string) string { direction = getSortDirection(direction) - nonaggregates := []string{} switch { case strings.HasSuffix(sort, "_count"): var relationTableName = strings.TrimSuffix(sort, "_count") // TODO: pluralize? 
colName := getColumn(relationTableName, "id") - nonaggregates = append(nonaggregates, colName) - return "COUNT(distinct " + colName + ") " + direction, nonaggregates + return " ORDER BY COUNT(distinct " + colName + ") " + direction case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") - nonaggregates = append(nonaggregates, colName) - return colName + " " + direction, nonaggregates + return " ORDER BY " + colName + " " + direction case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI seedStr := sort[len(randomSeedPrefix):] @@ -123,24 +112,22 @@ func getSort(sort string, direction string, tableName string) (string, []string) // fallback to a random seed seed = rand.Uint64() } - return getRandomSort(tableName, direction, seed), nonaggregates + return getRandomSort(tableName, direction, seed) case strings.Compare(sort, "random") == 0: - return getRandomSort(tableName, direction, rand.Uint64()), nonaggregates + return getRandomSort(tableName, direction, rand.Uint64()) default: colName := getColumn(tableName, sort) if strings.Contains(sort, ".") { colName = sort } - nonaggregates = append(nonaggregates, colName) - if strings.Compare(sort, "name") == 0 { - return colName + " COLLATE NATURAL_CI " + direction, nonaggregates + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction } if strings.Compare(sort, "title") == 0 { - return colName + " COLLATE NATURAL_CI " + direction, nonaggregates + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction } - return colName + " " + direction, nonaggregates + return " ORDER BY " + colName + " " + direction } } @@ -158,11 +145,11 @@ func getRandomSort(tableName string, direction string, seed uint64) string { // ORDER BY ((n+seed)*(n+seed)*p1 + (n+seed)*p2) % p3 // since sqlite converts overflowing numbers to reals, a custom db function that uses uints with overflow should be faster, // however in practice the overhead of calling a custom function 
vastly outweighs the benefits - return fmt.Sprintf("mod((%[1]s + %[2]d) * (%[1]s + %[2]d) * 52959209 + (%[1]s + %[2]d) * 1047483763, 2147483647) %[3]s", colName, seed, direction) + return fmt.Sprintf(" ORDER BY mod((%[1]s + %[2]d) * (%[1]s + %[2]d) * 52959209 + (%[1]s + %[2]d) * 1047483763, 2147483647) %[3]s", colName, seed, direction) } func getCountSort(primaryTable, joinTable, primaryFK, direction string) string { - return fmt.Sprintf("(SELECT COUNT(*) FROM %s AS sort WHERE sort.%s = %s.id) %s", joinTable, primaryFK, primaryTable, getSortDirection(direction)) + return fmt.Sprintf(" ORDER BY (SELECT COUNT(*) FROM %s AS sort WHERE sort.%s = %s.id) %s", joinTable, primaryFK, primaryTable, getSortDirection(direction)) } func getStringSearchClause(columns []string, q string, not bool) sqlClause { diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index b4870f20a35..0e27f3aa6f5 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -538,13 +538,12 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi return nil, err } - add, agg, err := qb.getStudioSort(findFilter) + var err error + query.sortAndPagination, err = qb.getStudioSort(findFilter) if err != nil { return nil, err } - query.addSort(add) - query.addPagination(getPagination(findFilter)) - query.addGroupBy(agg, true) + query.sortAndPagination += getPagination(findFilter) return &query, nil } @@ -590,7 +589,7 @@ var studioSortOptions = sortOptions{ "updated_at", } -func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, []string, error) { +func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, error) { var sort string var direction string if findFilter == nil { @@ -603,10 +602,9 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := studioSortOptions.validateSort(sort); err != nil { - return "", nil, err + return "", 
err } - var agg []string sortQuery := "" switch sort { case "tag_count": @@ -620,15 +618,12 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, case "child_count": sortQuery += getCountSort(studioTable, studioTable, studioParentIDColumn, direction) default: - var add string - add, agg = getSort(sort, direction, "studios") - sortQuery += add + sortQuery += getSort(sort, direction, "studios") } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(studios.name, cast(studios.id as text)) COLLATE NATURAL_CI ASC" - agg = append(agg, "studios.name", "studios.id") - return sortQuery, agg, nil + sortQuery += ", COALESCE(studios.name, CAST(studios.id as text)) COLLATE NATURAL_CI ASC" + return sortQuery, nil } func (qb *StudioStore) GetImage(ctx context.Context, studioID int) ([]byte, error) { diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 16c93901eab..c15ec21c26a 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -615,13 +615,12 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, return nil, 0, err } - add, agg, err := qb.getTagSort(&query, findFilter) + var err error + query.sortAndPagination, err = qb.getTagSort(&query, findFilter) if err != nil { return nil, 0, err } - query.addSort(add) - query.addPagination(getPagination(findFilter)) - query.addGroupBy(agg, true) + query.sortAndPagination += getPagination(findFilter) idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err @@ -652,11 +651,10 @@ var tagSortOptions = sortOptions{ } func (qb *TagStore) getDefaultTagSort() string { - add, _ := getSort("name", "ASC", "tags") - return " ORDER BY " + add + return getSort("name", "ASC", "tags") } -func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, []string, error) { +func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, error) { var sort string var 
direction string if findFilter == nil { @@ -669,16 +667,15 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := tagSortOptions.validateSort(sort); err != nil { - return "", nil, err + return "", err } sortQuery := "" - var agg []string switch sort { case "scenes_count": sortQuery += getCountSort(tagTable, scenesTagsTable, tagIDColumn, direction) case "scene_markers_count": - sortQuery += fmt.Sprintf("(SELECT COUNT(*) FROM scene_markers_tags WHERE tags.id = scene_markers_tags.tag_id)+(SELECT COUNT(*) FROM scene_markers WHERE tags.id = scene_markers.primary_tag_id) %s", getSortDirection(direction)) + sortQuery += fmt.Sprintf(" ORDER BY (SELECT COUNT(*) FROM scene_markers_tags WHERE tags.id = scene_markers_tags.tag_id)+(SELECT COUNT(*) FROM scene_markers WHERE tags.id = scene_markers.primary_tag_id) %s", getSortDirection(direction)) case "images_count": sortQuery += getCountSort(tagTable, imagesTagsTable, tagIDColumn, direction) case "galleries_count": @@ -690,15 +687,12 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte case "movies_count", "groups_count": sortQuery += getCountSort(tagTable, groupsTagsTable, tagIDColumn, direction) default: - var add string - add, agg = getSort(sort, direction, "tags") - sortQuery += add + sortQuery += getSort(sort, direction, "tags") } // Whatever the sorting, always use name/id as a final sort - sortQuery += ", COALESCE(tags.name, cast(tags.id as text)) COLLATE NATURAL_CI ASC" - agg = append(agg, "tags.name", "tags.id") - return sortQuery, agg, nil + sortQuery += ", COALESCE(tags.name, CAST(tags.id as text)) COLLATE NATURAL_CI ASC" + return sortQuery, nil } func (qb *TagStore) queryTags(ctx context.Context, query string, args []interface{}) ([]*models.Tag, error) { From 2a9b8dbd56984de5b8416324acc4cf514f7e9f4e Mon Sep 17 00:00:00 2001 From: Nodude 
<75137537+NodudeWasTaken@users.noreply.github.com> Date: Sun, 20 Oct 2024 22:04:13 +0200 Subject: [PATCH 51/85] small remove --- pkg/sqlite/query.go | 2 +- pkg/sqlite/repository.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 4cf24bbdd30..2555e3fec24 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -61,7 +61,7 @@ func (qb queryBuilder) findIDs(ctx context.Context) ([]int, error) { func (qb queryBuilder) executeFind(ctx context.Context) ([]int, int, error) { body := qb.body() - return qb.repository.executeFindQuery(ctx, body, qb.from, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) + return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) } func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index da2bba69b9f..b17c49d7b09 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -182,7 +182,7 @@ func (r *repository) buildQueryBody(body string, whereClauses []string, havingCl return body } -func (r *repository) executeFindQuery(ctx context.Context, body string, table string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { +func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { body = r.buildQueryBody(body, whereClauses, havingClauses) withClause := "" From db0cbc2c645116f9b96dfa04b5ae8e9218c2245c Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 21 Oct 2024 12:46:35 +0200 Subject: [PATCH 52/85] pgsql remove distinct 
(again), and use group by instead --- pkg/sqlite/file.go | 5 ++++- pkg/sqlite/gallery.go | 10 +++++++-- pkg/sqlite/group.go | 13 ++++++++--- pkg/sqlite/image.go | 11 ++++++++-- pkg/sqlite/performer.go | 16 +++++++++----- pkg/sqlite/query.go | 26 ++++++++++++++-------- pkg/sqlite/repository.go | 13 +++++++---- pkg/sqlite/scene.go | 40 +++++++++++++++++++++++++-------- pkg/sqlite/scene_marker.go | 8 +++++-- pkg/sqlite/sql.go | 30 ++++++++----------------- pkg/sqlite/studio.go | 16 +++++++++----- pkg/sqlite/tag.go | 45 +++++++++++++++++++++++++------------- 12 files changed, 155 insertions(+), 78 deletions(-) diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index d462db4d862..3711bf7da61 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -941,8 +941,11 @@ func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFi case "path": // special handling for path query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) + query.addGroupBy([]string{"folders.path", "files.basename"}) default: - query.sortAndPagination += getSort(sort, direction, "files") + add, agg := getSort(sort, direction, "files") + query.sortAndPagination += add + query.addGroupBy(agg) } return nil diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index f439325ff98..ee08fdd17ab 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -838,20 +838,26 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addGroupBy([]string{"folders.path", "file_folder.path", "files.basename"}) case "file_mod_time": sort = "mod_time" addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination 
+= add + query.addGroupBy(agg) case "title": addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction default: - query.sortAndPagination += getSort(sort, direction, "galleries") + add, agg := getSort(sort, direction, "galleries") + query.sortAndPagination += add + query.addGroupBy(agg) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(galleries.title, CAST(galleries.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"galleries.title", "galleries.id"}) return nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index a5b919ff7bb..86212356ecd 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -513,23 +513,30 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF case "sub_group_order": // sub_group_order is a special sort that sorts by the order_index of the subgroups if query.hasJoin("groups_parents") { - query.sortAndPagination += getSort("order_index", direction, "groups_parents") + add, agg := getSort("order_index", direction, "groups_parents") + query.sortAndPagination += add + query.addGroupBy(agg) } else { // this will give unexpected results if the query is not filtered by a parent group and // the group has multiple parents and order indexes query.join(groupRelationsTable, "", "groups.id = groups_relations.sub_id") - query.sortAndPagination += getSort("order_index", direction, groupRelationsTable) + add, agg := getSort("order_index", direction, groupRelationsTable) + query.sortAndPagination += add + query.addGroupBy(agg) } case "tag_count": query.sortAndPagination += getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction) case "scenes_count": // generic getSort won't work for this query.sortAndPagination += getCountSort(groupTable, groupsScenesTable, 
groupIDColumn, direction) default: - query.sortAndPagination += getSort(sort, direction, "groups") + add, agg := getSort(sort, direction, "groups") + query.sortAndPagination += add + query.addGroupBy(agg) } // Whatever the sorting, always use name/id as a final sort query.sortAndPagination += ", COALESCE(groups.name, CAST(groups.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"groups.name", "groups.id"}) return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index aafe740fb0d..dd946a536b9 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -969,6 +969,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction + q.addGroupBy([]string{"folders.path", "files.basename"}) case "file_count": sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) case "tag_count": @@ -977,17 +978,23 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) case "mod_time", "filesize": addFilesJoin() - sortClause = getSort(sort, direction, "files") + add, agg := getSort(sort, direction, "files") + sortClause = add + q.addGroupBy(agg) case "title": addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + q.addGroupBy([]string{"images.title", "files.basename", "folders.path"}) default: - sortClause = getSort(sort, direction, "images") + add, agg := getSort(sort, direction, "images") + sortClause = add + q.addGroupBy(agg) } // Whatever the sorting, always use title/id as a final sort sortClause += ", COALESCE(images.title, CAST(images.id as text)) COLLATE NATURAL_CI ASC" + q.addGroupBy([]string{"images.title", "images.id"}) 
} q.sortAndPagination = sortClause + getPagination(findFilter) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index d97217fe3ef..1951b79859b 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -613,11 +613,13 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models } var err error - query.sortAndPagination, err = qb.getPerformerSort(findFilter) + var agg []string + query.sortAndPagination, agg, err = qb.getPerformerSort(findFilter) if err != nil { return nil, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(agg) return &query, nil } @@ -731,7 +733,7 @@ var performerSortOptions = sortOptions{ "weight", } -func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, error) { +func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -744,9 +746,10 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := performerSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } + var agg []string sortQuery := "" switch sort { case "tag_count": @@ -766,12 +769,15 @@ func (qb *PerformerStore) getPerformerSort(findFilter *models.FindFilterType) (s case "last_o_at": sortQuery += qb.sortByLastOAt(direction) default: - sortQuery += getSort(sort, direction, "performers") + var add string + add, agg = getSort(sort, direction, "performers") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(performers.name, CAST(performers.id as text)) COLLATE NATURAL_CI ASC" - return sortQuery, nil + agg = append(agg, "performers.name", "performers.id") + return sortQuery, agg, nil } func (qb *PerformerStore) GetTagIDs(ctx context.Context, id int) ([]int, error) { diff --git a/pkg/sqlite/query.go 
b/pkg/sqlite/query.go index 2555e3fec24..77ab3bc44e5 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/stashapp/stash/pkg/models" + "github.com/stashapp/stash/pkg/sliceutil" ) type queryBuilder struct { @@ -14,12 +15,13 @@ type queryBuilder struct { columns []string from string - joins joins - whereClauses []string - havingClauses []string - args []interface{} - withClauses []string - recursiveWith bool + joins joins + whereClauses []string + havingClauses []string + args []interface{} + withClauses []string + recursiveWith bool + groupByClauses []string sortAndPagination string } @@ -32,6 +34,12 @@ func (qb *queryBuilder) addColumn(column string) { qb.columns = append(qb.columns, column) } +func (qb *queryBuilder) addGroupBy(columns []string) { + if len(columns) > 0 { + qb.groupByClauses = sliceutil.AppendUniques(qb.groupByClauses, columns) + } +} + func (qb queryBuilder) toSQL(includeSortPagination bool) string { body := qb.body() @@ -44,7 +52,7 @@ func (qb queryBuilder) toSQL(includeSortPagination bool) string { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) + body = withClause + qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) if includeSortPagination { body += qb.sortAndPagination @@ -61,7 +69,7 @@ func (qb queryBuilder) findIDs(ctx context.Context) ([]int, error) { func (qb queryBuilder) executeFind(ctx context.Context) ([]int, int, error) { body := qb.body() - return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.recursiveWith) + return qb.repository.executeFindQuery(ctx, body, qb.args, qb.sortAndPagination, qb.whereClauses, qb.havingClauses, qb.withClauses, qb.groupByClauses, qb.recursiveWith) } func (qb queryBuilder) executeCount(ctx context.Context) (int, error) 
{ @@ -76,7 +84,7 @@ func (qb queryBuilder) executeCount(ctx context.Context) (int, error) { withClause = "WITH " + recursive + strings.Join(qb.withClauses, ", ") + " " } - body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses) + body = qb.repository.buildQueryBody(body, qb.whereClauses, qb.havingClauses, qb.groupByClauses) countQuery := withClause + qb.repository.buildCountQuery(body) return qb.repository.runCountQuery(ctx, countQuery, qb.args) } diff --git a/pkg/sqlite/repository.go b/pkg/sqlite/repository.go index b17c49d7b09..3a3a08b331f 100644 --- a/pkg/sqlite/repository.go +++ b/pkg/sqlite/repository.go @@ -170,20 +170,25 @@ func (r *repository) querySimple(ctx context.Context, query string, args []inter return nil } -func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string) string { +func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string, groupByClauses []string) string { if len(whereClauses) > 0 { body = body + " WHERE " + strings.Join(whereClauses, " AND ") // TODO handle AND or OR } if len(havingClauses) > 0 { - body = body + " GROUP BY " + r.tableName + ".id " + groupByClauses = append(groupByClauses, r.tableName+".id") + } + if len(groupByClauses) > 0 { + body += " GROUP BY " + strings.Join(groupByClauses, ", ") + " " + } + if len(havingClauses) > 0 { body = body + " HAVING " + strings.Join(havingClauses, " AND ") // TODO handle AND or OR } return body } -func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) { - body = r.buildQueryBody(body, whereClauses, havingClauses) +func (r *repository) executeFindQuery(ctx context.Context, body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, groupByClauses []string, 
recursiveWith bool) ([]int, int, error) { + body = r.buildQueryBody(body, whereClauses, havingClauses, groupByClauses) withClause := "" if len(withClauses) > 0 { diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index be6dc5259b6..55d85431fc6 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1130,10 +1130,14 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF switch sort { case "movie_scene_number": query.join(groupsScenesTable, "", "scenes.id = groups_scenes.scene_id") - query.sortAndPagination += getSort("scene_index", direction, groupsScenesTable) + add, group := getSort("scene_index", direction, groupsScenesTable) + query.sortAndPagination += add + query.addGroupBy(group) case "group_scene_number": query.join(groupsScenesTable, "scene_group", "scenes.id = scene_group.scene_id") - query.sortAndPagination += getSort("scene_index", direction, "scene_group") + add, group := getSort("scene_index", direction, "scene_group") + query.sortAndPagination += add + query.addGroupBy(group) case "tag_count": query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) case "performer_count": @@ -1145,6 +1149,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) + query.addGroupBy([]string{"folders.path", "files.basename"}) case "perceptual_similarity": // special handling for phash addFileTable() @@ -1157,31 +1162,45 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF ) query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" + query.addGroupBy([]string{"fingerprints_phash.fingerprint", "files.size"}) case "bitrate": sort = "bit_rate" addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, 
videoFileTable) + add, group := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(group) case "file_mod_time": sort = "mod_time" addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "framerate": sort = "frame_rate" addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "filesize": addFileTable() - query.sortAndPagination += getSort(sort, direction, fileTable) + add, agg := getSort(sort, direction, fileTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "duration": addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "interactive", "interactive_speed": addVideoFileTable() - query.sortAndPagination += getSort(sort, direction, videoFileTable) + add, agg := getSort(sort, direction, videoFileTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "title": addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + query.addGroupBy([]string{"scenes.title", "files.basename", "folders.path"}) case "play_count": query.sortAndPagination += getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction) case "last_played_at": @@ -1191,11 +1210,14 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF case "o_counter": query.sortAndPagination += getCountSort(sceneTable, scenesODatesTable, sceneIDColumn, direction) default: - query.sortAndPagination += getSort(sort, direction, "scenes") + add, agg := 
getSort(sort, direction, "scenes") + query.sortAndPagination += add + query.addGroupBy(agg) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(scenes.title, CAST(scenes.id as text)) COLLATE NATURAL_CI ASC" + query.addGroupBy([]string{"scenes.title", "scenes.id"}) return nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 87a849d2084..3a7f5badd59 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -375,12 +375,16 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * case "scenes_updated_at": sort = "updated_at" query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") - query.sortAndPagination += getSort(sort, direction, sceneTable) + add, agg := getSort(sort, direction, sceneTable) + query.sortAndPagination += add + query.addGroupBy(agg) case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction default: - query.sortAndPagination += getSort(sort, direction, sceneMarkerTable) + add, agg := getSort(sort, direction, sceneMarkerTable) + query.sortAndPagination += add + query.addGroupBy(agg) } query.sortAndPagination += ", scene_markers.scene_id ASC, scene_markers.seconds ASC" diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index d4bda4ba5e7..87eb4c1e9f8 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -17,21 +17,9 @@ func selectAll(tableName string) string { } func distinctIDs(qb *queryBuilder, tableName string) { - if dbWrapper.dbType == PostgresBackend { - distinctPGIDs(qb, tableName) - return - } - columnId := getColumn(tableName, "id") - qb.addColumn("DISTINCT " + columnId) - qb.from = tableName -} - -func distinctPGIDs(qb *queryBuilder, tableName string) { - columnId := getColumn(tableName, "id") - - qb.addWhere("(" + columnId + " IN (SELECT DISTINCT " + columnId + " FROM 
" + tableName + "))") qb.addColumn(columnId) + qb.addGroupBy([]string{columnId}) qb.from = tableName } @@ -93,17 +81,17 @@ func getSortDirection(direction string) string { return direction } } -func getSort(sort string, direction string, tableName string) string { +func getSort(sort string, direction string, tableName string) (string, []string) { direction = getSortDirection(direction) switch { case strings.HasSuffix(sort, "_count"): var relationTableName = strings.TrimSuffix(sort, "_count") // TODO: pluralize? colName := getColumn(relationTableName, "id") - return " ORDER BY COUNT(distinct " + colName + ") " + direction + return " ORDER BY COUNT(distinct " + colName + ") " + direction, nil case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") - return " ORDER BY " + colName + " " + direction + return " ORDER BY " + colName + " " + direction, []string{colName} case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI seedStr := sort[len(randomSeedPrefix):] @@ -112,22 +100,22 @@ func getSort(sort string, direction string, tableName string) string { // fallback to a random seed seed = rand.Uint64() } - return getRandomSort(tableName, direction, seed) + return getRandomSort(tableName, direction, seed), nil case strings.Compare(sort, "random") == 0: - return getRandomSort(tableName, direction, rand.Uint64()) + return getRandomSort(tableName, direction, rand.Uint64()), nil default: colName := getColumn(tableName, sort) if strings.Contains(sort, ".") { colName = sort } if strings.Compare(sort, "name") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, []string{colName} } if strings.Compare(sort, "title") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, []string{colName} } - return " ORDER BY " + colName + " " + direction + 
return " ORDER BY " + colName + " " + direction, []string{colName} } } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 0e27f3aa6f5..0b2a09d4015 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -539,11 +539,13 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi } var err error - query.sortAndPagination, err = qb.getStudioSort(findFilter) + var group []string + query.sortAndPagination, group, err = qb.getStudioSort(findFilter) if err != nil { return nil, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(group) return &query, nil } @@ -589,7 +591,7 @@ var studioSortOptions = sortOptions{ "updated_at", } -func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, error) { +func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -602,9 +604,10 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := studioSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } + group := []string{} sortQuery := "" switch sort { case "tag_count": @@ -618,12 +621,15 @@ func (qb *StudioStore) getStudioSort(findFilter *models.FindFilterType) (string, case "child_count": sortQuery += getCountSort(studioTable, studioTable, studioParentIDColumn, direction) default: - sortQuery += getSort(sort, direction, "studios") + var add string + add, group = getSort(sort, direction, "studios") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(studios.name, CAST(studios.id as text)) COLLATE NATURAL_CI ASC" - return sortQuery, nil + group = append(group, "studios.name", "studios.id") + return sortQuery, group, nil } func (qb *StudioStore) GetImage(ctx context.Context, studioID int) ([]byte, error) { diff 
--git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index c15ec21c26a..eb93b0503ba 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -383,7 +383,8 @@ func (qb *TagStore) FindBySceneID(ctx context.Context, sceneID int) ([]*models.T WHERE scenes_join.scene_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{sceneID} return qb.queryTags(ctx, query, args) } @@ -395,7 +396,8 @@ func (qb *TagStore) FindByPerformerID(ctx context.Context, performerID int) ([]* WHERE performers_join.performer_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{performerID} return qb.queryTags(ctx, query, args) } @@ -407,7 +409,8 @@ func (qb *TagStore) FindByImageID(ctx context.Context, imageID int) ([]*models.T WHERE images_join.image_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{imageID} return qb.queryTags(ctx, query, args) } @@ -419,7 +422,8 @@ func (qb *TagStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mode WHERE galleries_join.gallery_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{galleryID} return qb.queryTags(ctx, query, args) } @@ -431,7 +435,8 @@ func (qb *TagStore) FindByGroupID(ctx context.Context, groupID int) ([]*models.T WHERE groups_join.group_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{groupID} return qb.queryTags(ctx, query, args) } @@ -443,7 +448,8 @@ func (qb *TagStore) FindBySceneMarkerID(ctx context.Context, sceneMarkerID int) WHERE scene_markers_join.scene_marker_id = ? 
GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{sceneMarkerID} return qb.queryTags(ctx, query, args) } @@ -455,7 +461,8 @@ func (qb *TagStore) FindByStudioID(ctx context.Context, studioID int) ([]*models WHERE studios_join.studio_id = ? GROUP BY tags.id ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{studioID} return qb.queryTags(ctx, query, args) } @@ -519,7 +526,8 @@ func (qb *TagStore) FindByParentTagID(ctx context.Context, parentID int) ([]*mod INNER JOIN tags_relations ON tags_relations.child_id = tags.id WHERE tags_relations.parent_id = ? ` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -530,7 +538,8 @@ func (qb *TagStore) FindByChildTagID(ctx context.Context, parentID int) ([]*mode INNER JOIN tags_relations ON tags_relations.parent_id = tags.id WHERE tags_relations.child_id = ? 
` - query += qb.getDefaultTagSort() + add, _ := qb.getDefaultTagSort() + query += add args := []interface{}{parentID} return qb.queryTags(ctx, query, args) } @@ -616,11 +625,13 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, } var err error - query.sortAndPagination, err = qb.getTagSort(&query, findFilter) + var group []string + query.sortAndPagination, group, err = qb.getTagSort(&query, findFilter) if err != nil { return nil, 0, err } query.sortAndPagination += getPagination(findFilter) + query.addGroupBy(group) idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err @@ -650,11 +661,11 @@ var tagSortOptions = sortOptions{ "updated_at", } -func (qb *TagStore) getDefaultTagSort() string { +func (qb *TagStore) getDefaultTagSort() (string, []string) { return getSort("name", "ASC", "tags") } -func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, error) { +func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilterType) (string, []string, error) { var sort string var direction string if findFilter == nil { @@ -667,9 +678,10 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte // CVE-2024-32231 - ensure sort is in the list of allowed sorts if err := tagSortOptions.validateSort(sort); err != nil { - return "", err + return "", nil, err } + group := []string{} sortQuery := "" switch sort { case "scenes_count": @@ -687,12 +699,15 @@ func (qb *TagStore) getTagSort(query *queryBuilder, findFilter *models.FindFilte case "movies_count", "groups_count": sortQuery += getCountSort(tagTable, groupsTagsTable, tagIDColumn, direction) default: - sortQuery += getSort(sort, direction, "tags") + var add string + add, group = getSort(sort, direction, "tags") + sortQuery += add } // Whatever the sorting, always use name/id as a final sort sortQuery += ", COALESCE(tags.name, CAST(tags.id as text)) COLLATE NATURAL_CI ASC" - return 
sortQuery, nil + group = append(group, "tags.name", "tags.id") + return sortQuery, group, nil } func (qb *TagStore) queryTags(ctx context.Context, query string, args []interface{}) ([]*models.Tag, error) { From 1ab44eb467de58f2a7213b1b0459e8b9a82331f0 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 21 Oct 2024 13:05:20 +0200 Subject: [PATCH 53/85] pgsql missing some groupby's (some like title are unnecessary, but it works for now) --- pkg/sqlite/file.go | 4 ++-- pkg/sqlite/gallery.go | 8 ++++---- pkg/sqlite/group.go | 8 ++++---- pkg/sqlite/image.go | 12 +++++++----- pkg/sqlite/performer.go | 2 +- pkg/sqlite/query.go | 2 +- pkg/sqlite/scene.go | 28 +++++++++++++++------------- pkg/sqlite/scene_marker.go | 4 ++-- pkg/sqlite/sql.go | 2 +- pkg/sqlite/studio.go | 2 +- pkg/sqlite/tag.go | 2 +- 11 files changed, 39 insertions(+), 35 deletions(-) diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 3711bf7da61..90c0ac04f95 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -941,11 +941,11 @@ func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFi case "path": // special handling for path query.sortAndPagination += fmt.Sprintf(" ORDER BY folders.path %s, files.basename %[1]s", direction) - query.addGroupBy([]string{"folders.path", "files.basename"}) + query.addGroupBy("folders.path", "files.basename") default: add, agg := getSort(sort, direction, "files") query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) 
} return nil diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index ee08fdd17ab..1e030bcdb8a 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -838,13 +838,13 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(file_folder.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) - query.addGroupBy([]string{"folders.path", "file_folder.path", "files.basename"}) + query.addGroupBy("folders.path", "file_folder.path", "files.basename") case "file_mod_time": sort = "mod_time" addFileTable() add, agg := getSort(sort, direction, fileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) case "title": addFileTable() addFolderTable() @@ -852,12 +852,12 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F default: add, agg := getSort(sort, direction, "galleries") query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(galleries.title, CAST(galleries.id as text)) COLLATE NATURAL_CI ASC" - query.addGroupBy([]string{"galleries.title", "galleries.id"}) + query.addGroupBy("galleries.title", "galleries.id") return nil } diff --git a/pkg/sqlite/group.go b/pkg/sqlite/group.go index 86212356ecd..4d4e54a001c 100644 --- a/pkg/sqlite/group.go +++ b/pkg/sqlite/group.go @@ -515,14 +515,14 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF if query.hasJoin("groups_parents") { add, agg := getSort("order_index", direction, "groups_parents") query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) 
} else { // this will give unexpected results if the query is not filtered by a parent group and // the group has multiple parents and order indexes query.join(groupRelationsTable, "", "groups.id = groups_relations.sub_id") add, agg := getSort("order_index", direction, groupRelationsTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) } case "tag_count": query.sortAndPagination += getCountSort(groupTable, groupsTagsTable, groupIDColumn, direction) @@ -531,12 +531,12 @@ func (qb *GroupStore) setGroupSort(query *queryBuilder, findFilter *models.FindF default: add, agg := getSort(sort, direction, "groups") query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) } // Whatever the sorting, always use name/id as a final sort query.sortAndPagination += ", COALESCE(groups.name, CAST(groups.id as text)) COLLATE NATURAL_CI ASC" - query.addGroupBy([]string{"groups.name", "groups.id"}) + query.addGroupBy("groups.name", "groups.id") return nil } diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index dd946a536b9..71a8e05843e 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -865,6 +865,7 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima ) query.addColumn("COALESCE(image_files.width, 0) * COALESCE(image_files.height, 0) as megapixels") aggregateQuery.addColumn("COALESCE(SUM(temp.megapixels), 0) / 1000000 as megapixels") + query.addGroupBy("image_files.width", "image_files.height") } if options.TotalSize { @@ -880,6 +881,7 @@ func (qb *ImageStore) queryGroupedFields(ctx context.Context, options models.Ima ) query.addColumn("COALESCE(files.size, 0) as size") aggregateQuery.addColumn("SUM(temp.size) as size") + query.addGroupBy("files.size") } const includeSortPagination = false @@ -969,7 +971,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(folders.path, '') || 
COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction - q.addGroupBy([]string{"folders.path", "files.basename"}) + q.addGroupBy("folders.path", "files.basename") case "file_count": sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) case "tag_count": @@ -980,21 +982,21 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod addFilesJoin() add, agg := getSort(sort, direction, "files") sortClause = add - q.addGroupBy(agg) + q.addGroupBy(agg...) case "title": addFilesJoin() addFolderJoin() sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction - q.addGroupBy([]string{"images.title", "files.basename", "folders.path"}) + q.addGroupBy("images.title", "files.basename", "folders.path") default: add, agg := getSort(sort, direction, "images") sortClause = add - q.addGroupBy(agg) + q.addGroupBy(agg...) } // Whatever the sorting, always use title/id as a final sort sortClause += ", COALESCE(images.title, CAST(images.id as text)) COLLATE NATURAL_CI ASC" - q.addGroupBy([]string{"images.title", "images.id"}) + q.addGroupBy("images.title", "images.id") } q.sortAndPagination = sortClause + getPagination(findFilter) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 1951b79859b..6fb604aedf1 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -619,7 +619,7 @@ func (qb *PerformerStore) makeQuery(ctx context.Context, performerFilter *models return nil, err } query.sortAndPagination += getPagination(findFilter) - query.addGroupBy(agg) + query.addGroupBy(agg...) 
return &query, nil } diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index 77ab3bc44e5..eba5b67f4c7 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -34,7 +34,7 @@ func (qb *queryBuilder) addColumn(column string) { qb.columns = append(qb.columns, column) } -func (qb *queryBuilder) addGroupBy(columns []string) { +func (qb *queryBuilder) addGroupBy(columns ...string) { if len(columns) > 0 { qb.groupByClauses = sliceutil.AppendUniques(qb.groupByClauses, columns) } diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 55d85431fc6..f927b301fa0 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1007,6 +1007,7 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce ) query.addColumn("COALESCE(video_files.duration, 0) as duration") aggregateQuery.addColumn("SUM(temp.duration) as duration") + query.addGroupBy("video_files.duration") } if options.TotalSize { @@ -1022,6 +1023,7 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce ) query.addColumn("COALESCE(files.size, 0) as size") aggregateQuery.addColumn("SUM(temp.size) as size") + query.addGroupBy("files.size") } const includeSortPagination = false @@ -1132,12 +1134,12 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF query.join(groupsScenesTable, "", "scenes.id = groups_scenes.scene_id") add, group := getSort("scene_index", direction, groupsScenesTable) query.sortAndPagination += add - query.addGroupBy(group) + query.addGroupBy(group...) case "group_scene_number": query.join(groupsScenesTable, "scene_group", "scenes.id = scene_group.scene_id") add, group := getSort("scene_index", direction, "scene_group") query.sortAndPagination += add - query.addGroupBy(group) + query.addGroupBy(group...) 
case "tag_count": query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction) case "performer_count": @@ -1149,7 +1151,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF addFileTable() addFolderTable() query.sortAndPagination += fmt.Sprintf(" ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI %s", direction) - query.addGroupBy([]string{"folders.path", "files.basename"}) + query.addGroupBy("folders.path", "files.basename") case "perceptual_similarity": // special handling for phash addFileTable() @@ -1162,45 +1164,45 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF ) query.sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC" - query.addGroupBy([]string{"fingerprints_phash.fingerprint", "files.size"}) + query.addGroupBy("fingerprints_phash.fingerprint", "files.size") case "bitrate": sort = "bit_rate" addVideoFileTable() add, group := getSort(sort, direction, videoFileTable) query.sortAndPagination += add - query.addGroupBy(group) + query.addGroupBy(group...) case "file_mod_time": sort = "mod_time" addFileTable() add, agg := getSort(sort, direction, fileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) case "framerate": sort = "frame_rate" addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) case "filesize": addFileTable() add, agg := getSort(sort, direction, fileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) case "duration": addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) 
case "interactive", "interactive_speed": addVideoFileTable() add, agg := getSort(sort, direction, videoFileTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) case "title": addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(scenes.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction - query.addGroupBy([]string{"scenes.title", "files.basename", "folders.path"}) + query.addGroupBy("scenes.title", "files.basename", "folders.path") case "play_count": query.sortAndPagination += getCountSort(sceneTable, scenesViewDatesTable, sceneIDColumn, direction) case "last_played_at": @@ -1212,12 +1214,12 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF default: add, agg := getSort(sort, direction, "scenes") query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) } // Whatever the sorting, always use title/id as a final sort query.sortAndPagination += ", COALESCE(scenes.title, CAST(scenes.id as text)) COLLATE NATURAL_CI ASC" - query.addGroupBy([]string{"scenes.title", "scenes.id"}) + query.addGroupBy("scenes.title", "scenes.id") return nil } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 3a7f5badd59..1070d02c05f 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -377,14 +377,14 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * query.join(sceneTable, "", "scenes.id = scene_markers.scene_id") add, agg := getSort(sort, direction, sceneTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) 
case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE NATURAL_CI " + direction default: add, agg := getSort(sort, direction, sceneMarkerTable) query.sortAndPagination += add - query.addGroupBy(agg) + query.addGroupBy(agg...) } query.sortAndPagination += ", scene_markers.scene_id ASC, scene_markers.seconds ASC" diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 87eb4c1e9f8..88df4846bc7 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -19,7 +19,7 @@ func selectAll(tableName string) string { func distinctIDs(qb *queryBuilder, tableName string) { columnId := getColumn(tableName, "id") qb.addColumn(columnId) - qb.addGroupBy([]string{columnId}) + qb.addGroupBy(columnId) qb.from = tableName } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index 0b2a09d4015..b5ba2624a00 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -545,7 +545,7 @@ func (qb *StudioStore) makeQuery(ctx context.Context, studioFilter *models.Studi return nil, err } query.sortAndPagination += getPagination(findFilter) - query.addGroupBy(group) + query.addGroupBy(group...) return &query, nil } diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index eb93b0503ba..4d58538ceea 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -631,7 +631,7 @@ func (qb *TagStore) Query(ctx context.Context, tagFilter *models.TagFilterType, return nil, 0, err } query.sortAndPagination += getPagination(findFilter) - query.addGroupBy(group) + query.addGroupBy(group...) 
idsResult, countResult, err := query.executeFind(ctx) if err != nil { return nil, 0, err From 754030488b4e158ec4540db827cee23fae5d8959 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 21 Oct 2024 13:31:15 +0200 Subject: [PATCH 54/85] pgsql fix more sql bugs --- internal/autotag/integration_test.go | 1 + pkg/sqlite/database.go | 1 + pkg/sqlite/database_postgres.go | 20 ++++++++++++++------ pkg/sqlite/database_sqlite.go | 3 +++ pkg/sqlite/gallery.go | 1 + pkg/sqlite/scene_marker.go | 1 + pkg/sqlite/setup_test.go | 1 + 7 files changed, 22 insertions(+), 6 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index a79428652b6..7e47624ebd0 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -70,6 +70,7 @@ func runTests(m *testing.M) int { if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } + db.TestMode() r = db.Repository() diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 955b4104668..d857f18b721 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -130,6 +130,7 @@ type DBInterface interface { Vacuum(ctx context.Context) error Version() uint WithDatabase(ctx context.Context) (context.Context, error) + TestMode() } type Database struct { diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 601cf302919..55e6377ecb3 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -38,20 +38,28 @@ func (db *PostgresDB) unlock() {} func (db *PostgresDB) openReadDB() error { const ( disableForeignKeys = false - writable = true + writable = false ) var err error db.readDB, err = db.open(disableForeignKeys, writable) db.readDB.SetConnMaxIdleTime(dbConnTimeout) - db.writeDB = db.readDB return err } func (db *PostgresDB) openWriteDB() error { - if db.writeDB == nil { - return db.openReadDB() - } - 
return nil + const ( + disableForeignKeys = false + writable = true + ) + var err error + db.writeDB, err = db.open(disableForeignKeys, writable) + db.writeDB.SetConnMaxIdleTime(dbConnTimeout) + return err +} + +// Ensure single connection for testing to avoid race conditions +func (db *PostgresDB) TestMode() { + db.readDB = db.writeDB } func (db *PostgresDB) DatabaseType() DatabaseType { diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 2a0c7c65439..d652190b9c3 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -42,6 +42,9 @@ func NewSQLiteDatabase(dbPath string, init bool) *SQLiteDB { return db } +// Does nothing +func (db *SQLiteDB) TestMode() {} + // lock locks the database for writing. This method will block until the lock is acquired. func (db *SQLiteDB) lock() { db.lockChan <- struct{}{} diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 1e030bcdb8a..6bf5060255d 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -849,6 +849,7 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F addFileTable() addFolderTable() query.sortAndPagination += " ORDER BY COALESCE(galleries.title, files.basename, basename(COALESCE(folders.path, ''))) COLLATE NATURAL_CI " + direction + ", file_folder.path COLLATE NATURAL_CI " + direction + query.addGroupBy("galleries.title", "files.basename", "folders.path", "file_folder.path") default: add, agg := getSort(sort, direction, "galleries") query.sortAndPagination += add diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index 1070d02c05f..a6fa5033fd3 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -381,6 +381,7 @@ func (qb *SceneMarkerStore) setSceneMarkerSort(query *queryBuilder, findFilter * case "title": query.join(tagTable, "", "scene_markers.primary_tag_id = tags.id") query.sortAndPagination += " ORDER BY COALESCE(NULLIF(scene_markers.title,''), tags.name) COLLATE 
NATURAL_CI " + direction + query.addGroupBy("scene_markers.title", "tags.name") default: add, agg := getSort(sort, direction, sceneMarkerTable) query.sortAndPagination += add diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index e9d166091e4..3944fc68f2f 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -661,6 +661,7 @@ func runTests(m *testing.M) int { if err := db.Open(); err != nil { panic(fmt.Sprintf("Could not initialize database: %s", err.Error())) } + db.TestMode() // defer close and delete the database defer testTeardown(databaseFile) From b50070c349ec733b0a88e10ef6b6ef293fd9f669 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 21 Oct 2024 13:36:33 +0200 Subject: [PATCH 55/85] missed --- pkg/sqlite/performer.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 6fb604aedf1..79495de2b9a 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -654,12 +654,12 @@ func (qb *PerformerStore) QueryCount(ctx context.Context, performerFilter *model func (qb *PerformerStore) sortByOCounter(direction string) string { // need to sum the o_counter from scenes and images - return " (" + selectPerformerOCountSQL + ") " + direction + return " ORDER BY (" + selectPerformerOCountSQL + ") " + direction } func (qb *PerformerStore) sortByPlayCount(direction string) string { // need to sum the o_counter from scenes and images - return " (" + selectPerformerPlayCountSQL + ") " + direction + return " ORDER BY (" + selectPerformerPlayCountSQL + ") " + direction } // used for sorting on performer last o_date @@ -683,7 +683,7 @@ var selectPerformerLastOAtSQL = utils.StrFormat( func (qb *PerformerStore) sortByLastOAt(direction string) string { // need to get the o_dates from scenes - return " (" + selectPerformerLastOAtSQL + ") " + direction + return " ORDER BY (" + selectPerformerLastOAtSQL + ") " + 
direction } // used for sorting on performer last view_date @@ -707,7 +707,7 @@ var selectPerformerLastPlayedAtSQL = utils.StrFormat( func (qb *PerformerStore) sortByLastPlayedAt(direction string) string { // need to get the view_dates from scenes - return " (" + selectPerformerLastPlayedAtSQL + ") " + direction + return " ORDER BY (" + selectPerformerLastPlayedAtSQL + ") " + direction } var performerSortOptions = sortOptions{ From 45f27571895a2fa00a115853ba856686deeb4b79 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 22 Oct 2024 17:39:19 +0200 Subject: [PATCH 56/85] in postgresql errors cause the transaction to cancel, fix this by using savepoint --- pkg/sqlite/blob.go | 23 +++++++++++++++++++---- pkg/sqlite/table.go | 27 +++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 21b1f78b86d..0c814ffa183 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -9,6 +9,7 @@ import ( "github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" + "github.com/jackc/pgx/v5/pgconn" "github.com/jmoiron/sqlx" "github.com/mattn/go-sqlite3" "github.com/stashapp/stash/pkg/file" @@ -307,12 +308,17 @@ func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) (sql // Delete marks a checksum as no longer in use by a single reference. // If no references remain, the blob is deleted from the database and filesystem. 
func (qb *BlobStore) Delete(ctx context.Context, checksum string) error { + rollid, err := savepoint(ctx) + if err != nil { + return fmt.Errorf("savepoint %s: %w", rollid, err) + } + // try to delete the blob from the database if err := qb.delete(ctx, checksum); err != nil { if qb.isConstraintError(err) { // blob is still referenced - do not delete logger.Debugf("Blob %s is still referenced - not deleting", checksum) - return nil + return rollbackToSavepoint(ctx, rollid) } // unexpected error @@ -331,9 +337,18 @@ func (qb *BlobStore) Delete(ctx context.Context, checksum string) error { } func (qb *BlobStore) isConstraintError(err error) bool { - var sqliteError sqlite3.Error - if errors.As(err, &sqliteError) { - return sqliteError.Code == sqlite3.ErrConstraint + switch dbWrapper.dbType { + case PostgresBackend: + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + // Class 23 — Integrity Constraint Violation + return pgErr.Code[:2] == "23" + } + case SqliteBackend: + var sqliteError sqlite3.Error + if errors.As(err, &sqliteError) { + return sqliteError.Code == sqlite3.ErrConstraint + } } return false } diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index b2c473b9f0c..cba0d53c2e9 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -12,6 +12,7 @@ import ( "github.com/jmoiron/sqlx" "gopkg.in/guregu/null.v4" + "github.com/stashapp/stash/pkg/hash" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sliceutil" @@ -1154,6 +1155,32 @@ func execID(ctx context.Context, stmt sqler) (*int64, error) { return &id, nil } +func savepoint(ctx context.Context) (string, error) { + tx, err := getTx(ctx) + if err != nil { + return "", err + } + + // Generate savepoint + rnd, err := hash.GenerateRandomKey(64) + if err != nil { + return "", err + } + + _, err = tx.QueryxContext(ctx, "SAVEPOINT "+rnd) + return rnd, err +} + +func rollbackToSavepoint(ctx context.Context, id string) error { + tx, err := 
getTx(ctx) + if err != nil { + return err + } + + _, err = tx.QueryxContext(ctx, "ROLLBACK TO SAVEPOINT "+id) + return err +} + func count(ctx context.Context, q *goqu.SelectDataset) (int, error) { var count int if err := querySimple(ctx, q, &count); err != nil { From 9654813c06e2e8a8ee3b9c435539d9e37d87f78f Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 23 Oct 2024 01:19:19 +0200 Subject: [PATCH 57/85] rollback wrapper --- pkg/sqlite/blob.go | 14 ++++++------ pkg/sqlite/performer_test.go | 9 +++++--- pkg/sqlite/table.go | 27 ------------------------ pkg/sqlite/tx.go | 41 ++++++++++++++++++++++++++++++++++++ 4 files changed, 53 insertions(+), 38 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 0c814ffa183..36703ea1d31 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -308,17 +308,12 @@ func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) (sql // Delete marks a checksum as no longer in use by a single reference. // If no references remain, the blob is deleted from the database and filesystem. 
func (qb *BlobStore) Delete(ctx context.Context, checksum string) error { - rollid, err := savepoint(ctx) - if err != nil { - return fmt.Errorf("savepoint %s: %w", rollid, err) - } - // try to delete the blob from the database if err := qb.delete(ctx, checksum); err != nil { if qb.isConstraintError(err) { // blob is still referenced - do not delete logger.Debugf("Blob %s is still referenced - not deleting", checksum) - return rollbackToSavepoint(ctx, rollid) + return nil } // unexpected error @@ -358,11 +353,14 @@ func (qb *BlobStore) delete(ctx context.Context, checksum string) error { q := dialect.Delete(table).Where(goqu.C(blobChecksumColumn).Eq(checksum)) - _, err := exec(ctx, q) + err := withSavepoint(ctx, func(ctx context.Context) error { + _, err := exec(ctx, q) + return err + }) + if err != nil { return fmt.Errorf("deleting from %s: %w", table, err) } - return nil } diff --git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index d900eed9f15..254d07dd8bd 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -1155,9 +1155,12 @@ func TestPerformerQueryForAutoTag(t *testing.T) { t.Errorf("Error finding performers: %s", err.Error()) } - assert.Len(t, performers, 2) - assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[0].Name)) - assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[1].Name)) + if assert.Len(t, performers, 2) { + assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[0].Name)) + assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[1].Name)) + } else { + t.Errorf("Skipping performer comparison as atleast 1 is missing") + } return nil }) diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index cba0d53c2e9..b2c473b9f0c 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -12,7 +12,6 @@ import ( 
"github.com/jmoiron/sqlx" "gopkg.in/guregu/null.v4" - "github.com/stashapp/stash/pkg/hash" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/sliceutil" @@ -1155,32 +1154,6 @@ func execID(ctx context.Context, stmt sqler) (*int64, error) { return &id, nil } -func savepoint(ctx context.Context) (string, error) { - tx, err := getTx(ctx) - if err != nil { - return "", err - } - - // Generate savepoint - rnd, err := hash.GenerateRandomKey(64) - if err != nil { - return "", err - } - - _, err = tx.QueryxContext(ctx, "SAVEPOINT "+rnd) - return rnd, err -} - -func rollbackToSavepoint(ctx context.Context, id string) error { - tx, err := getTx(ctx) - if err != nil { - return err - } - - _, err = tx.QueryxContext(ctx, "ROLLBACK TO SAVEPOINT "+id) - return err -} - func count(ctx context.Context, q *goqu.SelectDataset) (int, error) { var count int if err := querySimple(ctx, q, &count); err != nil { diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index e0aa2265485..46eb68fb76a 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -7,6 +7,7 @@ import ( "time" "github.com/jmoiron/sqlx" + "github.com/stashapp/stash/pkg/hash" "github.com/stashapp/stash/pkg/logger" ) @@ -174,3 +175,43 @@ func (db *dbWrapperType) ExecStmt(ctx context.Context, stmt *stmt, args ...inter return ret, sqlError(err, stmt.query, args...) 
} + +type SavepointAction func(ctx context.Context) error + +func withSavepoint(ctx context.Context, action SavepointAction) error { + tx, err := getTx(ctx) + if err != nil { + return err + } + + // Generate savepoint + rnd, err := hash.GenerateRandomKey(64) + if err != nil { + return err + } + rnd = "savepoint_" + rnd + + // Create a savepoint + _, err = tx.Exec("SAVEPOINT " + rnd) + if err != nil { + return fmt.Errorf("failed to create savepoint: %w", err) + } + + // Execute the action + err = action(ctx) + if err != nil { + // Rollback to savepoint on error + if _, rbErr := tx.Exec("ROLLBACK TO SAVEPOINT " + rnd); rbErr != nil { + return fmt.Errorf("action failed and rollback to savepoint failed: %w", rbErr) + } + return fmt.Errorf("action failed: %w", err) + } + + // Release the savepoint on success + _, err = tx.Exec("RELEASE SAVEPOINT " + rnd) + if err != nil { + return fmt.Errorf("failed to release savepoint: %w", err) + } + + return nil +} From a27710befaf7441348e4d9baca8a943f27baa1d6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 23 Oct 2024 01:30:33 +0200 Subject: [PATCH 58/85] IsLocked for pgsql (i think) --- pkg/sqlite/transaction.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/sqlite/transaction.go b/pkg/sqlite/transaction.go index fb86723bdff..17b927cfff9 100644 --- a/pkg/sqlite/transaction.go +++ b/pkg/sqlite/transaction.go @@ -6,6 +6,7 @@ import ( "fmt" "runtime/debug" + "github.com/jackc/pgx/v5/pgconn" "github.com/jmoiron/sqlx" "github.com/mattn/go-sqlite3" "github.com/stashapp/stash/pkg/logger" @@ -108,9 +109,18 @@ func getDBReader(ctx context.Context) (dbReader, error) { } func (db *Database) IsLocked(err error) bool { - var sqliteError sqlite3.Error - if errors.As(err, &sqliteError) { - return sqliteError.Code == sqlite3.ErrBusy + switch dbWrapper.dbType { + case PostgresBackend: + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + // Class 53 
— Insufficient Resources + return pgErr.Code[:2] == "53" + } + case SqliteBackend: + var sqliteError sqlite3.Error + if errors.As(err, &sqliteError) { + return sqliteError.Code == sqlite3.ErrBusy + } } return false } From 3487545d4b34db754220bfa574d572d0dcbf69ea Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:03:06 +0200 Subject: [PATCH 59/85] Fix ismissing for pgsql --- pkg/sqlite/gallery_filter.go | 2 +- pkg/sqlite/group_filter.go | 2 +- pkg/sqlite/image_filter.go | 2 +- pkg/sqlite/performer_filter.go | 2 +- pkg/sqlite/scene_filter.go | 2 +- pkg/sqlite/studio_filter.go | 2 +- pkg/sqlite/tag_filter.go | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index d4483ea3547..b0d7a63b4f0 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -276,7 +276,7 @@ func (qb *galleryFilterHandler) missingCriterionHandler(isMissing *string) crite galleryRepository.tags.join(f, "tags_join", "galleries.id") f.addWhere("tags_join.gallery_id IS NULL") default: - f.addWhere("(galleries." + *isMissing + " IS NULL OR TRIM(galleries." + *isMissing + ") = '')") + f.addWhere("(galleries." + *isMissing + " IS NULL OR TRIM(CAST(galleries." + *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/group_filter.go b/pkg/sqlite/group_filter.go index dcb7bcdfc94..11bc51bd49d 100644 --- a/pkg/sqlite/group_filter.go +++ b/pkg/sqlite/group_filter.go @@ -110,7 +110,7 @@ func (qb *groupFilterHandler) missingCriterionHandler(isMissing *string) criteri f.addLeftJoin("groups_scenes", "", "groups_scenes.group_id = groups.id") f.addWhere("groups_scenes.scene_id IS NULL") default: - f.addWhere("(groups." + *isMissing + " IS NULL OR TRIM(groups." + *isMissing + ") = '')") + f.addWhere("(groups." + *isMissing + " IS NULL OR TRIM(CAST(groups." 
+ *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/image_filter.go b/pkg/sqlite/image_filter.go index 255451f178f..ed38d993dfa 100644 --- a/pkg/sqlite/image_filter.go +++ b/pkg/sqlite/image_filter.go @@ -152,7 +152,7 @@ func (qb *imageFilterHandler) missingCriterionHandler(isMissing *string) criteri imageRepository.tags.join(f, "tags_join", "images.id") f.addWhere("tags_join.image_id IS NULL") default: - f.addWhere("(images." + *isMissing + " IS NULL OR TRIM(images." + *isMissing + ") = '')") + f.addWhere("(images." + *isMissing + " IS NULL OR TRIM(CAST(images." + *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/performer_filter.go b/pkg/sqlite/performer_filter.go index f92777245ea..d89454819b6 100644 --- a/pkg/sqlite/performer_filter.go +++ b/pkg/sqlite/performer_filter.go @@ -226,7 +226,7 @@ func (qb *performerFilterHandler) performerIsMissingCriterionHandler(isMissing * performersAliasesTableMgr.join(f, "", "performers.id") f.addWhere("performer_aliases.alias IS NULL") default: - f.addWhere("(performers." + *isMissing + " IS NULL OR TRIM(performers." + *isMissing + ") = '')") + f.addWhere("(performers." + *isMissing + " IS NULL OR TRIM(CAST(performers." + *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 00ea8cb8d7f..494fb4a410f 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -344,7 +344,7 @@ func (qb *sceneFilterHandler) isMissingCriterionHandler(isMissing *string) crite case "cover": f.addWhere("scenes.cover_blob IS NULL") default: - f.addWhere("(scenes." + *isMissing + " IS NULL OR TRIM(scenes." + *isMissing + ") = '')") + f.addWhere("(scenes." + *isMissing + " IS NULL OR TRIM(CAST(scenes." 
+ *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/studio_filter.go b/pkg/sqlite/studio_filter.go index c514364c4ff..07bfb4e8ae8 100644 --- a/pkg/sqlite/studio_filter.go +++ b/pkg/sqlite/studio_filter.go @@ -124,7 +124,7 @@ func (qb *studioFilterHandler) isMissingCriterionHandler(isMissing *string) crit studioRepository.stashIDs.join(f, "studio_stash_ids", "studios.id") f.addWhere("studio_stash_ids.studio_id IS NULL") default: - f.addWhere("(studios." + *isMissing + " IS NULL OR TRIM(studios." + *isMissing + ") = '')") + f.addWhere("(studios." + *isMissing + " IS NULL OR TRIM(CAST(studios." + *isMissing + " AS TEXT)) = '')") } } } diff --git a/pkg/sqlite/tag_filter.go b/pkg/sqlite/tag_filter.go index ba9e9bb08ec..638fca5e853 100644 --- a/pkg/sqlite/tag_filter.go +++ b/pkg/sqlite/tag_filter.go @@ -136,7 +136,7 @@ func (qb *tagFilterHandler) isMissingCriterionHandler(isMissing *string) criteri case "image": f.addWhere("tags.image_blob IS NULL") default: - f.addWhere("(tags." + *isMissing + " IS NULL OR TRIM(tags." + *isMissing + ") = '')") + f.addWhere("(tags." + *isMissing + " IS NULL OR TRIM(CAST(tags." + *isMissing + " AS TEXT)) = '')") } } } From 2239acd530d354523427662680d4a0033274d95e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:42:25 +0200 Subject: [PATCH 60/85] pgsql bugfixes Sort by nulls last fix for tests. 
UUID handler Switch strftime for native postgresql types Fix bug in performer studio filter --- pkg/sqlite/criterion_handlers.go | 56 ++++++++++++++++++++++++++++++-- pkg/sqlite/gallery_filter.go | 10 +++++- pkg/sqlite/image_filter.go | 10 +++++- pkg/sqlite/performer_filter.go | 27 +++++++++++---- pkg/sqlite/scene_filter.go | 12 +++++-- pkg/sqlite/sql.go | 13 +++++--- pkg/sqlite/studio_filter.go | 2 +- 7 files changed, 112 insertions(+), 18 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index 2cc0d91055b..e9fa8cb0f51 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -70,6 +70,49 @@ func stringCriterionHandler(c *models.StringCriterionInput, column string) crite } } +func uuidCriterionHandler(c *models.StringCriterionInput, column string) criterionHandlerFunc { + if dbWrapper.dbType == SqliteBackend { + return stringCriterionHandler(c, column) + } + + return func(ctx context.Context, f *filterBuilder) { + columnCast := "CAST(" + column + " AS TEXT)" + + if c != nil { + if modifier := c.Modifier; c.Modifier.IsValid() { + switch modifier { + case models.CriterionModifierIncludes: + f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{columnCast}, c.Value, false)) + case models.CriterionModifierExcludes: + f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{columnCast}, c.Value, true)) + case models.CriterionModifierEquals: + f.addWhere(columnCast+" LIKE ?", c.Value) + case models.CriterionModifierNotEquals: + f.addWhere(columnCast+" NOT LIKE ?", c.Value) + case models.CriterionModifierMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + f.addWhere(fmt.Sprintf("(%s IS NOT NULL AND regexp(?, %s))", column, columnCast), c.Value) + case models.CriterionModifierNotMatchesRegex: + if _, err := regexp.Compile(c.Value); err != nil { + f.setError(err) + return + } + f.addWhere(fmt.Sprintf("(%s IS NULL OR NOT 
regexp(?, %s))", column, columnCast), c.Value) + case models.CriterionModifierIsNull: + f.addWhere("(" + column + " IS NULL)") + case models.CriterionModifierNotNull: + f.addWhere("(" + column + " IS NOT NULL)") + default: + panic("unsupported string filter modifier") + } + } + } + } +} + func enumCriterionHandler(modifier models.CriterionModifier, values []string, column string) criterionHandlerFunc { return func(ctx context.Context, f *filterBuilder) { if modifier.IsValid() { @@ -267,7 +310,16 @@ func (h *timestampCriterionHandler) handle(ctx context.Context, f *filterBuilder func yearFilterCriterionHandler(year *models.IntCriterionInput, col string) criterionHandlerFunc { return func(ctx context.Context, f *filterBuilder) { if year != nil && year.Modifier.IsValid() { - clause, args := getIntCriterionWhereClause("cast(strftime('%Y', "+col+") as int)", *year) + var clause string + var args []interface{} + + switch dbWrapper.dbType { + case PostgresBackend: + clause, args = getIntCriterionWhereClause("TO_CHAR("+col+", 'YYYY')::int", *year) + case SqliteBackend: + clause, args = getIntCriterionWhereClause("cast(strftime('%Y', "+col+") as int)", *year) + } + f.addWhere(clause, args...) 
} } @@ -1022,7 +1074,7 @@ func (h *stashIDCriterionHandler) handle(ctx context.Context, f *filterBuilder) v = *h.c.StashID } - stringCriterionHandler(&models.StringCriterionInput{ + uuidCriterionHandler(&models.StringCriterionInput{ Value: v, Modifier: h.c.Modifier, }, t+".stash_id")(ctx, f) diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index b0d7a63b4f0..228f0e05b25 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -403,7 +403,15 @@ func (qb *galleryFilterHandler) performerAgeCriterionHandler(performerAge *model f.addWhere("galleries.date != '' AND performers.birthdate != ''") f.addWhere("galleries.date IS NOT NULL AND performers.birthdate IS NOT NULL") - ageCalc := "cast(strftime('%Y.%m%d', galleries.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + var ageCalc string + + switch dbWrapper.dbType { + case PostgresBackend: + ageCalc = "EXTRACT(YEAR FROM AGE(galleries.date, performers.birthdate))" + case SqliteBackend: + ageCalc = "cast(strftime('%Y.%m%d', galleries.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + } + whereClause, args := getIntWhereClause(ageCalc, performerAge.Modifier, performerAge.Value, performerAge.Value2) f.addWhere(whereClause, args...) } diff --git a/pkg/sqlite/image_filter.go b/pkg/sqlite/image_filter.go index ed38d993dfa..e440e39fa13 100644 --- a/pkg/sqlite/image_filter.go +++ b/pkg/sqlite/image_filter.go @@ -275,7 +275,15 @@ func (qb *imageFilterHandler) performerAgeCriterionHandler(performerAge *models. 
f.addWhere("images.date != '' AND performers.birthdate != ''") f.addWhere("images.date IS NOT NULL AND performers.birthdate IS NOT NULL") - ageCalc := "cast(strftime('%Y.%m%d', images.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + var ageCalc string + + switch dbWrapper.dbType { + case PostgresBackend: + ageCalc = "EXTRACT(YEAR FROM AGE(images.date, performers.birthdate))" + case SqliteBackend: + ageCalc = "cast(strftime('%Y.%m%d', images.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + } + whereClause, args := getIntWhereClause(ageCalc, performerAge.Modifier, performerAge.Value, performerAge.Value2) f.addWhere(whereClause, args...) } diff --git a/pkg/sqlite/performer_filter.go b/pkg/sqlite/performer_filter.go index d89454819b6..dad7cd1062e 100644 --- a/pkg/sqlite/performer_filter.go +++ b/pkg/sqlite/performer_filter.go @@ -139,7 +139,7 @@ func (qb *performerFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if filter.StashID != nil { performerRepository.stashIDs.join(f, "performer_stash_ids", "performers.id") - stringCriterionHandler(filter.StashID, "performer_stash_ids.stash_id")(ctx, f) + uuidCriterionHandler(filter.StashID, "performer_stash_ids.stash_id")(ctx, f) } }), &stashIDCriterionHandler{ @@ -235,10 +235,23 @@ func (qb *performerFilterHandler) performerIsMissingCriterionHandler(isMissing * func (qb *performerFilterHandler) performerAgeFilterCriterionHandler(age *models.IntCriterionInput) criterionHandlerFunc { return func(ctx context.Context, f *filterBuilder) { if age != nil && age.Modifier.IsValid() { - clause, args := getIntCriterionWhereClause( - "cast(IFNULL(strftime('%Y.%m%d', performers.death_date), strftime('%Y.%m%d', 'now')) - strftime('%Y.%m%d', performers.birthdate) as int)", - *age, - ) + + var clause string + var args []interface{} + + switch dbWrapper.dbType { + case PostgresBackend: + clause, args = getIntCriterionWhereClause( + "EXTRACT(YEAR FROM 
COALESCE(performers.death_date, CURRENT_DATE)) - EXTRACT(YEAR FROM performers.birthdate)", + *age, + ) + case SqliteBackend: + clause, args = getIntCriterionWhereClause( + "cast(IFNULL(strftime('%Y.%m%d', performers.death_date), strftime('%Y.%m%d', 'now')) - strftime('%Y.%m%d', performers.birthdate) as int)", + *age, + ) + } + f.addWhere(clause, args...) } } @@ -456,12 +469,12 @@ func (qb *performerFilterHandler) studiosCriterionHandler(studios *models.Hierar } const derivedPerformerStudioTable = "performer_studio" - valuesClause, err := getHierarchicalValues(ctx, studios.Value, studioTable, "", "parent_id", "child_id", studios.Depth, true) + valuesClause, err := getHierarchicalValues(ctx, studios.Value, studioTable, "", "parent_id", "child_id", studios.Depth, false) if err != nil { f.setError(err) return } - f.addWith("studio(root_id, item_id) AS " + valuesClause) + f.addWith("studio(root_id, item_id) AS (" + valuesClause + ")") templStr := `SELECT performer_id FROM {primaryTable} INNER JOIN {joinTable} ON {primaryTable}.id = {joinTable}.{primaryFK} diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 494fb4a410f..f08fcd01ccb 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -111,7 +111,7 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if sceneFilter.StashID != nil { sceneRepository.stashIDs.join(f, "scene_stash_ids", "scenes.id") - stringCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(ctx, f) + uuidCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(ctx, f) } }), @@ -480,7 +480,15 @@ func (qb *sceneFilterHandler) performerAgeCriterionHandler(performerAge *models. 
f.addWhere("scenes.date != '' AND performers.birthdate != ''") f.addWhere("scenes.date IS NOT NULL AND performers.birthdate IS NOT NULL") - ageCalc := "cast(strftime('%Y.%m%d', scenes.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + var ageCalc string + + switch dbWrapper.dbType { + case PostgresBackend: + ageCalc = "EXTRACT(YEAR FROM AGE(scenes.date, performers.birthdate))" + case SqliteBackend: + ageCalc = "cast(strftime('%Y.%m%d', scenes.date) - strftime('%Y.%m%d', performers.birthdate) as int)" + } + whereClause, args := getIntWhereClause(ageCalc, performerAge.Modifier, performerAge.Value, performerAge.Value2) f.addWhere(whereClause, args...) } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 88df4846bc7..65f9fb4b23b 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -84,6 +84,11 @@ func getSortDirection(direction string) string { func getSort(sort string, direction string, tableName string) (string, []string) { direction = getSortDirection(direction) + nullsfix := "" + if dbWrapper.dbType == PostgresBackend { + nullsfix = " NULLS LAST" + } + switch { case strings.HasSuffix(sort, "_count"): var relationTableName = strings.TrimSuffix(sort, "_count") // TODO: pluralize? 
@@ -91,7 +96,7 @@ func getSort(sort string, direction string, tableName string) (string, []string) return " ORDER BY COUNT(distinct " + colName + ") " + direction, nil case strings.Compare(sort, "filesize") == 0: colName := getColumn(tableName, "size") - return " ORDER BY " + colName + " " + direction, []string{colName} + return " ORDER BY " + colName + " " + direction + nullsfix, []string{colName} case strings.HasPrefix(sort, randomSeedPrefix): // seed as a parameter from the UI seedStr := sort[len(randomSeedPrefix):] @@ -109,13 +114,13 @@ func getSort(sort string, direction string, tableName string) (string, []string) colName = sort } if strings.Compare(sort, "name") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, []string{colName} + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + nullsfix, []string{colName} } if strings.Compare(sort, "title") == 0 { - return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction, []string{colName} + return " ORDER BY " + colName + " COLLATE NATURAL_CI " + direction + nullsfix, []string{colName} } - return " ORDER BY " + colName + " " + direction, []string{colName} + return " ORDER BY " + colName + " " + direction + nullsfix, []string{colName} } } diff --git a/pkg/sqlite/studio_filter.go b/pkg/sqlite/studio_filter.go index 07bfb4e8ae8..fdfe7563f09 100644 --- a/pkg/sqlite/studio_filter.go +++ b/pkg/sqlite/studio_filter.go @@ -63,7 +63,7 @@ func (qb *studioFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if studioFilter.StashID != nil { studioRepository.stashIDs.join(f, "studio_stash_ids", "studios.id") - stringCriterionHandler(studioFilter.StashID, "studio_stash_ids.stash_id")(ctx, f) + uuidCriterionHandler(studioFilter.StashID, "studio_stash_ids.stash_id")(ctx, f) } }), &stashIDCriterionHandler{ From 49cea00a0f6261a94e2a05af8fe84165e70b923b Mon Sep 17 00:00:00 2001 From: Nodude 
<75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:39:51 +0200 Subject: [PATCH 61/85] pgsql more bugfixes --- pkg/scene/update_test.go | 9 +++++++-- pkg/sqlite/performer_test.go | 4 ++-- pkg/sqlite/scene_marker_filter.go | 6 +++--- pkg/sqlite/scene_test.go | 2 +- pkg/sqlite/stash_id_test.go | 2 +- pkg/sqlite/studio_test.go | 2 +- 6 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/scene/update_test.go b/pkg/scene/update_test.go index 96ebb491f66..3f0829b5958 100644 --- a/pkg/scene/update_test.go +++ b/pkg/scene/update_test.go @@ -92,6 +92,11 @@ func TestUpdater_IsEmpty(t *testing.T) { } } +func getUUID(_ string) string { + // TODO: Encode input string + return "00000000-0000-0000-0000-000000000000" +} + func TestUpdater_Update(t *testing.T) { const ( sceneID = iota + 1 @@ -106,7 +111,7 @@ func TestUpdater_Update(t *testing.T) { performerIDs := []int{performerID} tagIDs := []int{tagID} - stashID := "stashID" + stashID := getUUID("stashID") endpoint := "endpoint" title := "title" @@ -234,7 +239,7 @@ func TestUpdateSet_UpdateInput(t *testing.T) { performerIDStrs := intslice.IntSliceToStringSlice(performerIDs) tagIDs := []int{tagID} tagIDStrs := intslice.IntSliceToStringSlice(tagIDs) - stashID := "stashID" + stashID := getUUID("stashID") endpoint := "endpoint" stashIDs := []models.StashID{ { diff --git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index 254d07dd8bd..a6964203db3 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -1703,7 +1703,7 @@ func testPerformerStashIDs(ctx context.Context, t *testing.T, s *models.Performe assert.Len(t, s.StashIDs.List(), 0) // add stash ids - const stashIDStr = "stashID" + var stashIDStr = getUUID("stashID") const endpoint = "endpoint" stashID := models.StashID{ StashID: stashIDStr, @@ -1969,7 +1969,7 @@ func TestPerformerStore_FindByStashID(t *testing.T) { { name: "non-existing", stashID: models.StashID{ - StashID: 
getPerformerStringValue(performerIdxWithScene, "stashid"), + StashID: getUUID(getPerformerStringValue(performerIdxWithScene, "stashid")), Endpoint: "non-existing", }, expectedIDs: []int{}, diff --git a/pkg/sqlite/scene_marker_filter.go b/pkg/sqlite/scene_marker_filter.go index 85c125dcd4f..1bee6b83489 100644 --- a/pkg/sqlite/scene_marker_filter.go +++ b/pkg/sqlite/scene_marker_filter.go @@ -95,7 +95,7 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera } if len(tags.Value) > 0 { - valuesClause, err := getHierarchicalValues(ctx, tags.Value, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth, true) + valuesClause, err := getHierarchicalValues(ctx, tags.Value, tagTable, "tags_relations", "parent_id", "child_id", tags.Depth, false) if err != nil { f.setError(err) return @@ -103,10 +103,10 @@ func (qb *sceneMarkerFilterHandler) tagsCriterionHandler(criterion *models.Hiera f.addWith(`marker_tags AS ( SELECT mt.scene_marker_id, t.column1 AS root_tag_id FROM scene_markers_tags mt - INNER JOIN ` + valuesClause + ` t ON t.column2 = mt.tag_id + INNER JOIN (` + valuesClause + `) t ON t.column2 = mt.tag_id UNION SELECT m.id, t.column1 FROM scene_markers m - INNER JOIN ` + valuesClause + ` t ON t.column2 = m.primary_tag_id + INNER JOIN (` + valuesClause + `) t ON t.column2 = m.primary_tag_id )`) f.addLeftJoin("marker_tags", "", "marker_tags.scene_marker_id = scene_markers.id") diff --git a/pkg/sqlite/scene_test.go b/pkg/sqlite/scene_test.go index 7faba071992..9c97eda12da 100644 --- a/pkg/sqlite/scene_test.go +++ b/pkg/sqlite/scene_test.go @@ -4374,7 +4374,7 @@ func testSceneStashIDs(ctx context.Context, t *testing.T, s *models.Scene) { assert.Len(t, s.StashIDs.List(), 0) // add stash ids - const stashIDStr = "stashID" + var stashIDStr = getUUID("stashID") const endpoint = "endpoint" stashID := models.StashID{ StashID: stashIDStr, diff --git a/pkg/sqlite/stash_id_test.go b/pkg/sqlite/stash_id_test.go index 10949b47533..bf5e9b30c41 
100644 --- a/pkg/sqlite/stash_id_test.go +++ b/pkg/sqlite/stash_id_test.go @@ -24,7 +24,7 @@ func testStashIDReaderWriter(ctx context.Context, t *testing.T, r stashIDReaderW testNoStashIDs(ctx, t, r, -1) // add stash ids - const stashIDStr = "stashID" + var stashIDStr = getUUID("stashID") const endpoint = "endpoint" stashID := models.StashID{ StashID: stashIDStr, diff --git a/pkg/sqlite/studio_test.go b/pkg/sqlite/studio_test.go index b5a25314c4c..0834b7dad3f 100644 --- a/pkg/sqlite/studio_test.go +++ b/pkg/sqlite/studio_test.go @@ -592,7 +592,7 @@ func testStudioStashIDs(ctx context.Context, t *testing.T, s *models.Studio) { assert.Len(t, s.StashIDs.List(), 0) // add stash ids - const stashIDStr = "stashID" + var stashIDStr = getUUID("stashID") const endpoint = "endpoint" stashID := models.StashID{ StashID: stashIDStr, From 2c0c95106af951f2f26fa5a9be22c0e2603ee29b Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sat, 26 Oct 2024 13:48:21 +0200 Subject: [PATCH 62/85] Fix case insensitive LIKE for postgres LIKE in SQLITE is case insensitive by default, where it's SENSITIVE in postgres.
Postgres has ILIKE which is insensitive, we should use that --- pkg/sqlite/criterion_handlers.go | 14 +++++++------- pkg/sqlite/database.go | 11 ++++++++++- pkg/sqlite/file.go | 6 +++--- pkg/sqlite/filter_internal_test.go | 12 ++++++------ pkg/sqlite/folder.go | 2 +- pkg/sqlite/gallery_filter.go | 4 ++-- pkg/sqlite/performer.go | 4 ++-- pkg/sqlite/performer_test.go | 9 +++------ pkg/sqlite/query.go | 6 +++--- pkg/sqlite/scene.go | 8 ++++---- pkg/sqlite/scene_filter.go | 2 +- pkg/sqlite/scene_marker.go | 4 ++-- pkg/sqlite/sql.go | 4 ++-- pkg/sqlite/studio.go | 4 ++-- 14 files changed, 48 insertions(+), 42 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index e9fa8cb0f51..7cd1aec1b72 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -43,9 +43,9 @@ func stringCriterionHandler(c *models.StringCriterionInput, column string) crite case models.CriterionModifierExcludes: f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{column}, c.Value, true)) case models.CriterionModifierEquals: - f.addWhere(column+" LIKE ?", c.Value) + f.addWhere(column+" "+getDBLike()+" ?", c.Value) case models.CriterionModifierNotEquals: - f.addWhere(column+" NOT LIKE ?", c.Value) + f.addWhere(column+" NOT "+getDBLike()+" ?", c.Value) case models.CriterionModifierMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { f.setError(err) @@ -86,9 +86,9 @@ func uuidCriterionHandler(c *models.StringCriterionInput, column string) criteri case models.CriterionModifierExcludes: f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{columnCast}, c.Value, true)) case models.CriterionModifierEquals: - f.addWhere(columnCast+" LIKE ?", c.Value) + f.addWhere(columnCast+" "+getDBLike()+" ?", c.Value) case models.CriterionModifierNotEquals: - f.addWhere(columnCast+" NOT LIKE ?", c.Value) + f.addWhere(columnCast+" NOT "+getDBLike()+" ?", c.Value) case models.CriterionModifierMatchesRegex: if 
_, err := regexp.Compile(c.Value); err != nil { f.setError(err) @@ -191,7 +191,7 @@ func getPathSearchClause(pathColumn, basenameColumn, p string, addWildcards, not } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - ret := makeClause(fmt.Sprintf("%s LIKE ?", filepathColumn), p) + ret := makeClause(fmt.Sprintf("%s "+getDBLike()+" ?", filepathColumn), p) if not { ret = ret.not() @@ -589,7 +589,7 @@ func (m *stringListCriterionHandlerBuilder) handler(criterion *models.StringCrit // excludes all of the provided values // need to use actual join table name for this // .id NOT IN (select . from where . in ) - whereClause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{primaryFK} from {joinTable} where {joinTable}.{stringColumn} LIKE ?)", + whereClause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{primaryFK} from {joinTable} where {joinTable}.{stringColumn} "+getDBLike()+" ?)", utils.StrFormatMap{ "primaryTable": m.primaryTable, "joinTable": m.joinTable, @@ -730,7 +730,7 @@ WHERE id in {inBinding} {unionClause}) `, withClauseMap) - query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || "+DBGroupConcat("'(' || root_id || ', ' || item_id || ')'")+" AS val FROM items", withClause) + query := fmt.Sprintf("WITH RECURSIVE %s SELECT 'VALUES' || "+getDBGroupConcat("'(' || root_id || ', ' || item_id || ')'")+" AS val FROM items", withClause) var valuesClause sql.NullString err := dbWrapper.Get(ctx, &valuesClause, query, args...) 
diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index d857f18b721..122c4128b42 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -205,7 +205,16 @@ func getDBMinFunc() string { } } -func DBGroupConcat(columnName string) string { +func getDBLike() string { + switch dbWrapper.dbType { + case PostgresBackend: + return "ILIKE" + default: + return "LIKE" + } +} + +func getDBGroupConcat(columnName string) string { switch dbWrapper.dbType { case PostgresBackend: return "STRING_AGG(" + columnName + "::TEXT, ',')" diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index 90c0ac04f95..de7d356132e 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -622,8 +622,8 @@ func (qb *FileStore) FindAllByPath(ctx context.Context, p string) ([]models.File if strings.Contains(basename, "%") || strings.Contains(dirName, "%") { q = q.Where( - folderTable.Col("path").Like(dirName), - table.Col("basename").Like(basename), + folderTable.Col("path").ILike(dirName), + table.Col("basename").ILike(basename), ) } else { q = q.Where( @@ -647,7 +647,7 @@ func (qb *FileStore) allInPaths(q *goqu.SelectDataset, p []string) *goqu.SelectD for _, pp := range p { ppWildcard := pp + string(filepath.Separator) + "%" - conds = append(conds, folderTable.Col("path").Eq(pp), folderTable.Col("path").Like(ppWildcard)) + conds = append(conds, folderTable.Col("path").Eq(pp), folderTable.Col("path").ILike(ppWildcard)) } return q.Where( diff --git a/pkg/sqlite/filter_internal_test.go b/pkg/sqlite/filter_internal_test.go index 54a6390621b..7e8f0a28701 100644 --- a/pkg/sqlite/filter_internal_test.go +++ b/pkg/sqlite/filter_internal_test.go @@ -471,7 +471,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s LIKE ? OR %[1]s LIKE ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%[1]s "+getDBLike()+" ? 
OR %[1]s "+getDBLike()+" ?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 2) assert.Equal("%two%", f.whereClauses[0].args[0]) assert.Equal("%words%", f.whereClauses[0].args[1]) @@ -483,7 +483,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s LIKE ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%[1]s "+getDBLike()+" ?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal("%two words%", f.whereClauses[0].args[0]) } @@ -502,7 +502,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s NOT LIKE ? AND %[1]s NOT LIKE ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%[1]s NOT "+getDBLike()+" ? AND %[1]s NOT "+getDBLike()+" ?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 2) assert.Equal("%two%", f.whereClauses[0].args[0]) assert.Equal("%words%", f.whereClauses[0].args[1]) @@ -514,7 +514,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s NOT LIKE ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(%[1]s NOT "+getDBLike()+" ?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal("%two words%", f.whereClauses[0].args[0]) } @@ -532,7 +532,7 @@ func TestStringCriterionHandlerEquals(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("%[1]s LIKE ?", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("%[1]s "+getDBLike()+" ?", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(value1, f.whereClauses[0].args[0]) } @@ -550,7 +550,7 @@ func TestStringCriterionHandlerNotEquals(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("%[1]s NOT LIKE ?", column), 
f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("%[1]s NOT "+getDBLike()+" ?", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(value1, f.whereClauses[0].args[0]) } diff --git a/pkg/sqlite/folder.go b/pkg/sqlite/folder.go index 4cf632d49e6..8d04d283a9f 100644 --- a/pkg/sqlite/folder.go +++ b/pkg/sqlite/folder.go @@ -254,7 +254,7 @@ func (qb *FolderStore) allInPaths(q *goqu.SelectDataset, p []string) *goqu.Selec for _, pp := range p { ppWildcard := pp + string(filepath.Separator) + "%" - conds = append(conds, table.Col("path").Eq(pp), table.Col("path").Like(ppWildcard)) + conds = append(conds, table.Col("path").Eq(pp), table.Col("path").ILike(ppWildcard)) } return q.Where( diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index 228f0e05b25..7ef870ecb29 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -201,13 +201,13 @@ func (qb *galleryFilterHandler) pathCriterionHandler(c *models.StringCriterionIn case models.CriterionModifierEquals: addWildcards = false clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) - clause2 := makeClause(folderPathColumn+" LIKE ?", c.Value) + clause2 := makeClause(folderPathColumn+" "+getDBLike()+" ?", c.Value) f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) case models.CriterionModifierNotEquals: addWildcards = false not = true clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) - clause2 := makeClause(folderPathColumn+" NOT LIKE ?", c.Value) + clause2 := makeClause(folderPathColumn+" NOT "+getDBLike()+" ?", c.Value) f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) case models.CriterionModifierMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { diff --git a/pkg/sqlite/performer.go b/pkg/sqlite/performer.go index 79495de2b9a..1b303d532c3 100644 --- a/pkg/sqlite/performer.go +++ b/pkg/sqlite/performer.go @@ -568,9 +568,9 @@ func (qb 
*PerformerStore) QueryForAutoTag(ctx context.Context, words []string) ( var whereClauses []exp.Expression for _, w := range words { - whereClauses = append(whereClauses, table.Col("name").Like(w+"%")) + whereClauses = append(whereClauses, table.Col("name").ILike(w+"%")) // TODO - see above - // whereClauses = append(whereClauses, performersAliasesJoinTable.Col("alias").Like(w+"%")) + // whereClauses = append(whereClauses, performersAliasesJoinTable.Col("alias").ILike(w+"%")) } sq = sq.Where( diff --git a/pkg/sqlite/performer_test.go b/pkg/sqlite/performer_test.go index a6964203db3..f522a102501 100644 --- a/pkg/sqlite/performer_test.go +++ b/pkg/sqlite/performer_test.go @@ -1155,12 +1155,9 @@ func TestPerformerQueryForAutoTag(t *testing.T) { t.Errorf("Error finding performers: %s", err.Error()) } - if assert.Len(t, performers, 2) { - assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[0].Name)) - assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[1].Name)) - } else { - t.Errorf("Skipping performer comparison as atleast 1 is missing") - } + assert.Len(t, performers, 2) + assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[0].Name)) + assert.Equal(t, strings.ToLower(performerNames[performerIdx1WithScene]), strings.ToLower(performers[1].Name)) return nil }) diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index eba5b67f4c7..f5bc91f0820 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -190,7 +190,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { var clauses []string for _, column := range columns { - clauses = append(clauses, column+" LIKE ?") + clauses = append(clauses, column+" "+getDBLike()+" ?") qb.addArg(like(t)) } @@ -199,7 +199,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { for _, t := range specs.MustNot { for _, column := range columns { - 
qb.addWhere(coalesce(column) + " NOT LIKE ?") + qb.addWhere(coalesce(column) + " NOT " + getDBLike() + " ?") qb.addArg(like(t)) } } @@ -209,7 +209,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { for _, column := range columns { for _, v := range set { - clauses = append(clauses, column+" LIKE ?") + clauses = append(clauses, column+" "+getDBLike()+" ?") qb.addArg(like(v)) } } diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index f927b301fa0..be1ca1c013c 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -710,8 +710,8 @@ func (qb *SceneStore) FindByPath(ctx context.Context, p string) ([]*models.Scene foldersTable, goqu.On(foldersTable.Col(idColumn).Eq(filesTable.Col("parent_folder_id"))), ).Select(scenesFilesJoinTable.Col(sceneIDColumn)).Where( - foldersTable.Col("path").Like(dir), - filesTable.Col("basename").Like(basename), + foldersTable.Col("path").ILike(dir), + filesTable.Col("basename").ILike(basename), ) ret, err := qb.findBySubquery(ctx, sq) @@ -890,7 +890,7 @@ func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, err } table := qb.table() - qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Prepared(true).Where(table.Col("details").ILike("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } @@ -1356,7 +1356,7 @@ func (qb *SceneStore) FindDuplicates(ctx context.Context, distance int, duration if distance == 0 { var ids []string - dbfix_findExactDuplicateQuery := fmt.Sprintf(findExactDuplicateQuery, DBGroupConcat("DISTINCT scene_id")) + dbfix_findExactDuplicateQuery := fmt.Sprintf(findExactDuplicateQuery, getDBGroupConcat("DISTINCT scene_id")) if err := dbWrapper.Select(ctx, &ids, dbfix_findExactDuplicateQuery, durationDiff); err != nil { return nil, err } diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index f08fcd01ccb..f8d2c746905 
100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -389,7 +389,7 @@ func (qb *sceneFilterHandler) captionCriterionHandler(captions *models.StringCri excludeClause := `scenes.id NOT IN ( SELECT scenes_files.scene_id from scenes_files INNER JOIN video_captions on video_captions.file_id = scenes_files.file_id - WHERE video_captions.language_code LIKE ? + WHERE video_captions.language_code ` + getDBLike() + ` ? )` f.addWhere(excludeClause, criterion.Value) diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index a6fa5033fd3..c42304c7338 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -266,7 +266,7 @@ func (qb *SceneMarkerStore) CountByTagID(ctx context.Context, tagID int) (int, e func (qb *SceneMarkerStore) GetMarkerStrings(ctx context.Context, q *string, sort *string) ([]*models.MarkerStringsResultType, error) { query := "SELECT count(*) as `count`, scene_markers.id as id, scene_markers.title as title FROM scene_markers" if q != nil { - query += " WHERE title LIKE '%" + *q + "%'" + query += " WHERE title " + getDBLike() + " '%" + *q + "%'" } query += " GROUP BY title" if sort != nil && *sort == "count" { @@ -285,7 +285,7 @@ func (qb *SceneMarkerStore) Wall(ctx context.Context, q *string) ([]*models.Scen } table := qb.table() - qq := qb.selectDataset().Prepared(true).Where(table.Col("title").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) + qq := qb.selectDataset().Prepared(true).Where(table.Col("title").ILike("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) return qb.getMany(ctx, qq) } diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index 65f9fb4b23b..babe5bcfd90 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -164,14 +164,14 @@ func getStringSearchClause(columns []string, q string, not bool) sqlClause { // Search for any word for _, word := range queryWords { for _, column := range columns { -
likeClauses = append(likeClauses, column+notStr+" "+getDBLike()+" ?") args = append(args, "%"+word+"%") } } } else { // Search the exact query for _, column := range columns { - likeClauses = append(likeClauses, column+notStr+" LIKE ?") + likeClauses = append(likeClauses, column+notStr+" "+getDBLike()+" ?") args = append(args, "%"+trimmedQuery+"%") } } diff --git a/pkg/sqlite/studio.go b/pkg/sqlite/studio.go index b5ba2624a00..e06f12a6047 100644 --- a/pkg/sqlite/studio.go +++ b/pkg/sqlite/studio.go @@ -495,8 +495,8 @@ func (qb *StudioStore) QueryForAutoTag(ctx context.Context, words []string) ([]* var whereClauses []exp.Expression for _, w := range words { - whereClauses = append(whereClauses, table.Col(studioNameColumn).Like(w+"%")) - whereClauses = append(whereClauses, studiosAliasesJoinTable.Col("alias").Like(w+"%")) + whereClauses = append(whereClauses, table.Col(studioNameColumn).ILike(w+"%")) + whereClauses = append(whereClauses, studiosAliasesJoinTable.Col("alias").ILike(w+"%")) } sq = sq.Where( From 4e34a5009e59a036632b197e71e72777a21cbee8 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:16:55 +0100 Subject: [PATCH 63/85] Fingerprints fix for postgres --- pkg/sqlite/file.go | 2 ++ pkg/sqlite/fingerprint.go | 15 +++++++++++++++ pkg/sqlite/scene_test.go | 15 +++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index de7d356132e..f3236e68a04 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -549,6 +549,8 @@ func (qb *FileStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]mode return err } + f.fingerprintQueryRow.correct() + rows = append(rows, f) return nil }); err != nil { diff --git a/pkg/sqlite/fingerprint.go b/pkg/sqlite/fingerprint.go index d65f6bab55a..08bf371fb53 100644 --- a/pkg/sqlite/fingerprint.go +++ b/pkg/sqlite/fingerprint.go @@ -3,6 +3,8 @@ package sqlite import ( "context" "fmt" + "strconv" + "strings" 
"github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" @@ -23,6 +25,19 @@ func (r fingerprintQueryRow) valid() bool { return r.Type.Valid } +func (r *fingerprintQueryRow) correct() { + if !r.Type.Valid || strings.ToLower(r.Type.String) != "phash" { + return + } + switch r.Fingerprint.(type) { + case string: + i, err := strconv.ParseInt(r.Fingerprint.(string), 10, 64) + if err == nil { + r.Fingerprint = i + } + } +} + func (r *fingerprintQueryRow) resolve() models.Fingerprint { return models.Fingerprint{ Type: r.Type.String, diff --git a/pkg/sqlite/scene_test.go b/pkg/sqlite/scene_test.go index 9c97eda12da..c316c32870a 100644 --- a/pkg/sqlite/scene_test.go +++ b/pkg/sqlite/scene_test.go @@ -75,6 +75,13 @@ func loadSceneRelationships(ctx context.Context, expected models.Scene, actual * return nil } +func sortScene(copy *models.Scene) { + // Ordering is not ensured + copy.GalleryIDs.Sort() + copy.TagIDs.Sort() + copy.PerformerIDs.Sort() +} + func Test_sceneQueryBuilder_Create(t *testing.T) { var ( title = "title" @@ -267,6 +274,8 @@ func Test_sceneQueryBuilder_Create(t *testing.T) { return } + sortScene(©) + sortScene(&s) assert.Equal(copy, s) // ensure can find the scene @@ -284,6 +293,7 @@ func Test_sceneQueryBuilder_Create(t *testing.T) { t.Errorf("loadSceneRelationships() error = %v", err) return } + sortScene(found) assert.Equal(copy, *found) return @@ -492,6 +502,8 @@ func Test_sceneQueryBuilder_Update(t *testing.T) { return } + sortScene(©) + sortScene(s) assert.Equal(copy, *s) }) } @@ -699,6 +711,8 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) { // ignore file ids clearSceneFileIDs(got) + sortScene(&tt.want) + sortScene(got) assert.Equal(tt.want, *got) s, err := qb.Find(ctx, tt.id) @@ -714,6 +728,7 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) { // ignore file ids clearSceneFileIDs(s) + sortScene(s) assert.Equal(tt.want, *s) }) } From ac7456c7f534cb64e5116097fc951adcf13eebbd Mon Sep 17 00:00:00 2001 From: Nodude 
<75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 28 Oct 2024 12:33:31 +0100 Subject: [PATCH 64/85] Fix rowid missing in pgsql --- pkg/sqlite/database.go | 9 +++++++++ pkg/sqlite/table.go | 8 +++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 122c4128b42..fac77a6a217 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -223,6 +223,15 @@ func getDBGroupConcat(columnName string) string { } } +func getDBRowId() string { + switch dbWrapper.dbType { + case PostgresBackend: + return "ctid" + default: + return "rowid" + } +} + func (db *Database) SetSchemaVersion(version uint) { db.schemaVersion = version } diff --git a/pkg/sqlite/table.go b/pkg/sqlite/table.go index b2c473b9f0c..94872f22111 100644 --- a/pkg/sqlite/table.go +++ b/pkg/sqlite/table.go @@ -1073,21 +1073,23 @@ func (t *viewHistoryTable) deleteDates(ctx context.Context, id int, dates []time dates = []time.Time{time.Now()} } + rowIdColumn := getDBRowId() + for _, date := range dates { var subquery *goqu.SelectDataset if mostRecent { // delete the most recent - subquery = dialect.Select("rowid").From(table).Where( + subquery = dialect.Select(rowIdColumn).From(table).Where( t.idColumn.Eq(id), ).Order(t.dateColumn.Desc()).Limit(1) } else { - subquery = dialect.Select("rowid").From(table).Where( + subquery = dialect.Select(rowIdColumn).From(table).Where( t.idColumn.Eq(id), t.dateColumn.Eq(UTCTimestamp{Timestamp{date}}), ).Limit(1) } - q := dialect.Delete(table).Where(goqu.I("rowid").Eq(subquery)) + q := dialect.Delete(table).Where(goqu.I(rowIdColumn).Eq(subquery)) if _, err := exec(ctx, q); err != nil { return nil, fmt.Errorf("deleting from %s: %w", table.GetTable(), err) From cb01336962b0ebca7a2cfac08722771204afd957 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:05:22 +0100 Subject: [PATCH 65/85] pgsql fix image test (sorting by nothing). 
Fix findExactDuplicateQuery for sqlite and pgsql use. Fix random sort for pgsql. Fix studioquery test by setting the id as a number (sqlite isnt strict, but pgsql is) --- pkg/sqlite/image_test.go | 2 ++ pkg/sqlite/scene.go | 10 +++++----- pkg/sqlite/sql.go | 5 +++++ pkg/sqlite/studio_test.go | 7 ++++--- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/pkg/sqlite/image_test.go b/pkg/sqlite/image_test.go index a823f8fc146..6e6d345ade0 100644 --- a/pkg/sqlite/image_test.go +++ b/pkg/sqlite/image_test.go @@ -2978,8 +2978,10 @@ func TestImageQuerySorting(t *testing.T) { func TestImageQueryPagination(t *testing.T) { withTxn(func(ctx context.Context) error { perPage := 1 + sortBy := "id" findFilter := models.FindFilterType{ PerPage: &perPage, + Sort: &sortBy, // Without sort, using pagination is unpredictable } sqb := db.GetRepo().Image diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index be1ca1c013c..eda5eaf6847 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -51,10 +51,10 @@ FROM ( INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id) INNER JOIN files ON (scenes_files.file_id = files.id) INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') - INNER JOIN video_files ON (files.id == video_files.file_id) -) -WHERE durationDiff <= ?1 - OR ?1 < 0 -- Always TRUE if the parameter is negative. + INNER JOIN video_files ON (files.id = video_files.file_id) +) as subq +WHERE durationDiff <= $1 + OR $1 < 0 -- Always TRUE if the parameter is negative. -- That will disable the durationDiff checking. 
GROUP BY phash HAVING COUNT(phash) > 1 @@ -70,7 +70,7 @@ FROM scenes INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id) INNER JOIN files ON (scenes_files.file_id = files.id) INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash') -INNER JOIN video_files ON (files.id == video_files.file_id) +INNER JOIN video_files ON (files.id = video_files.file_id) ORDER BY files.size DESC; ` diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index babe5bcfd90..a345c596cef 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -138,6 +138,11 @@ func getRandomSort(tableName string, direction string, seed uint64) string { // ORDER BY ((n+seed)*(n+seed)*p1 + (n+seed)*p2) % p3 // since sqlite converts overflowing numbers to reals, a custom db function that uses uints with overflow should be faster, // however in practice the overhead of calling a custom function vastly outweighs the benefits + + if dbWrapper.dbType == PostgresBackend { + colName = "CAST(" + colName + " AS DECIMAL)" + } + return fmt.Sprintf(" ORDER BY mod((%[1]s + %[2]d) * (%[1]s + %[2]d) * 52959209 + (%[1]s + %[2]d) * 1047483763, 2147483647) %[3]s", colName, seed, direction) } diff --git a/pkg/sqlite/studio_test.go b/pkg/sqlite/studio_test.go index 0834b7dad3f..5ddba93ff16 100644 --- a/pkg/sqlite/studio_test.go +++ b/pkg/sqlite/studio_test.go @@ -1044,13 +1044,14 @@ func TestStudioQueryFast(t *testing.T) { tsString := "test" tsInt := 1 + tsId := "1" testStringCriterion := models.StringCriterionInput{ Value: tsString, Modifier: models.CriterionModifierEquals, } - testIncludesMultiCriterion := models.MultiCriterionInput{ - Value: []string{tsString}, + testIncludesMultiCriterionId := models.MultiCriterionInput{ + Value: []string{tsId}, Modifier: models.CriterionModifierIncludes, } testIntCriterion := models.IntCriterionInput{ @@ -1080,7 +1081,7 @@ func TestStudioQueryFast(t *testing.T) { SceneCount: &testIntCriterion, } parentsFilter := 
models.StudioFilterType{ - Parents: &testIncludesMultiCriterion, + Parents: &testIncludesMultiCriterionId, } filters := []models.StudioFilterType{nameFilter, aliasesFilter, stashIDFilter, urlFilter, ratingFilter, sceneCountFilter, imageCountFilter, parentsFilter} From d681d776ab6c05ff3101f3cb42f8d85594dfade6 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:10:55 +0100 Subject: [PATCH 66/85] tag queryforautotag forgot to replace like --- pkg/sqlite/tag.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 4d58538ceea..104350c42d7 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -583,11 +583,11 @@ func (qb *TagStore) QueryForAutoTag(ctx context.Context, words []string) ([]*mod for _, w := range words { ww := w + "%" - whereClauses = append(whereClauses, "tags.name like ?") + whereClauses = append(whereClauses, "tags.name "+getDBLike()+" ?") args = append(args, ww) // include aliases - whereClauses = append(whereClauses, "tag_aliases.alias like ?") + whereClauses = append(whereClauses, "tag_aliases.alias "+getDBLike()+" ?") args = append(args, ww) } From 1cd2c0dc12d1323654a373dea6f78cec183b476f Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 28 Oct 2024 13:22:36 +0100 Subject: [PATCH 67/85] fingerprint correct make the linter happy --- pkg/sqlite/fingerprint.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/sqlite/fingerprint.go b/pkg/sqlite/fingerprint.go index 08bf371fb53..c3bfaa6ddcb 100644 --- a/pkg/sqlite/fingerprint.go +++ b/pkg/sqlite/fingerprint.go @@ -29,10 +29,9 @@ func (r *fingerprintQueryRow) correct() { if !r.Type.Valid || strings.ToLower(r.Type.String) != "phash" { return } - switch r.Fingerprint.(type) { - case string: - i, err := strconv.ParseInt(r.Fingerprint.(string), 10, 64) - if err == nil { + + if val, ok := r.Fingerprint.(string); 
ok { + if i, err := strconv.ParseInt(val, 10, 64); err == nil { r.Fingerprint = i } } From 26ae6035c30e0a1f3db857d8c0ec1871ece9def7 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:44:16 +0100 Subject: [PATCH 68/85] Some commit removed the sqlite dialect init --- internal/manager/init.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/manager/init.go b/internal/manager/init.go index f89178443e2..32865574bc5 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -37,6 +37,8 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { var db sqlite.DBInterface + sqlite.RegisterSqliteDialect() + dbUrl := cfg.GetDatabaseUrl() upperUrl := strings.ToUpper(dbUrl) switch { From 39e9b3e615ad1d84cd8592bf939965c9e3046f52 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 29 Oct 2024 12:54:23 +0100 Subject: [PATCH 69/85] Remove custom sqlite dialect in favor of upgrading goqu --- go.mod | 2 +- go.sum | 2 ++ internal/autotag/integration_test.go | 1 - internal/manager/init.go | 2 -- pkg/sqlite/database_sqlite.go | 10 +--------- pkg/sqlite/setup_test.go | 1 - 6 files changed, 4 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index fd21a2906d1..9dabee3ac77 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/corona10/goimagehash v1.1.0 github.com/disintegration/imaging v1.6.2 github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d - github.com/doug-martin/goqu/v9 v9.18.0 + github.com/doug-martin/goqu/v9 v9.19.1-0.20231214054827-21b6e6d1cb1b github.com/go-chi/chi/v5 v5.0.12 github.com/go-chi/cors v1.2.1 github.com/go-chi/httplog v0.3.1 diff --git a/go.sum b/go.sum index 3ad2733e482..2a67217ab52 100644 --- a/go.sum +++ b/go.sum @@ -200,6 +200,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA github.com/dop251/goja_nodejs 
v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/doug-martin/goqu/v9 v9.18.0 h1:/6bcuEtAe6nsSMVK/M+fOiXUNfyFF3yYtE07DBPFMYY= github.com/doug-martin/goqu/v9 v9.18.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= +github.com/doug-martin/goqu/v9 v9.19.1-0.20231214054827-21b6e6d1cb1b h1:WaCes6lOJCbIDgABfA8gB1ADMQo6+ftGEkj+oIB+vm4= +github.com/doug-martin/goqu/v9 v9.19.1-0.20231214054827-21b6e6d1cb1b/go.mod h1:1MqhYk2p5QFEUT9ZzH+M02Jv8BbOYlvzupULdHl7Mjs= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index 7e47624ebd0..f7200ea9e0d 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -49,7 +49,6 @@ func getNewDB(databaseFile string) sqlite.DBInterface { if valid { db = sqlite.NewPostgresDatabase(dbUrl, true) } else { - sqlite.RegisterSqliteDialect() db = sqlite.NewSQLiteDatabase(databaseFile, true) } diff --git a/internal/manager/init.go b/internal/manager/init.go index 32865574bc5..f89178443e2 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -37,8 +37,6 @@ func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { var db sqlite.DBInterface - sqlite.RegisterSqliteDialect() - dbUrl := cfg.GetDatabaseUrl() upperUrl := strings.ToUpper(dbUrl) switch { diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index d652190b9c3..31da52ba8f4 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -7,7 +7,6 @@ import ( "time" "github.com/doug-martin/goqu/v9" - "github.com/doug-martin/goqu/v9/dialect/sqlite3" "github.com/jmoiron/sqlx" 
"github.com/stashapp/stash/pkg/fsutil" "github.com/stashapp/stash/pkg/logger" @@ -17,13 +16,6 @@ type SQLiteDB struct { Database } -func RegisterSqliteDialect() { - opts := sqlite3.DialectOptions() - opts.SupportsReturn = true - goqu.RegisterDialect("sqlite3new", opts) - -} - func NewSQLiteDatabase(dbPath string, init bool) *SQLiteDB { db := &SQLiteDB{ Database: Database{ @@ -35,7 +27,7 @@ func NewSQLiteDatabase(dbPath string, init bool) *SQLiteDB { db.DBInterface = db if init { - dialect = goqu.Dialect("sqlite3new") + dialect = goqu.Dialect("sqlite3") dbWrapper.dbType = SqliteBackend } diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 3944fc68f2f..cc27f68dc18 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -635,7 +635,6 @@ func getNewDB(databaseFile string) sqlite.DBInterface { if valid { db = sqlite.NewPostgresDatabase(dbUrl, true) } else { - sqlite.RegisterSqliteDialect() db = sqlite.NewSQLiteDatabase(databaseFile, true) } From 6fb7bcfe4d4a67f5b6a23bc6d427778d5a336159 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Tue, 29 Oct 2024 16:31:56 +0100 Subject: [PATCH 70/85] Ensure order on findfilter to prevent unstable db return order --- internal/autotag/integration_test.go | 2 + pkg/models/find_filter.go | 13 +++ pkg/sqlite/file.go | 4 +- pkg/sqlite/gallery.go | 5 +- pkg/sqlite/image.go | 119 +++++++++++++-------------- pkg/sqlite/image_test.go | 2 - pkg/sqlite/scene.go | 4 +- pkg/sqlite/setup_test.go | 2 + 8 files changed, 79 insertions(+), 72 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index f7200ea9e0d..4a4099040f8 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -47,8 +47,10 @@ func testTeardown(databaseFile string) { func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { + fmt.Printf("Postgres backend for tests 
detected\n") db = sqlite.NewPostgresDatabase(dbUrl, true) } else { + fmt.Printf("SQLite backend for tests detected\n") db = sqlite.NewSQLiteDatabase(databaseFile, true) } diff --git a/pkg/models/find_filter.go b/pkg/models/find_filter.go index 9934a9ea9c4..4f9b0248c78 100644 --- a/pkg/models/find_filter.go +++ b/pkg/models/find_filter.go @@ -127,3 +127,16 @@ func BatchFindFilter(batchSize int) *FindFilterType { Page: &page, } } + +// EnsureFindFilterSorted sets an ordering if one is missing +// Database ordering is unstable otherwise +func EnsureFindFilterSorted(findFilter *FindFilterType) { + if findFilter == nil { + findFilter = &FindFilterType{} + } + + if findFilter.Sort == nil || *findFilter.Sort == "" { + idStr := "id" + findFilter.Sort = &idStr + } +} diff --git a/pkg/sqlite/file.go b/pkg/sqlite/file.go index f3236e68a04..7a3a1ca2e4a 100644 --- a/pkg/sqlite/file.go +++ b/pkg/sqlite/file.go @@ -928,9 +928,7 @@ var fileSortOptions = sortOptions{ } func (qb *FileStore) setQuerySort(query *queryBuilder, findFilter *models.FindFilterType) error { - if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { - return nil - } + models.EnsureFindFilterSorted(findFilter) sort := findFilter.GetSort("path") // CVE-2024-32231 - ensure sort is in the list of allowed sorts diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 6bf5060255d..435240d4fe6 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -785,10 +785,7 @@ var gallerySortOptions = sortOptions{ } func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.FindFilterType) error { - if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { - return nil - } - + models.EnsureFindFilterSorted(findFilter) sort := findFilter.GetSort("path") direction := findFilter.GetDirection() diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 71a8e05843e..7df2190fd0f 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -930,75 +930,74 @@ 
var imageSortOptions = sortOptions{ } func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *models.FindFilterType) error { + models.EnsureFindFilterSorted(findFilter) sortClause := "" - if findFilter != nil && findFilter.Sort != nil && *findFilter.Sort != "" { - sort := findFilter.GetSort("title") - direction := findFilter.GetDirection() + sort := findFilter.GetSort("title") + direction := findFilter.GetDirection() - // CVE-2024-32231 - ensure sort is in the list of allowed sorts - if err := imageSortOptions.validateSort(sort); err != nil { - return err - } - - // translate sort field - if sort == "file_mod_time" { - sort = "mod_time" - } + // CVE-2024-32231 - ensure sort is in the list of allowed sorts + if err := imageSortOptions.validateSort(sort); err != nil { + return err + } - addFilesJoin := func() { - q.addJoins( - join{ - table: imagesFilesTable, - onClause: "images_files.image_id = images.id", - }, - join{ - table: fileTable, - onClause: "images_files.file_id = files.id", - }, - ) - } + // translate sort field + if sort == "file_mod_time" { + sort = "mod_time" + } - addFolderJoin := func() { - q.addJoins(join{ - table: folderTable, - onClause: "files.parent_folder_id = folders.id", - }) - } + addFilesJoin := func() { + q.addJoins( + join{ + table: imagesFilesTable, + onClause: "images_files.image_id = images.id", + }, + join{ + table: fileTable, + onClause: "images_files.file_id = files.id", + }, + ) + } - switch sort { - case "path": - addFilesJoin() - addFolderJoin() - sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction - q.addGroupBy("folders.path", "files.basename") - case "file_count": - sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) - case "tag_count": - sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction) - case "performer_count": - sortClause = getCountSort(imageTable, performersImagesTable, 
imageIDColumn, direction) - case "mod_time", "filesize": - addFilesJoin() - add, agg := getSort(sort, direction, "files") - sortClause = add - q.addGroupBy(agg...) - case "title": - addFilesJoin() - addFolderJoin() - sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction - q.addGroupBy("images.title", "files.basename", "folders.path") - default: - add, agg := getSort(sort, direction, "images") - sortClause = add - q.addGroupBy(agg...) - } + addFolderJoin := func() { + q.addJoins(join{ + table: folderTable, + onClause: "files.parent_folder_id = folders.id", + }) + } - // Whatever the sorting, always use title/id as a final sort - sortClause += ", COALESCE(images.title, CAST(images.id as text)) COLLATE NATURAL_CI ASC" - q.addGroupBy("images.title", "images.id") + switch sort { + case "path": + addFilesJoin() + addFolderJoin() + sortClause = " ORDER BY COALESCE(folders.path, '') || COALESCE(files.basename, '') COLLATE NATURAL_CI " + direction + q.addGroupBy("folders.path", "files.basename") + case "file_count": + sortClause = getCountSort(imageTable, imagesFilesTable, imageIDColumn, direction) + case "tag_count": + sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction) + case "performer_count": + sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) + case "mod_time", "filesize": + addFilesJoin() + add, agg := getSort(sort, direction, "files") + sortClause = add + q.addGroupBy(agg...) + case "title": + addFilesJoin() + addFolderJoin() + sortClause = " ORDER BY COALESCE(images.title, files.basename) COLLATE NATURAL_CI " + direction + ", folders.path COLLATE NATURAL_CI " + direction + q.addGroupBy("images.title", "files.basename", "folders.path") + default: + add, agg := getSort(sort, direction, "images") + sortClause = add + q.addGroupBy(agg...) 
} + // Whatever the sorting, always use title/id as a final sort + sortClause += ", COALESCE(images.title, CAST(images.id as text)) COLLATE NATURAL_CI ASC" + q.addGroupBy("images.title", "images.id") + q.sortAndPagination = sortClause + getPagination(findFilter) return nil diff --git a/pkg/sqlite/image_test.go b/pkg/sqlite/image_test.go index 6e6d345ade0..a823f8fc146 100644 --- a/pkg/sqlite/image_test.go +++ b/pkg/sqlite/image_test.go @@ -2978,10 +2978,8 @@ func TestImageQuerySorting(t *testing.T) { func TestImageQueryPagination(t *testing.T) { withTxn(func(ctx context.Context) error { perPage := 1 - sortBy := "id" findFilter := models.FindFilterType{ PerPage: &perPage, - Sort: &sortBy, // Without sort, using pagination is unpredictable } sqb := db.GetRepo().Image diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index eda5eaf6847..0ded274b2d7 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -1086,9 +1086,7 @@ var sceneSortOptions = sortOptions{ } func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindFilterType) error { - if findFilter == nil || findFilter.Sort == nil || *findFilter.Sort == "" { - return nil - } + models.EnsureFindFilterSorted(findFilter) sort := findFilter.GetSort("title") // CVE-2024-32231 - ensure sort is in the list of allowed sorts diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index cc27f68dc18..f775caeb933 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -633,8 +633,10 @@ func testTeardown(databaseFile string) { func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { + fmt.Printf("Postgres backend for tests detected\n") db = sqlite.NewPostgresDatabase(dbUrl, true) } else { + fmt.Printf("SQLite backend for tests detected\n") db = sqlite.NewSQLiteDatabase(databaseFile, true) } From 05f4d8538e538d3c2d21251c9ac1336f1aa9f59e Mon Sep 17 00:00:00 2001 From: Nodude 
<75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 30 Oct 2024 14:13:38 +0100 Subject: [PATCH 71/85] made TagStore Merge compatible with pgsql --- pkg/sqlite/blob.go | 21 +-------------------- pkg/sqlite/database.go | 19 +++++++++++++++++++ pkg/sqlite/database_postgres.go | 1 + pkg/sqlite/tag.go | 23 ++++++++++++++--------- 4 files changed, 35 insertions(+), 29 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 36703ea1d31..3970836fc3b 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -9,9 +9,7 @@ import ( "github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" - "github.com/jackc/pgx/v5/pgconn" "github.com/jmoiron/sqlx" - "github.com/mattn/go-sqlite3" "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/hash/md5" "github.com/stashapp/stash/pkg/logger" @@ -310,7 +308,7 @@ func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) (sql func (qb *BlobStore) Delete(ctx context.Context, checksum string) error { // try to delete the blob from the database if err := qb.delete(ctx, checksum); err != nil { - if qb.isConstraintError(err) { + if isConstraintError(err) { // blob is still referenced - do not delete logger.Debugf("Blob %s is still referenced - not deleting", checksum) return nil @@ -331,23 +329,6 @@ func (qb *BlobStore) Delete(ctx context.Context, checksum string) error { return nil } -func (qb *BlobStore) isConstraintError(err error) bool { - switch dbWrapper.dbType { - case PostgresBackend: - var pgErr *pgconn.PgError - if errors.As(err, &pgErr) { - // Class 23 — Integrity Constraint Violation - return pgErr.Code[:2] == "23" - } - case SqliteBackend: - var sqliteError sqlite3.Error - if errors.As(err, &sqliteError) { - return sqliteError.Code == sqlite3.ErrConstraint - } - } - return false -} - func (qb *BlobStore) delete(ctx context.Context, checksum string) error { table := qb.table() diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 
fac77a6a217..b5bbacfc23f 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -10,7 +10,9 @@ import ( "strconv" "time" + "github.com/jackc/pgx/v5/pgconn" "github.com/jmoiron/sqlx" + "github.com/mattn/go-sqlite3" "github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/models" @@ -232,6 +234,23 @@ func getDBRowId() string { } } +func isConstraintError(err error) bool { + switch dbWrapper.dbType { + case PostgresBackend: + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + // Class 23 — Integrity Constraint Violation + return pgErr.Code[:2] == "23" + } + case SqliteBackend: + var sqliteError sqlite3.Error + if errors.As(err, &sqliteError) { + return sqliteError.Code == sqlite3.ErrConstraint + } + } + return false +} + func (db *Database) SetSchemaVersion(version uint) { db.schemaVersion = version } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 55e6377ecb3..af21a775b20 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -59,6 +59,7 @@ func (db *PostgresDB) openWriteDB() error { // Ensure single connection for testing to avoid race conditions func (db *PostgresDB) TestMode() { + db.readDB.Close() db.readDB = db.writeDB } diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 104350c42d7..a97929e735b 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -801,16 +801,21 @@ func (qb *TagStore) Merge(ctx context.Context, source []int, destination int) er "studios_tags": "studio_id", } - args = append(args, destination) for table, idColumn := range tagTables { - _, err := dbWrapper.Exec(ctx, `UPDATE OR IGNORE `+table+` -SET tag_id = ? 
-WHERE tag_id IN `+inBinding+` -AND NOT EXISTS(SELECT 1 FROM `+table+` o WHERE o.`+idColumn+` = `+table+`.`+idColumn+` AND o.tag_id = ?)`, - args..., - ) - if err != nil { - return err + for _, to_migrate_id := range srcArgs { + err := withSavepoint(ctx, func(ctx context.Context) error { + _, err := dbWrapper.Exec(ctx, `UPDATE `+table+` + SET tag_id = $1 + WHERE tag_id = $2 + AND NOT EXISTS(SELECT 1 FROM `+table+` o WHERE o.`+idColumn+` = `+table+`.`+idColumn+` AND o.tag_id = $1)`, + destination, to_migrate_id, + ) + return err + }) + + if err != nil && !isConstraintError(err) { + return err + } } // delete source tag ids from the table where they couldn't be set From cdd192d90087291f1ccb51cdcb177c807b224ca3 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:19:40 +0100 Subject: [PATCH 72/85] Made anonymise cross-db compatible --- pkg/sqlite/anonymise.go | 191 ++++++++++++++---- .../migrationsPostgres/1_initial.up.sql | 29 +-- pkg/sqlite/tx.go | 4 + 3 files changed, 173 insertions(+), 51 deletions(-) diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index e8eb072f9ee..a36642b0d39 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -25,23 +25,18 @@ const ( type Anonymiser struct { *SQLiteDB + sourceDb DBInterface } -func NewAnonymiser(db DBInterface, outPath string) (*Anonymiser, error) { - if dbWrapper.dbType == PostgresBackend { - return nil, fmt.Errorf("anonymise is not yet implemented for postgres backend") - } - - if _, err := db.GetWriteDB().Exec(fmt.Sprintf(`VACUUM INTO "%s"`, outPath)); err != nil { - return nil, fmt.Errorf("vacuuming into %s: %w", outPath, err) - } +var anon_dialect = goqu.Dialect("sqlite3") +func NewAnonymiser(db DBInterface, outPath string) (*Anonymiser, error) { newDB := NewSQLiteDatabase(outPath, false) if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } - return &Anonymiser{SQLiteDB: newDB}, nil + 
return &Anonymiser{SQLiteDB: newDB, sourceDb: db}, nil } func (db *Anonymiser) Anonymise(ctx context.Context) error { @@ -49,6 +44,7 @@ func (db *Anonymiser) Anonymise(ctx context.Context) error { defer db.Close() return utils.Do([]func() error{ + func() error { return db.fetch(ctx) }, func() error { return db.deleteBlobs() }, func() error { return db.deleteStashIDs() }, func() error { return db.clearOHistory() }, @@ -78,6 +74,127 @@ func (db *Anonymiser) Anonymise(ctx context.Context) error { return nil } +func (db *Anonymiser) fetch(ctx context.Context) error { + const disableForeignKeys = true + const writable = true + const batchSize = 5000 + + conn, err := db.open(disableForeignKeys, writable) + if err != nil { + return fmt.Errorf("failed to open db: %w", err) + } + + for _, table := range []exp.IdentifierExpression{ + goqu.I(fileTable), + goqu.I(fingerprintTable), + goqu.I(folderTable), + goqu.I(galleryTable), + goqu.I(galleriesChaptersTable), + goqu.I(galleriesFilesTable), + goqu.I(galleriesImagesTable), + goqu.I(galleriesTagsTable), + goqu.I(galleriesURLsTable), + goqu.I(groupURLsTable), + goqu.I(groupTable), + goqu.I(groupRelationsTable), + goqu.I(groupsScenesTable), + goqu.I(groupsTagsTable), + goqu.I(imageFileTable), + goqu.I(imagesURLsTable), + goqu.I(imageTable), + goqu.I(imagesFilesTable), + goqu.I(imagesTagsTable), + goqu.I(performersAliasesTable), + goqu.I("performer_stash_ids"), + goqu.I(performerURLsTable), + goqu.I(performerTable), + goqu.I(performersGalleriesTable), + goqu.I(performersImagesTable), + goqu.I(performersScenesTable), + goqu.I(performersTagsTable), + goqu.I(savedFilterTable), + goqu.I(sceneMarkerTable), + goqu.I("scene_markers_tags"), + goqu.I(scenesURLsTable), + goqu.I(sceneTable), + goqu.I(scenesFilesTable), + goqu.I(scenesGalleriesTable), + goqu.I(scenesODatesTable), + goqu.I(scenesTagsTable), + goqu.I(scenesViewDatesTable), + goqu.I(studioAliasesTable), + goqu.I("studio_stash_ids"), + goqu.I(studioTable), + 
goqu.I(studiosTagsTable), + goqu.I(tagAliasesTable), + goqu.I(tagTable), + goqu.I(tagRelationsTable), + goqu.I(videoCaptionsTable), + goqu.I(videoFileTable), + } { + offset := 0 + for { + q := dialect.From(table).Select(table.All()).Limit(uint(batchSize)).Offset(uint(offset)) + var rowsSlice []map[string]interface{} + + // Fetch + if err := txn.WithTxn(ctx, db.sourceDb, func(ctx context.Context) error { + if err := queryFunc(ctx, q, false, func(r *sqlx.Rows) error { + for r.Next() { + row := make(map[string]interface{}) + if err := r.MapScan(row); err != nil { + return fmt.Errorf("failed structscan: %w", err) + } + rowsSlice = append(rowsSlice, row) + } + + return nil + }); err != nil { + return fmt.Errorf("querying %s: %w", table, err) + } + + return nil + }); err != nil { + return fmt.Errorf("failed fetch transaction: %w", err) + } + + if len(rowsSlice) == 0 { + break + } + + // Insert + txn, err := conn.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("begin tx: %w", err) + } + + i := anon_dialect.Insert(table).Rows(rowsSlice) + sql, args, err := i.ToSQL() + if err != nil { + return fmt.Errorf("failed tosql: %w", err) + } + + _, err = txn.ExecContext(ctx, sql, args...) 
+ if err != nil { + return fmt.Errorf("exec `%s` [%v]: %w", sql, args, err) + } + + if err := txn.Commit(); err != nil { + return fmt.Errorf("commit: %w", err) + } + + // Move to the next batch + offset += batchSize + } + } + + if err := conn.Close(); err != nil { + return fmt.Errorf("close: %w", err) + } + + return nil +} + func (db *Anonymiser) truncateColumn(tableName string, column string) error { _, err := db.writeDB.Exec("UPDATE " + tableName + " SET " + column + " = NULL") return err @@ -131,7 +248,7 @@ func (db *Anonymiser) anonymiseFolders(ctx context.Context) error { func (db *Anonymiser) anonymiseFoldersRecurse(ctx context.Context, parentFolderID int, parentPath string) error { table := folderTableMgr.table - stmt := dialect.Update(table) + stmt := anon_dialect.Update(table) if parentFolderID == 0 { stmt = stmt.Set(goqu.Record{"path": goqu.Cast(table.Col(idColumn), "VARCHAR")}).Where(table.Col("parent_folder_id").IsNull()) @@ -146,7 +263,7 @@ func (db *Anonymiser) anonymiseFoldersRecurse(ctx context.Context, parentFolderI } // now recurse to sub-folders - query := dialect.From(table).Select(table.Col(idColumn), table.Col("path")) + query := anon_dialect.From(table).Select(table.Col(idColumn), table.Col("path")) if parentFolderID == 0 { query = query.Where(table.Col("parent_folder_id").IsNull()) } else { @@ -169,7 +286,7 @@ func (db *Anonymiser) anonymiseFiles(ctx context.Context) error { logger.Infof("Anonymising files") return txn.WithTxn(ctx, db, func(ctx context.Context) error { table := fileTableMgr.table - stmt := dialect.Update(table).Set(goqu.Record{"basename": goqu.Cast(table.Col(idColumn), "VARCHAR")}) + stmt := anon_dialect.Update(table).Set(goqu.Record{"basename": goqu.Cast(table.Col(idColumn), "VARCHAR")}) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -183,7 +300,7 @@ func (db *Anonymiser) anonymiseCaptions(ctx context.Context) error { logger.Infof("Anonymising captions") return 
txn.WithTxn(ctx, db, func(ctx context.Context) error { table := goqu.T(videoCaptionsTable) - stmt := dialect.Update(table).Set(goqu.Record{"filename": goqu.Cast(table.Col("file_id"), "VARCHAR")}) + stmt := anon_dialect.Update(table).Set(goqu.Record{"filename": goqu.Cast(table.Col("file_id"), "VARCHAR")}) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -203,7 +320,7 @@ func (db *Anonymiser) anonymiseFingerprints(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(fileIDColumn), table.Col("type"), table.Col("fingerprint"), @@ -260,7 +377,7 @@ func (db *Anonymiser) anonymiseScenes(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("title"), table.Col("details"), @@ -297,7 +414,7 @@ func (db *Anonymiser) anonymiseScenes(ctx context.Context) error { db.obfuscateNullString(set, "details", details) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -347,7 +464,7 @@ func (db *Anonymiser) anonymiseMarkers(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("title"), ).Where(table.Col(idColumn).Gt(lastID)).Limit(1000) @@ -399,7 +516,7 @@ func (db *Anonymiser) anonymiseImages(ctx context.Context) error { for gotSome := true; gotSome; { if err := 
txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("title"), ).Where(table.Col(idColumn).Gt(lastID)).Limit(1000) @@ -424,7 +541,7 @@ func (db *Anonymiser) anonymiseImages(ctx context.Context) error { db.obfuscateNullString(set, "title", title) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -462,7 +579,7 @@ func (db *Anonymiser) anonymiseGalleries(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("title"), table.Col("details"), @@ -495,7 +612,7 @@ func (db *Anonymiser) anonymiseGalleries(ctx context.Context) error { db.obfuscateNullString(set, "photographer", photographer) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -533,7 +650,7 @@ func (db *Anonymiser) anonymisePerformers(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("name"), table.Col("disambiguation"), @@ -574,7 +691,7 @@ func (db *Anonymiser) anonymisePerformers(ctx context.Context) error { db.obfuscateNullString(set, "piercings", piercings) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := 
anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -616,7 +733,7 @@ func (db *Anonymiser) anonymiseStudios(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("name"), table.Col("url"), @@ -649,7 +766,7 @@ func (db *Anonymiser) anonymiseStudios(ctx context.Context) error { db.obfuscateNullString(set, "details", details) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -688,7 +805,7 @@ func (db *Anonymiser) anonymiseAliases(ctx context.Context, table exp.Identifier for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("alias"), ).Where(goqu.L("(" + idColumn + ", alias)").Gt(goqu.L("(?, ?)", lastID, lastAlias))).Limit(1000) @@ -713,7 +830,7 @@ func (db *Anonymiser) anonymiseAliases(ctx context.Context, table exp.Identifier db.obfuscateNullString(set, "alias", alias) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where( + stmt := anon_dialect.Update(table).Set(set).Where( table.Col(idColumn).Eq(id), table.Col("alias").Eq(alias), ) @@ -750,7 +867,7 @@ func (db *Anonymiser) anonymiseURLs(ctx context.Context, table exp.IdentifierExp for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("url"), ).Where(goqu.L("(" 
+ idColumn + ", url)").Gt(goqu.L("(?, ?)", lastID, lastURL))).Limit(1000) @@ -775,7 +892,7 @@ func (db *Anonymiser) anonymiseURLs(ctx context.Context, table exp.IdentifierExp db.obfuscateNullString(set, "url", url) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where( + stmt := anon_dialect.Update(table).Set(set).Where( table.Col(idColumn).Eq(id), table.Col("url").Eq(url), ) @@ -813,7 +930,7 @@ func (db *Anonymiser) anonymiseTags(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("name"), table.Col("description"), @@ -842,7 +959,7 @@ func (db *Anonymiser) anonymiseTags(ctx context.Context) error { db.obfuscateNullString(set, "description", description) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -880,7 +997,7 @@ func (db *Anonymiser) anonymiseGroups(ctx context.Context) error { for gotSome := true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("name"), table.Col("aliases"), @@ -917,7 +1034,7 @@ func (db *Anonymiser) anonymiseGroups(ctx context.Context) error { db.obfuscateNullString(set, "director", director) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -955,7 +1072,7 @@ func (db *Anonymiser) anonymiseSavedFilters(ctx context.Context) error { for gotSome := 
true; gotSome; { if err := txn.WithTxn(ctx, db, func(ctx context.Context) error { - query := dialect.From(table).Select( + query := anon_dialect.From(table).Select( table.Col(idColumn), table.Col("name"), ).Where(table.Col(idColumn).Gt(lastID)).Limit(1000) @@ -980,7 +1097,7 @@ func (db *Anonymiser) anonymiseSavedFilters(ctx context.Context) error { db.obfuscateNullString(set, "name", name) if len(set) > 0 { - stmt := dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(idColumn).Eq(id)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", table.GetTable(), err) @@ -1009,7 +1126,7 @@ func (db *Anonymiser) anonymiseText(ctx context.Context, table exp.IdentifierExp set := goqu.Record{} set[column] = db.obfuscateString(value, letters) - stmt := dialect.Update(table).Set(set).Where(table.Col(column).Eq(value)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(column).Eq(value)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", column, err) @@ -1022,7 +1139,7 @@ func (db *Anonymiser) anonymiseFingerprint(ctx context.Context, table exp.Identi set := goqu.Record{} set[column] = db.obfuscateString(value, hex) - stmt := dialect.Update(table).Set(set).Where(table.Col(column).Eq(value)) + stmt := anon_dialect.Update(table).Set(set).Where(table.Col(column).Eq(value)) if _, err := exec(ctx, stmt); err != nil { return fmt.Errorf("anonymising %s: %w", column, err) diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index a01808395d6..ea8324ba194 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -11,13 +11,12 @@ CREATE TABLE tags ( updated_at timestamp not null, ignore_auto_tag boolean not null default FALSE, description text, - image_blob varchar(255) - REFERENCES blobs(checksum), + image_blob varchar(255) 
REFERENCES blobs(checksum), favorite boolean not null default false ); CREATE TABLE folders ( id serial not null primary key, - path varchar(255) NOT NULL, + path text NOT NULL, parent_folder_id integer, mod_time timestamp not null, created_at timestamp not null, @@ -69,7 +68,7 @@ CREATE TABLE IF NOT EXISTS performers ( CREATE TABLE IF NOT EXISTS studios ( id serial not null primary key, name VARCHAR(255) NOT NULL, - url VARCHAR(255), + url VARCHAR(2048), parent_id INTEGER DEFAULT NULL REFERENCES studios(id) ON DELETE SET NULL, created_at timestamp NOT NULL, updated_at timestamp NOT NULL, @@ -106,7 +105,7 @@ CREATE TABLE IF NOT EXISTS images ( CREATE TABLE image_urls ( image_id integer NOT NULL, position integer NOT NULL, - url varchar(255) NOT NULL, + url varchar(2048) NOT NULL, foreign key(image_id) references images(id) on delete CASCADE, PRIMARY KEY(image_id, position, url) ); @@ -120,14 +119,16 @@ CREATE TABLE IF NOT EXISTS galleries ( rating smallint, organized boolean not null default FALSE, created_at timestamp not null, - updated_at timestamp not null, code text, photographer text, + updated_at timestamp not null, + code text, + photographer text, foreign key(studio_id) references studios(id) on delete SET NULL, foreign key(folder_id) references folders(id) on delete SET NULL ); CREATE TABLE gallery_urls ( gallery_id integer NOT NULL, position integer NOT NULL, - url varchar(255) NOT NULL, + url varchar(2048) NOT NULL, foreign key(gallery_id) references galleries(id) on delete CASCADE, PRIMARY KEY(gallery_id, position, url) ); @@ -166,7 +167,7 @@ CREATE TABLE IF NOT EXISTS groups ( CREATE TABLE IF NOT EXISTS group_urls ( "group_id" integer NOT NULL, position integer NOT NULL, - url varchar(255) NOT NULL, + url varchar(2048) NOT NULL, foreign key("group_id") references "groups"(id) on delete CASCADE, PRIMARY KEY("group_id", position, url) ); @@ -180,7 +181,7 @@ CREATE TABLE IF NOT EXISTS groups_tags ( CREATE TABLE performer_urls ( performer_id integer NOT 
NULL, position integer NOT NULL, - url varchar(255) NOT NULL, + url varchar(2048) NOT NULL, foreign key(performer_id) references performers(id) on delete CASCADE, PRIMARY KEY(performer_id, position, url) ); @@ -203,13 +204,13 @@ CREATE TABLE IF NOT EXISTS scenes_o_dates ( ); CREATE TABLE performer_stash_ids ( performer_id integer, - endpoint varchar(255), + endpoint varchar(2048), stash_id uuid, foreign key(performer_id) references performers(id) on delete CASCADE ); CREATE TABLE studio_stash_ids ( studio_id integer, - endpoint varchar(255), + endpoint varchar(2048), stash_id uuid, foreign key(studio_id) references studios(id) on delete CASCADE ); @@ -336,7 +337,7 @@ CREATE TABLE IF NOT EXISTS images_tags ( ); CREATE TABLE IF NOT EXISTS scene_stash_ids ( scene_id integer NOT NULL, - endpoint varchar(255) NOT NULL, + endpoint varchar(2048) NOT NULL, stash_id uuid NOT NULL, foreign key(scene_id) references scenes(id) on delete CASCADE, PRIMARY KEY(scene_id, endpoint) @@ -407,7 +408,7 @@ CREATE TABLE galleries_chapters ( CREATE TABLE scene_urls ( scene_id integer NOT NULL, position integer NOT NULL, - url varchar(255) NOT NULL, + url varchar(2048) NOT NULL, foreign key(scene_id) references scenes(id) on delete CASCADE, PRIMARY KEY(scene_id, position, url) ); @@ -415,7 +416,7 @@ CREATE TABLE groups_relations ( containing_id integer not null, sub_id integer not null, order_index integer not null, - description varchar(255), + description text, primary key (containing_id, sub_id), foreign key (containing_id) references groups(id) on delete cascade, foreign key (sub_id) references groups(id) on delete cascade, diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index 46eb68fb76a..f1676a5e6a2 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -178,6 +178,8 @@ func (db *dbWrapperType) ExecStmt(ctx context.Context, stmt *stmt, args ...inter type SavepointAction func(ctx context.Context) error +// Encapsulates an action in a savepoint +// Its mostly used to rollback if an 
error occured in postgres, as errors in postgres cancel the transaction. func withSavepoint(ctx context.Context, action SavepointAction) error { tx, err := getTx(ctx) if err != nil { @@ -189,6 +191,8 @@ func withSavepoint(ctx context.Context, action SavepointAction) error { if err != nil { return err } + + // Sqlite needs some letters infront of the identifier rnd = "savepoint_" + rnd // Create a savepoint From 9f757caf97f2ff08879bd651a318c55d8a5e30c8 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 12:27:55 +0100 Subject: [PATCH 73/85] fix misspelling of occurred --- pkg/sqlite/tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/sqlite/tx.go b/pkg/sqlite/tx.go index f1676a5e6a2..4733a9ed8e4 100644 --- a/pkg/sqlite/tx.go +++ b/pkg/sqlite/tx.go @@ -179,7 +179,7 @@ func (db *dbWrapperType) ExecStmt(ctx context.Context, stmt *stmt, args ...inter type SavepointAction func(ctx context.Context) error // Encapsulates an action in a savepoint -// Its mostly used to rollback if an error occured in postgres, as errors in postgres cancel the transaction. +// Its mostly used to rollback if an error occurred in postgres, as errors in postgres cancel the transaction. 
func withSavepoint(ctx context.Context, action SavepointAction) error { tx, err := getTx(ctx) if err != nil { From 2436f6f3b8f9d3fd731a1a6b4757069668b8f159 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:04:00 +0100 Subject: [PATCH 74/85] Added migrations for pgsql --- go.sum | 12 ++++-------- .../migrationsPostgres/2_image_studio_index.up.sql | 7 +++++++ .../migrationsPostgres/3_stash_id_updated_at.up.sql | 3 +++ 3 files changed, 14 insertions(+), 8 deletions(-) create mode 100644 pkg/sqlite/migrationsPostgres/2_image_studio_index.up.sql create mode 100644 pkg/sqlite/migrationsPostgres/3_stash_id_updated_at.up.sql diff --git a/go.sum b/go.sum index 6c7b6a46de0..7dfce0dcb84 100644 --- a/go.sum +++ b/go.sum @@ -53,6 +53,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM= github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= @@ -168,9 +170,10 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw= +github.com/dhui/dktest v0.3.16/go.mod h1:gYaA3LRmM8Z4vJl2MA0THIigJoZrwOansEOsp+kqxp0= github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c= github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= @@ -190,8 +193,6 @@ github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d h1:wi6jN5LVt/ljaBG4ue7 github.com/dop251/goja v0.0.0-20231027120936-b396bb4c349d/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= -github.com/doug-martin/goqu/v9 v9.18.0 h1:/6bcuEtAe6nsSMVK/M+fOiXUNfyFF3yYtE07DBPFMYY= -github.com/doug-martin/goqu/v9 v9.18.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ= github.com/doug-martin/goqu/v9 v9.19.1-0.20231214054827-21b6e6d1cb1b h1:WaCes6lOJCbIDgABfA8gB1ADMQo6+ftGEkj+oIB+vm4= github.com/doug-martin/goqu/v9 v9.19.1-0.20231214054827-21b6e6d1cb1b/go.mod h1:1MqhYk2p5QFEUT9ZzH+M02Jv8BbOYlvzupULdHl7Mjs= github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -239,7 +240,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -263,7 +263,6 @@ github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOW github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-migrate/migrate/v4 v4.16.2 h1:8coYbMKUyInrFk1lfGfRovTLAW7PhWp8qQDT2iKfuoA= github.com/golang-migrate/migrate/v4 v4.16.2/go.mod h1:pfcJX4nPHaVdc5nmdCikFBWtm+UBpiZjRNNsyBbp0/o= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -469,7 +468,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= -github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 
h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= @@ -497,7 +495,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -726,7 +723,6 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= diff --git a/pkg/sqlite/migrationsPostgres/2_image_studio_index.up.sql b/pkg/sqlite/migrationsPostgres/2_image_studio_index.up.sql new file mode 100644 index 
00000000000..3c28cf1196c --- /dev/null +++ b/pkg/sqlite/migrationsPostgres/2_image_studio_index.up.sql @@ -0,0 +1,7 @@ +-- with the existing index, if no images have a studio id, then the index is +-- not used when filtering by studio id. The assumption with this change is that +-- most images don't have a studio id, so filtering by non-null studio id should +-- be faster with this index. This is a tradeoff, as filtering by null studio id +-- will be slower. +DROP INDEX index_images_on_studio_id; +CREATE INDEX index_images_on_studio_id on images (studio_id) WHERE studio_id IS NOT NULL; \ No newline at end of file diff --git a/pkg/sqlite/migrationsPostgres/3_stash_id_updated_at.up.sql b/pkg/sqlite/migrationsPostgres/3_stash_id_updated_at.up.sql new file mode 100644 index 00000000000..8bf9a8cb004 --- /dev/null +++ b/pkg/sqlite/migrationsPostgres/3_stash_id_updated_at.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE performer_stash_ids ADD COLUMN updated_at timestamp not null default '1970-01-01T00:00:00Z'; +ALTER TABLE scene_stash_ids ADD COLUMN updated_at timestamp not null default '1970-01-01T00:00:00Z'; +ALTER TABLE studio_stash_ids ADD COLUMN updated_at timestamp not null default '1970-01-01T00:00:00Z'; From 2b5577952ad13faddf69f5f617c1bd9c1ef776e2 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:17:49 +0100 Subject: [PATCH 75/85] dbConfig as string --- pkg/sqlite/database.go | 2 +- pkg/sqlite/database_postgres.go | 2 +- pkg/sqlite/database_sqlite.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index cd3d49309e9..05d72672c29 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -141,7 +141,7 @@ type Database struct { readDB *sqlx.DB writeDB *sqlx.DB - dbConfig interface{} + dbConfig string schemaVersion uint diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index af21a775b20..b2cb9fa87ad 100644 --- 
a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -72,7 +72,7 @@ func (db *PostgresDB) AppSchemaVersion() uint { } func (db *PostgresDB) DatabaseConnector() string { - return db.dbConfig.(string) + return db.dbConfig } func (db *PostgresDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 31da52ba8f4..3b7ee417b5f 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -88,7 +88,7 @@ func (db *SQLiteDB) DatabaseType() DatabaseType { } func (db *SQLiteDB) DatabasePath() string { - return (db.dbConfig).(string) + return db.dbConfig } func (db *SQLiteDB) open(disableForeignKeys bool, writable bool) (conn *sqlx.DB, err error) { From 3f457e3f25ec2f36e19278a25f96d3fd40ec2e1c Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 13:22:26 +0100 Subject: [PATCH 76/85] change most varchar to text --- .../migrationsPostgres/1_initial.up.sql | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index ea8324ba194..24191c0a096 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -6,7 +6,7 @@ CREATE TABLE blobs ( ); CREATE TABLE tags ( id serial not null primary key, - name varchar(255), + name text, created_at timestamp not null, updated_at timestamp not null, ignore_auto_tag boolean not null default FALSE, @@ -39,35 +39,35 @@ CREATE TABLE files ( ALTER TABLE folders ADD COLUMN zip_file_id integer REFERENCES files(id); CREATE TABLE IF NOT EXISTS performers ( id serial not null primary key, - name varchar(255) not null, - disambiguation varchar(255), + name text not null, + disambiguation text, gender varchar(20), birthdate date, - ethnicity varchar(255), - country varchar(255), - 
eye_color varchar(255), + ethnicity text, + country text, + eye_color text, height int, - measurements varchar(255), - fake_tits varchar(255), - career_length varchar(255), - tattoos text, -- For you artsy motherfuckers + measurements text, + fake_tits text, + career_length text, + tattoos text, piercings text, favorite boolean not null default FALSE, created_at timestamp not null, updated_at timestamp not null, details text, death_date date, - hair_color varchar(255), + hair_color text, weight integer, rating smallint, ignore_auto_tag boolean not null default FALSE, image_blob varchar(255) REFERENCES blobs(checksum), penis_length float, - circumcised varchar(255) + circumcised text ); CREATE TABLE IF NOT EXISTS studios ( id serial not null primary key, - name VARCHAR(255) NOT NULL, + name text NOT NULL, url VARCHAR(2048), parent_id INTEGER DEFAULT NULL REFERENCES studios(id) ON DELETE SET NULL, created_at timestamp NOT NULL, @@ -81,7 +81,7 @@ CREATE TABLE IF NOT EXISTS studios ( ); CREATE TABLE IF NOT EXISTS saved_filters ( id serial not null primary key, - name varchar(510) not null, + name text not null, mode varchar(255) not null, find_filter bytea, object_filter bytea, @@ -89,7 +89,7 @@ CREATE TABLE IF NOT EXISTS saved_filters ( ); CREATE TABLE IF NOT EXISTS images ( id serial not null primary key, - title varchar(255), + title text, rating smallint, studio_id integer, o_counter smallint not null default 0, @@ -112,7 +112,7 @@ CREATE TABLE image_urls ( CREATE TABLE IF NOT EXISTS galleries ( id serial not null primary key, folder_id integer, - title varchar(255), + title text, date date, details text, studio_id integer, @@ -134,7 +134,7 @@ CREATE TABLE gallery_urls ( ); CREATE TABLE IF NOT EXISTS scenes ( id serial not null primary key, - title varchar(255), + title text, details text, date date, rating smallint, @@ -151,13 +151,13 @@ CREATE TABLE IF NOT EXISTS scenes ( ); CREATE TABLE IF NOT EXISTS groups ( id serial not null primary key, - name varchar(255) 
not null, - aliases varchar(255), + name text not null, + aliases text, duration integer, date date, rating smallint, studio_id integer REFERENCES studios(id) ON DELETE SET NULL, - director varchar(255), + director text, "description" text, created_at timestamp not null, updated_at timestamp not null, @@ -290,7 +290,7 @@ CREATE TABLE IF NOT EXISTS performers_scenes ( ); CREATE TABLE IF NOT EXISTS scene_markers ( id serial not null primary key, - title VARCHAR(255) NOT NULL, + title text NOT NULL, seconds FLOAT NOT NULL, primary_tag_id INTEGER NOT NULL, scene_id INTEGER NOT NULL, @@ -380,25 +380,25 @@ CREATE TABLE IF NOT EXISTS performers_tags ( ); CREATE TABLE IF NOT EXISTS tag_aliases ( tag_id integer NOT NULL, - alias varchar(255) NOT NULL, + alias text NOT NULL, foreign key(tag_id) references tags(id) on delete CASCADE, PRIMARY KEY(tag_id, alias) ); CREATE TABLE IF NOT EXISTS studio_aliases ( studio_id integer NOT NULL, - alias varchar(255) NOT NULL, + alias text NOT NULL, foreign key(studio_id) references studios(id) on delete CASCADE, PRIMARY KEY(studio_id, alias) ); CREATE TABLE performer_aliases ( performer_id integer NOT NULL, - alias varchar(255) NOT NULL, + alias text NOT NULL, foreign key(performer_id) references performers(id) on delete CASCADE, PRIMARY KEY(performer_id, alias) ); CREATE TABLE galleries_chapters ( id serial not null primary key, - title varchar(255) not null, + title text not null, image_index integer not null, gallery_id integer not null, created_at timestamp not null, From d3771bfe809b6ddeb0e7b054d6623968aa96ea53 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 18:05:08 +0100 Subject: [PATCH 77/85] performance fix for pgsql --- pkg/sqlite/database_postgres.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index b2cb9fa87ad..14457c49758 100644 --- a/pkg/sqlite/database_postgres.go +++ 
b/pkg/sqlite/database_postgres.go @@ -14,6 +14,12 @@ type PostgresDB struct { Database } +const ( + // TODO: Optimize for these + maxPGWriteConnections = 10 + maxPGReadConnections = 25 +) + func NewPostgresDatabase(dbConnector string, init bool) *PostgresDB { db := &PostgresDB{ Database: Database{ @@ -42,6 +48,8 @@ func (db *PostgresDB) openReadDB() error { ) var err error db.readDB, err = db.open(disableForeignKeys, writable) + db.readDB.SetMaxOpenConns(maxPGReadConnections) + db.readDB.SetMaxIdleConns(maxPGReadConnections) db.readDB.SetConnMaxIdleTime(dbConnTimeout) return err } @@ -53,6 +61,8 @@ func (db *PostgresDB) openWriteDB() error { ) var err error db.writeDB, err = db.open(disableForeignKeys, writable) + db.writeDB.SetMaxOpenConns(maxPGWriteConnections) + db.writeDB.SetMaxIdleConns(maxPGWriteConnections) db.writeDB.SetConnMaxIdleTime(dbConnTimeout) return err } From 3f1da4d8caaf9e4e2f39c9dc1dbfbc04c61df771 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 1 Nov 2024 18:16:03 +0100 Subject: [PATCH 78/85] Optimize better --- pkg/sqlite/database_postgres.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 14457c49758..74914c2d1d1 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -16,8 +16,8 @@ type PostgresDB struct { const ( // TODO: Optimize for these - maxPGWriteConnections = 10 - maxPGReadConnections = 25 + maxPGWriteConnections = 5 + maxPGReadConnections = 15 ) func NewPostgresDatabase(dbConnector string, init bool) *PostgresDB { From 393ad597ad3f181c88144d8fff56cfc0f43430ba Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Sat, 2 Nov 2024 19:36:11 +0100 Subject: [PATCH 79/85] Replace ilike with LOWER(?) LIKE LOWER(?). 
Its slightly more performant, and slightly prettier --- pkg/sqlite/criterion_handlers.go | 12 ++++++------ pkg/sqlite/database.go | 9 --------- pkg/sqlite/filter_internal_test.go | 12 ++++++------ pkg/sqlite/gallery_filter.go | 4 ++-- pkg/sqlite/query.go | 6 +++--- pkg/sqlite/scene_marker.go | 2 +- pkg/sqlite/sql.go | 4 ++-- pkg/sqlite/tag.go | 4 ++-- 8 files changed, 22 insertions(+), 31 deletions(-) diff --git a/pkg/sqlite/criterion_handlers.go b/pkg/sqlite/criterion_handlers.go index 7cd1aec1b72..d56a1681465 100644 --- a/pkg/sqlite/criterion_handlers.go +++ b/pkg/sqlite/criterion_handlers.go @@ -43,9 +43,9 @@ func stringCriterionHandler(c *models.StringCriterionInput, column string) crite case models.CriterionModifierExcludes: f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{column}, c.Value, true)) case models.CriterionModifierEquals: - f.addWhere(column+" "+getDBLike()+" ?", c.Value) + f.addWhere("LOWER("+column+") LIKE LOWER(?)", c.Value) case models.CriterionModifierNotEquals: - f.addWhere(column+" NOT "+getDBLike()+" ?", c.Value) + f.addWhere("LOWER("+column+") NOT LIKE LOWER(?)", c.Value) case models.CriterionModifierMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { f.setError(err) @@ -86,9 +86,9 @@ func uuidCriterionHandler(c *models.StringCriterionInput, column string) criteri case models.CriterionModifierExcludes: f.whereClauses = append(f.whereClauses, getStringSearchClause([]string{columnCast}, c.Value, true)) case models.CriterionModifierEquals: - f.addWhere(columnCast+" "+getDBLike()+" ?", c.Value) + f.addWhere("LOWER("+columnCast+") LIKE LOWER(?)", c.Value) case models.CriterionModifierNotEquals: - f.addWhere(columnCast+" NOT "+getDBLike()+" ?", c.Value) + f.addWhere("LOWER("+columnCast+") NOT LIKE LOWER(?)", c.Value) case models.CriterionModifierMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { f.setError(err) @@ -191,7 +191,7 @@ func getPathSearchClause(pathColumn, basenameColumn, p string, 
addWildcards, not } filepathColumn := fmt.Sprintf("%s || '%s' || %s", pathColumn, string(filepath.Separator), basenameColumn) - ret := makeClause(fmt.Sprintf("%s "+getDBLike()+" ?", filepathColumn), p) + ret := makeClause(fmt.Sprintf("LOWER(%s) LIKE LOWER(?)", filepathColumn), p) if not { ret = ret.not() @@ -589,7 +589,7 @@ func (m *stringListCriterionHandlerBuilder) handler(criterion *models.StringCrit // excludes all of the provided values // need to use actual join table name for this // .id NOT IN (select . from where . in ) - whereClause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{primaryFK} from {joinTable} where {joinTable}.{stringColumn} "+getDBLike()+" ?)", + whereClause := utils.StrFormat("{primaryTable}.id NOT IN (SELECT {joinTable}.{primaryFK} from {joinTable} where LOWER({joinTable}.{stringColumn}) LIKE LOWER(?))", utils.StrFormatMap{ "primaryTable": m.primaryTable, "joinTable": m.joinTable, diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 05d72672c29..4f27375ff54 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -207,15 +207,6 @@ func getDBMinFunc() string { } } -func getDBLike() string { - switch dbWrapper.dbType { - case PostgresBackend: - return "ILIKE" - default: - return "LIKE" - } -} - func getDBGroupConcat(columnName string) string { switch dbWrapper.dbType { case PostgresBackend: diff --git a/pkg/sqlite/filter_internal_test.go b/pkg/sqlite/filter_internal_test.go index 7e8f0a28701..9444bdd4af3 100644 --- a/pkg/sqlite/filter_internal_test.go +++ b/pkg/sqlite/filter_internal_test.go @@ -471,7 +471,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s "+getDBLike()+" ? OR %[1]s "+getDBLike()+" ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(LOWER(%[1]s) LIKE LOWER(?) 
OR LOWER(%[1]s) LIKE LOWER(?))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 2) assert.Equal("%two%", f.whereClauses[0].args[0]) assert.Equal("%words%", f.whereClauses[0].args[1]) @@ -483,7 +483,7 @@ func TestStringCriterionHandlerIncludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s "+getDBLike()+" ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(LOWER(%[1]s) LIKE LOWER(?))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal("%two words%", f.whereClauses[0].args[0]) } @@ -502,7 +502,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s NOT "+getDBLike()+" ? AND %[1]s NOT "+getDBLike()+" ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(LOWER(%[1]s) NOT LIKE LOWER(?) AND LOWER(%[1]s) NOT LIKE LOWER(?))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 2) assert.Equal("%two%", f.whereClauses[0].args[0]) assert.Equal("%words%", f.whereClauses[0].args[1]) @@ -514,7 +514,7 @@ func TestStringCriterionHandlerExcludes(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("(%[1]s NOT "+getDBLike()+" ?)", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("(LOWER(%[1]s) NOT LIKE LOWER(?))", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal("%two words%", f.whereClauses[0].args[0]) } @@ -532,7 +532,7 @@ func TestStringCriterionHandlerEquals(t *testing.T) { }, column)) assert.Len(f.whereClauses, 1) - assert.Equal(fmt.Sprintf("%[1]s "+getDBLike()+" ?", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("LOWER(%[1]s) LIKE LOWER(?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(value1, f.whereClauses[0].args[0]) } @@ -550,7 +550,7 @@ func TestStringCriterionHandlerNotEquals(t *testing.T) { }, column)) assert.Len(f.whereClauses, 
1) - assert.Equal(fmt.Sprintf("%[1]s NOT "+getDBLike()+" ?", column), f.whereClauses[0].sql) + assert.Equal(fmt.Sprintf("LOWER(%[1]s) NOT LIKE LOWER(?)", column), f.whereClauses[0].sql) assert.Len(f.whereClauses[0].args, 1) assert.Equal(value1, f.whereClauses[0].args[0]) } diff --git a/pkg/sqlite/gallery_filter.go b/pkg/sqlite/gallery_filter.go index 7ef870ecb29..00c7cc9033a 100644 --- a/pkg/sqlite/gallery_filter.go +++ b/pkg/sqlite/gallery_filter.go @@ -201,13 +201,13 @@ func (qb *galleryFilterHandler) pathCriterionHandler(c *models.StringCriterionIn case models.CriterionModifierEquals: addWildcards = false clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) - clause2 := makeClause(folderPathColumn+" "+getDBLike()+" ?", c.Value) + clause2 := makeClause("LOWER("+folderPathColumn+") LIKE LOWER(?)", c.Value) f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) case models.CriterionModifierNotEquals: addWildcards = false not = true clause := getPathSearchClause(pathColumn, basenameColumn, c.Value, addWildcards, not) - clause2 := makeClause(folderPathColumn+" NOT "+getDBLike()+" ?", c.Value) + clause2 := makeClause("LOWER("+folderPathColumn+") NOT LIKE LOWER(?)", c.Value) f.whereClauses = append(f.whereClauses, orClauses(clause, clause2)) case models.CriterionModifierMatchesRegex: if _, err := regexp.Compile(c.Value); err != nil { diff --git a/pkg/sqlite/query.go b/pkg/sqlite/query.go index f5bc91f0820..26cc0542b81 100644 --- a/pkg/sqlite/query.go +++ b/pkg/sqlite/query.go @@ -190,7 +190,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { var clauses []string for _, column := range columns { - clauses = append(clauses, column+" "+getDBLike()+" ?") + clauses = append(clauses, "LOWER("+column+") LIKE LOWER(?)") qb.addArg(like(t)) } @@ -199,7 +199,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { for _, t := range specs.MustNot { for _, column := range columns { - 
qb.addWhere(coalesce(column) + " NOT " + getDBLike() + " ?") + qb.addWhere("LOWER(" + coalesce(column) + ") NOT LIKE LOWER(?)") qb.addArg(like(t)) } } @@ -209,7 +209,7 @@ func (qb *queryBuilder) parseQueryString(columns []string, q string) { for _, column := range columns { for _, v := range set { - clauses = append(clauses, column+" "+getDBLike()+" ?") + clauses = append(clauses, "LOWER("+column+") LIKE LOWER(?)") qb.addArg(like(v)) } } diff --git a/pkg/sqlite/scene_marker.go b/pkg/sqlite/scene_marker.go index c8165335460..8690f60b599 100644 --- a/pkg/sqlite/scene_marker.go +++ b/pkg/sqlite/scene_marker.go @@ -266,7 +266,7 @@ func (qb *SceneMarkerStore) CountByTagID(ctx context.Context, tagID int) (int, e func (qb *SceneMarkerStore) GetMarkerStrings(ctx context.Context, q *string, sort *string) ([]*models.MarkerStringsResultType, error) { query := "SELECT count(*) as `count`, scene_markers.id as id, scene_markers.title as title FROM scene_markers" if q != nil { - query += " WHERE title " + getDBLike() + " '%" + *q + "%'" + query += " WHERE LOWER(title) LIKE LOWER('%" + *q + "%')" } query += " GROUP BY title" if sort != nil && *sort == "count" { diff --git a/pkg/sqlite/sql.go b/pkg/sqlite/sql.go index a345c596cef..4e58c22403a 100644 --- a/pkg/sqlite/sql.go +++ b/pkg/sqlite/sql.go @@ -169,14 +169,14 @@ func getStringSearchClause(columns []string, q string, not bool) sqlClause { // Search for any word for _, word := range queryWords { for _, column := range columns { - likeClauses = append(likeClauses, column+notStr+" "+getDBLike()+" ?") + likeClauses = append(likeClauses, "LOWER("+column+")"+notStr+" LIKE LOWER(?)") args = append(args, "%"+word+"%") } } } else { // Search the exact query for _, column := range columns { - likeClauses = append(likeClauses, column+notStr+" "+getDBLike()+" ?") + likeClauses = append(likeClauses, "LOWER("+column+")"+notStr+" LIKE LOWER(?)") args = append(args, "%"+trimmedQuery+"%") } } diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go 
index 5fdcf6b06cc..361486a7ae4 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -583,11 +583,11 @@ func (qb *TagStore) QueryForAutoTag(ctx context.Context, words []string) ([]*mod for _, w := range words { ww := w + "%" - whereClauses = append(whereClauses, "tags.name "+getDBLike()+" ?") + whereClauses = append(whereClauses, "LOWER(tags.name) LIKE LOWER(?)") args = append(args, ww) // include aliases - whereClauses = append(whereClauses, "tag_aliases.alias "+getDBLike()+" ?") + whereClauses = append(whereClauses, "LOWER(tag_aliases.alias) LIKE LOWER(?)") args = append(args, ww) } From 192f757b13cd04a5fdaba29df9b39e43096e447a Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Wed, 6 Nov 2024 13:06:16 +0100 Subject: [PATCH 80/85] Fix blob insert bug --- pkg/sqlite/blob.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/sqlite/blob.go b/pkg/sqlite/blob.go index 3970836fc3b..f278568a16b 100644 --- a/pkg/sqlite/blob.go +++ b/pkg/sqlite/blob.go @@ -127,7 +127,7 @@ func (qb *BlobStore) write(ctx context.Context, checksum string, data sql.Null[[ q := dialect.Insert(table).Rows(blobRow{ Checksum: checksum, Blob: data, - }).OnConflict(goqu.DoNothing()) + }).OnConflict(goqu.DoNothing()).Prepared(true) _, err := exec(ctx, q) if err != nil { @@ -141,7 +141,7 @@ func (qb *BlobStore) update(ctx context.Context, checksum string, data []byte) e table := qb.table() q := dialect.Update(table).Set(goqu.Record{ "blob": data, - }).Where(goqu.C(blobChecksumColumn).Eq(checksum)) + }).Where(goqu.C(blobChecksumColumn).Eq(checksum)).Prepared(true) _, err := exec(ctx, q) if err != nil { From 4d729b0969fbc99a25b251f49c97e978052dd3fe Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 7 Nov 2024 10:42:02 +0100 Subject: [PATCH 81/85] Fix bug in tags that made finding ancestors and descendants not work in postgresql. Postgres is stricter and doesnt allow excess args. 
--- pkg/sqlite/tag.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/pkg/sqlite/tag.go b/pkg/sqlite/tag.go index 361486a7ae4..c2c5c8d5b71 100644 --- a/pkg/sqlite/tag.go +++ b/pkg/sqlite/tag.go @@ -907,12 +907,10 @@ parents AS ( SELECT t.*, p.path FROM tags t INNER JOIN parents p ON t.id = p.parent_id ` - excludeArgs := []interface{}{tagID} + args := []interface{}{tagID, tagID} for _, excludeID := range excludeIDs { - excludeArgs = append(excludeArgs, excludeID) + args = append(args, excludeID) } - args := []interface{}{tagID} - args = append(args, append(append(excludeArgs, excludeArgs...), excludeArgs...)...) return qb.queryTagPaths(ctx, query, args) } @@ -931,12 +929,10 @@ children AS ( SELECT t.*, c.path FROM tags t INNER JOIN children c ON t.id = c.child_id ` - excludeArgs := []interface{}{tagID} + args := []interface{}{tagID, tagID} for _, excludeID := range excludeIDs { - excludeArgs = append(excludeArgs, excludeID) + args = append(args, excludeID) } - args := []interface{}{tagID} - args = append(args, append(append(excludeArgs, excludeArgs...), excludeArgs...)...) return qb.queryTagPaths(ctx, query, args) } From 23ba774dcff742b2593bcca43e688e1bdefbec99 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:03:50 +0100 Subject: [PATCH 82/85] Setup now works So we initialize the specific database type in PostInit, which also makes it unavailable to the manager. 
--- internal/autotag/integration_test.go | 4 ++-- internal/manager/init.go | 34 +++++++++++++++++----------- internal/manager/manager.go | 8 ++++++- pkg/sqlite/anonymise.go | 2 +- pkg/sqlite/database.go | 14 +++++++++++- pkg/sqlite/database_postgres.go | 11 ++++----- pkg/sqlite/database_sqlite.go | 12 ++++------ pkg/sqlite/setup_test.go | 4 ++-- 8 files changed, 56 insertions(+), 33 deletions(-) diff --git a/internal/autotag/integration_test.go b/internal/autotag/integration_test.go index 4a4099040f8..efb010463e3 100644 --- a/internal/autotag/integration_test.go +++ b/internal/autotag/integration_test.go @@ -48,10 +48,10 @@ func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { fmt.Printf("Postgres backend for tests detected\n") - db = sqlite.NewPostgresDatabase(dbUrl, true) + db = sqlite.NewPostgresDatabase(sqlite.NewDatabase(), dbUrl, true) } else { fmt.Printf("SQLite backend for tests detected\n") - db = sqlite.NewSQLiteDatabase(databaseFile, true) + db = sqlite.NewSQLiteDatabase(sqlite.NewDatabase(), databaseFile, true) } return db diff --git a/internal/manager/init.go b/internal/manager/init.go index f89178443e2..1834b2df4b6 100644 --- a/internal/manager/init.go +++ b/internal/manager/init.go @@ -35,19 +35,7 @@ import ( func Initialize(cfg *config.Config, l *log.Logger) (*Manager, error) { ctx := context.TODO() - var db sqlite.DBInterface - - dbUrl := cfg.GetDatabaseUrl() - upperUrl := strings.ToUpper(dbUrl) - switch { - case strings.HasPrefix(upperUrl, string(sqlite.PostgresBackend)+":"): - db = sqlite.NewPostgresDatabase(dbUrl, true) - case strings.HasPrefix(upperUrl, string(sqlite.SqliteBackend)+":"): - db = sqlite.NewSQLiteDatabase(dbUrl[len(sqlite.SqliteBackend)+1:], true) - default: - // Assume it's the path to a SQLite database - for backwards compat - db = sqlite.NewSQLiteDatabase(dbUrl, true) - } + var db *sqlite.Database = sqlite.NewDatabase() repo := db.Repository() @@ -198,11 +186,31 @@ func 
initJobManager(cfg *config.Config) *job.Manager { return ret } +// Initializes the specific DB type +func (s *Manager) RefreshDB() { + cfg := s.Config + + var odb *sqlite.Database = s.Database.Pointer() + + dbUrl := cfg.GetDatabaseUrl() + upperUrl := strings.ToUpper(dbUrl) + switch { + case strings.HasPrefix(upperUrl, string(sqlite.PostgresBackend)+":"): + s.Database = sqlite.NewPostgresDatabase(odb, dbUrl, true) + case strings.HasPrefix(upperUrl, string(sqlite.SqliteBackend)+":"): + s.Database = sqlite.NewSQLiteDatabase(odb, dbUrl[len(sqlite.SqliteBackend)+1:], true) + default: + // Assume it's the path to a SQLite database - for backwards compat + s.Database = sqlite.NewSQLiteDatabase(odb, dbUrl, true) + } +} + // postInit initialises the paths, caches and database after the initial // configuration has been set. Should only be called if the configuration // is valid. func (s *Manager) postInit(ctx context.Context) error { s.RefreshConfig() + s.RefreshDB() s.SessionStore = session.NewStore(s.Config) s.PluginCache.RegisterSessionStore(s.SessionStore) diff --git a/internal/manager/manager.go b/internal/manager/manager.go index 138e38570b2..9a55f3d0185 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -6,6 +6,7 @@ import ( "context" "errors" "fmt" + "math" "net/http" "os" "path/filepath" @@ -383,7 +384,12 @@ func (s *Manager) GetSystemStatus() *SystemStatus { database := s.Database dbSchema := int(database.Version()) dbPath := database.DatabasePath() - appSchema := int(database.AppSchemaVersion()) + appSchema := math.MaxInt32 + + // Database is not initialized and cannot be used yet + if !s.Config.IsNewSystem() { + appSchema = int(database.AppSchemaVersion()) + } status := SystemStatusEnumOk if s.Config.IsNewSystem() { diff --git a/pkg/sqlite/anonymise.go b/pkg/sqlite/anonymise.go index a36642b0d39..1a02444fa57 100644 --- a/pkg/sqlite/anonymise.go +++ b/pkg/sqlite/anonymise.go @@ -31,7 +31,7 @@ type Anonymiser struct { var anon_dialect = 
goqu.Dialect("sqlite3") func NewAnonymiser(db DBInterface, outPath string) (*Anonymiser, error) { - newDB := NewSQLiteDatabase(outPath, false) + newDB := NewSQLiteDatabase(NewDatabase(), outPath, false) if err := newDB.Open(); err != nil { return nil, fmt.Errorf("opening %s: %w", outPath, err) } diff --git a/pkg/sqlite/database.go b/pkg/sqlite/database.go index 4f27375ff54..1f001f44a90 100644 --- a/pkg/sqlite/database.go +++ b/pkg/sqlite/database.go @@ -133,6 +133,7 @@ type DBInterface interface { Version() uint WithDatabase(ctx context.Context) (context.Context, error) TestMode() + Pointer() *Database } type Database struct { @@ -148,7 +149,7 @@ type Database struct { lockChan chan struct{} } -func newDatabase() *storeRepository { +func newStoreRepo() *storeRepository { fileStore := NewFileStore() folderStore := NewFolderStore() galleryStore := NewGalleryStore(fileStore, folderStore) @@ -177,6 +178,13 @@ func newDatabase() *storeRepository { return r } +func NewDatabase() *Database { + return &Database{ + storeRepository: newStoreRepo(), + lockChan: make(chan struct{}, 1), + } +} + func getDBBoolean(val bool) string { switch dbWrapper.dbType { case SqliteBackend: @@ -242,6 +250,10 @@ func isConstraintError(err error) bool { return false } +func (db *Database) Pointer() *Database { + return db +} + func (db *Database) SetSchemaVersion(version uint) { db.schemaVersion = version } diff --git a/pkg/sqlite/database_postgres.go b/pkg/sqlite/database_postgres.go index 74914c2d1d1..83f79607bf5 100644 --- a/pkg/sqlite/database_postgres.go +++ b/pkg/sqlite/database_postgres.go @@ -11,7 +11,7 @@ import ( ) type PostgresDB struct { - Database + *Database } const ( @@ -20,13 +20,12 @@ const ( maxPGReadConnections = 15 ) -func NewPostgresDatabase(dbConnector string, init bool) *PostgresDB { +func NewPostgresDatabase(odb *Database, dbConnector string, init bool) *PostgresDB { db := &PostgresDB{ - Database: Database{ - storeRepository: newDatabase(), - dbConfig: dbConnector, - }, 
+ Database: odb, } + + db.dbConfig = dbConnector db.DBInterface = db if init { diff --git a/pkg/sqlite/database_sqlite.go b/pkg/sqlite/database_sqlite.go index 3b7ee417b5f..943093117fe 100644 --- a/pkg/sqlite/database_sqlite.go +++ b/pkg/sqlite/database_sqlite.go @@ -13,17 +13,15 @@ import ( ) type SQLiteDB struct { - Database + *Database } -func NewSQLiteDatabase(dbPath string, init bool) *SQLiteDB { +func NewSQLiteDatabase(odb *Database, dbPath string, init bool) *SQLiteDB { db := &SQLiteDB{ - Database: Database{ - storeRepository: newDatabase(), - lockChan: make(chan struct{}, 1), - dbConfig: dbPath, - }, + Database: odb, } + + db.dbConfig = dbPath db.DBInterface = db if init { diff --git a/pkg/sqlite/setup_test.go b/pkg/sqlite/setup_test.go index 8f7a9ac8eea..56d51be1300 100644 --- a/pkg/sqlite/setup_test.go +++ b/pkg/sqlite/setup_test.go @@ -634,10 +634,10 @@ func getNewDB(databaseFile string) sqlite.DBInterface { dbUrl, valid := os.LookupEnv("PGSQL_TEST") if valid { fmt.Printf("Postgres backend for tests detected\n") - db = sqlite.NewPostgresDatabase(dbUrl, true) + db = sqlite.NewPostgresDatabase(sqlite.NewDatabase(), dbUrl, true) } else { fmt.Printf("SQLite backend for tests detected\n") - db = sqlite.NewSQLiteDatabase(databaseFile, true) + db = sqlite.NewSQLiteDatabase(sqlite.NewDatabase(), databaseFile, true) } return db From cd0f2c56e3effa48c1b22587cf5ef32b010a365e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:15:55 +0100 Subject: [PATCH 83/85] Fix a bug where multiple scene files caused duplicate ids. Fix a bug where phash distance didnt work. Fix a bug where i wrongly wrote getDBLike in a string in captionCriterionHandler. 
--- pkg/sqlite/scene.go | 18 +++++++++--------- pkg/sqlite/scene_filter.go | 30 +++++++++++++----------------- 2 files changed, 22 insertions(+), 26 deletions(-) diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index 950d788a4bb..f4127d5d55a 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -922,11 +922,11 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id", + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id", + onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), }, join{ table: folderTable, @@ -999,11 +999,11 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id", + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), }, join{ table: videoFileTable, - onClause: "scenes_files.file_id = video_files.file_id", + onClause: "scenes_files.file_id = video_files.file_id AND scenes_files.primary = " + getDBBoolean(true), }, ) query.addColumn("COALESCE(video_files.duration, 0) as duration") @@ -1015,11 +1015,11 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id", + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id", + onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), }, ) query.addColumn("COALESCE(files.size, 0) as size") @@ -1099,11 +1099,11 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF 
query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id", + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id", + onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), }, ) } @@ -1158,7 +1158,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF join{ table: fingerprintTable, as: "fingerprints_phash", - onClause: "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'", + onClause: "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = " + getDBBoolean(true) + " AND fingerprints_phash.type = 'phash'", }, ) diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index f8d2c746905..042152e7037 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -65,7 +65,7 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if sceneFilter.Oshash != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND fingerprints_oshash.type = 'oshash'") + f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_oshash.type = 'oshash'") } stringCriterionHandler(sceneFilter.Oshash, "fingerprints_oshash.fingerprint")(ctx, f) @@ -74,7 +74,7 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if sceneFilter.Checksum != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND fingerprints_md5.type = 'md5'") + 
f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_md5.type = 'md5'") } stringCriterionHandler(sceneFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f) @@ -214,12 +214,12 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { } func (qb *sceneFilterHandler) addSceneFilesTable(f *filterBuilder) { - f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id") + f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id AND scenes_files.primary = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) addFilesTable(f *filterBuilder) { qb.addSceneFilesTable(f) - f.addLeftJoin(fileTable, "", "scenes_files.file_id = files.id") + f.addLeftJoin(fileTable, "", "scenes_files.file_id = files.id AND scenes_files.primary = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) addFoldersTable(f *filterBuilder) { @@ -229,7 +229,7 @@ func (qb *sceneFilterHandler) addFoldersTable(f *filterBuilder) { func (qb *sceneFilterHandler) addVideoFilesTable(f *filterBuilder) { qb.addSceneFilesTable(f) - f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id") + f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id AND scenes_files.primary = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) playCountCriterionHandler(count *models.IntCriterionInput) criterionHandlerFunc { @@ -277,7 +277,7 @@ func (qb *sceneFilterHandler) phashDuplicatedCriterionHandler(duplicatedFilter * v = "=" } - f.addInnerJoin("(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) "+v+" 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)", "scph", "scenes_files.file_id = scph.file_id") + f.addInnerJoin("(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints 
WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) "+v+" 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)", "scph", "scenes_files.file_id = scph.file_id AND scenes_files.primary = "+getDBBoolean(true)) } } } @@ -339,7 +339,7 @@ func (qb *sceneFilterHandler) isMissingCriterionHandler(isMissing *string) crite f.addWhere("scene_stash_ids.scene_id IS NULL") case "phash": qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'") + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") f.addWhere("fingerprints_phash.fingerprint IS NULL") case "cover": f.addWhere("scenes.cover_blob IS NULL") @@ -389,7 +389,7 @@ func (qb *sceneFilterHandler) captionCriterionHandler(captions *models.StringCri excludeClause := `scenes.id NOT IN ( SELECT scenes_files.scene_id from scenes_files INNER JOIN video_captions on video_captions.file_id = scenes_files.file_id - WHERE video_captions.language_code " + getDBLike() + " ? + WHERE LOWER(video_captions.language_code) LIKE LOWER(?) 
)` f.addWhere(excludeClause, criterion.Value) @@ -544,7 +544,7 @@ func (qb *sceneFilterHandler) phashDistanceCriterionHandler(phashDistance *model return func(ctx context.Context, f *filterBuilder) { if phashDistance != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'") + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") value, _ := utils.StringToPhash(phashDistance.Value) distance := 0 @@ -557,23 +557,19 @@ func (qb *sceneFilterHandler) phashDistanceCriterionHandler(phashDistance *model intCriterionHandler(&models.IntCriterionInput{ Value: int(value), Modifier: phashDistance.Modifier, - }, "fingerprints_phash.fingerprint", nil)(ctx, f) + }, "CAST(fingerprints_phash.fingerprint AS bigint)", nil)(ctx, f) } switch { case phashDistance.Modifier == models.CriterionModifierEquals && distance > 0: - // needed to avoid a type mismatch - f.addWhere("typeof(fingerprints_phash.fingerprint) = 'integer'") - f.addWhere("phash_distance(fingerprints_phash.fingerprint, ?) < ?", value, distance) + f.addWhere("phash_distance(CAST(fingerprints_phash.fingerprint AS bigint), ?) < ?", value, distance) case phashDistance.Modifier == models.CriterionModifierNotEquals && distance > 0: - // needed to avoid a type mismatch - f.addWhere("typeof(fingerprints_phash.fingerprint) = 'integer'") - f.addWhere("phash_distance(fingerprints_phash.fingerprint, ?) > ?", value, distance) + f.addWhere("phash_distance(CAST(fingerprints_phash.fingerprint AS bigint), ?) 
> ?", value, distance) default: intCriterionHandler(&models.IntCriterionInput{ Value: int(value), Modifier: phashDistance.Modifier, - }, "fingerprints_phash.fingerprint", nil)(ctx, f) + }, "CAST(fingerprints_phash.fingerprint AS bigint)", nil)(ctx, f) } } } From 7dd8643514724522be5fd7f64477f302dadf7b2e Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:37:56 +0100 Subject: [PATCH 84/85] Escape primary keyword for sqlite --- pkg/sqlite/scene.go | 18 +++++++++--------- pkg/sqlite/scene_filter.go | 16 ++++++++-------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index f4127d5d55a..1d702cf0424 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -922,11 +922,11 @@ func (qb *SceneStore) makeQuery(ctx context.Context, sceneFilter *models.SceneFi query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.file_id = files.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, join{ table: folderTable, @@ -999,11 +999,11 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, join{ table: videoFileTable, - onClause: "scenes_files.file_id = video_files.file_id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.file_id = video_files.file_id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, ) 
query.addColumn("COALESCE(video_files.duration, 0) as duration") @@ -1015,11 +1015,11 @@ func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.Sce query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.file_id = files.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, ) query.addColumn("COALESCE(files.size, 0) as size") @@ -1099,11 +1099,11 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF query.addJoins( join{ table: scenesFilesTable, - onClause: "scenes_files.scene_id = scenes.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.scene_id = scenes.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, join{ table: fileTable, - onClause: "scenes_files.file_id = files.id AND scenes_files.primary = " + getDBBoolean(true), + onClause: "scenes_files.file_id = files.id AND scenes_files.\"primary\" = " + getDBBoolean(true), }, ) } @@ -1158,7 +1158,7 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF join{ table: fingerprintTable, as: "fingerprints_phash", - onClause: "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = " + getDBBoolean(true) + " AND fingerprints_phash.type = 'phash'", + onClause: "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.\"primary\" = " + getDBBoolean(true) + " AND fingerprints_phash.type = 'phash'", }, ) diff --git a/pkg/sqlite/scene_filter.go b/pkg/sqlite/scene_filter.go index 042152e7037..f2fdf6f3821 100644 --- a/pkg/sqlite/scene_filter.go +++ b/pkg/sqlite/scene_filter.go @@ -65,7 +65,7 @@ func (qb *sceneFilterHandler) 
criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if sceneFilter.Oshash != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_oshash.type = 'oshash'") + f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)+" AND fingerprints_oshash.type = 'oshash'") } stringCriterionHandler(sceneFilter.Oshash, "fingerprints_oshash.fingerprint")(ctx, f) @@ -74,7 +74,7 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) { if sceneFilter.Checksum != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_md5.type = 'md5'") + f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)+" AND fingerprints_md5.type = 'md5'") } stringCriterionHandler(sceneFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f) @@ -214,12 +214,12 @@ func (qb *sceneFilterHandler) criterionHandler() criterionHandler { } func (qb *sceneFilterHandler) addSceneFilesTable(f *filterBuilder) { - f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id AND scenes_files.primary = "+getDBBoolean(true)) + f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id AND scenes_files.\"primary\" = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) addFilesTable(f *filterBuilder) { qb.addSceneFilesTable(f) - f.addLeftJoin(fileTable, "", "scenes_files.file_id = files.id AND scenes_files.primary = "+getDBBoolean(true)) + f.addLeftJoin(fileTable, "", 
"scenes_files.file_id = files.id AND scenes_files.\"primary\" = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) addFoldersTable(f *filterBuilder) { @@ -229,7 +229,7 @@ func (qb *sceneFilterHandler) addFoldersTable(f *filterBuilder) { func (qb *sceneFilterHandler) addVideoFilesTable(f *filterBuilder) { qb.addSceneFilesTable(f) - f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id AND scenes_files.primary = "+getDBBoolean(true)) + f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)) } func (qb *sceneFilterHandler) playCountCriterionHandler(count *models.IntCriterionInput) criterionHandlerFunc { @@ -277,7 +277,7 @@ func (qb *sceneFilterHandler) phashDuplicatedCriterionHandler(duplicatedFilter * v = "=" } - f.addInnerJoin("(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) "+v+" 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)", "scph", "scenes_files.file_id = scph.file_id AND scenes_files.primary = "+getDBBoolean(true)) + f.addInnerJoin("(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) "+v+" 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)", "scph", "scenes_files.file_id = scph.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)) } } } @@ -339,7 +339,7 @@ func (qb *sceneFilterHandler) isMissingCriterionHandler(isMissing *string) crite f.addWhere("scene_stash_ids.scene_id IS NULL") case "phash": qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = 
fingerprints_phash.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") f.addWhere("fingerprints_phash.fingerprint IS NULL") case "cover": f.addWhere("scenes.cover_blob IS NULL") @@ -544,7 +544,7 @@ func (qb *sceneFilterHandler) phashDistanceCriterionHandler(phashDistance *model return func(ctx context.Context, f *filterBuilder) { if phashDistance != nil { qb.addSceneFilesTable(f) - f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.primary = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") + f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND scenes_files.\"primary\" = "+getDBBoolean(true)+" AND fingerprints_phash.type = 'phash'") value, _ := utils.StringToPhash(phashDistance.Value) distance := 0 From cedcd281d29e9e67d2b23a29251060b86b0df925 Mon Sep 17 00:00:00 2001 From: Nodude <75137537+NodudeWasTaken@users.noreply.github.com> Date: Thu, 21 Nov 2024 01:51:32 +0100 Subject: [PATCH 85/85] Use timezone-aware timestamps in the Postgres schema to correct mod_time values that were off by 1-2 hours. Add schema update 4/70 to Postgres.
--- .../migrationsPostgres/1_initial.up.sql | 52 +++++++++---------- .../migrationsPostgres/4_markers_end.up.sql | 1 + 2 files changed, 27 insertions(+), 26 deletions(-) create mode 100644 pkg/sqlite/migrationsPostgres/4_markers_end.up.sql diff --git a/pkg/sqlite/migrationsPostgres/1_initial.up.sql b/pkg/sqlite/migrationsPostgres/1_initial.up.sql index 24191c0a096..2757ae328ac 100644 --- a/pkg/sqlite/migrationsPostgres/1_initial.up.sql +++ b/pkg/sqlite/migrationsPostgres/1_initial.up.sql @@ -7,8 +7,8 @@ CREATE TABLE blobs ( CREATE TABLE tags ( id serial not null primary key, name text, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, ignore_auto_tag boolean not null default FALSE, description text, image_blob varchar(255) REFERENCES blobs(checksum), @@ -18,9 +18,9 @@ CREATE TABLE folders ( id serial not null primary key, path text NOT NULL, parent_folder_id integer, - mod_time timestamp not null, - created_at timestamp not null, - updated_at timestamp not null, + mod_time TIMESTAMP WITH TIME ZONE not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, foreign key(parent_folder_id) references folders(id) on delete SET NULL ); CREATE TABLE files ( @@ -29,9 +29,9 @@ CREATE TABLE files ( zip_file_id integer, parent_folder_id integer not null, size bigint NOT NULL, - mod_time timestamp not null, - created_at timestamp not null, - updated_at timestamp not null, + mod_time TIMESTAMP WITH TIME ZONE not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, foreign key(zip_file_id) references files(id), foreign key(parent_folder_id) references folders(id), CHECK (basename != '') @@ -53,8 +53,8 @@ CREATE TABLE IF NOT EXISTS performers ( tattoos text, piercings text, favorite boolean not null default FALSE, - created_at timestamp not null, - updated_at timestamp not 
null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, details text, death_date date, hair_color text, @@ -70,8 +70,8 @@ CREATE TABLE IF NOT EXISTS studios ( name text NOT NULL, url VARCHAR(2048), parent_id INTEGER DEFAULT NULL REFERENCES studios(id) ON DELETE SET NULL, - created_at timestamp NOT NULL, - updated_at timestamp NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, details TEXT, rating smallint, ignore_auto_tag BOOLEAN NOT NULL DEFAULT FALSE, @@ -94,8 +94,8 @@ CREATE TABLE IF NOT EXISTS images ( studio_id integer, o_counter smallint not null default 0, organized boolean not null default FALSE, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, date date, code text, photographer text, @@ -118,8 +118,8 @@ CREATE TABLE IF NOT EXISTS galleries ( studio_id integer, rating smallint, organized boolean not null default FALSE, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, code text, photographer text, foreign key(studio_id) references studios(id) on delete SET NULL, @@ -140,8 +140,8 @@ CREATE TABLE IF NOT EXISTS scenes ( rating smallint, studio_id integer, organized boolean not null default FALSE, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, code text, director text, resume_time float not null default 0, @@ -159,8 +159,8 @@ CREATE TABLE IF NOT EXISTS groups ( studio_id integer REFERENCES studios(id) ON DELETE SET NULL, director text, "description" text, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, 
front_image_blob varchar(255) REFERENCES blobs(checksum), back_image_blob varchar(255) REFERENCES blobs(checksum) ); @@ -194,12 +194,12 @@ CREATE TABLE studios_tags ( ); CREATE TABLE IF NOT EXISTS scenes_view_dates ( scene_id integer not null, - view_date timestamp not null, + view_date TIMESTAMP WITH TIME ZONE not null, foreign key(scene_id) references scenes(id) on delete CASCADE ); CREATE TABLE IF NOT EXISTS scenes_o_dates ( scene_id integer not null, - o_date timestamp not null, + o_date TIMESTAMP WITH TIME ZONE not null, foreign key(scene_id) references scenes(id) on delete CASCADE ); CREATE TABLE performer_stash_ids ( @@ -294,8 +294,8 @@ CREATE TABLE IF NOT EXISTS scene_markers ( seconds FLOAT NOT NULL, primary_tag_id INTEGER NOT NULL, scene_id INTEGER NOT NULL, - created_at timestamp NOT NULL, - updated_at timestamp NOT NULL, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, FOREIGN KEY(primary_tag_id) REFERENCES tags(id), FOREIGN KEY(scene_id) REFERENCES scenes(id) ); @@ -401,8 +401,8 @@ CREATE TABLE galleries_chapters ( title text not null, image_index integer not null, gallery_id integer not null, - created_at timestamp not null, - updated_at timestamp not null, + created_at TIMESTAMP WITH TIME ZONE not null, + updated_at TIMESTAMP WITH TIME ZONE not null, foreign key(gallery_id) references galleries(id) on delete CASCADE ); CREATE TABLE scene_urls ( diff --git a/pkg/sqlite/migrationsPostgres/4_markers_end.up.sql b/pkg/sqlite/migrationsPostgres/4_markers_end.up.sql new file mode 100644 index 00000000000..05469953ace --- /dev/null +++ b/pkg/sqlite/migrationsPostgres/4_markers_end.up.sql @@ -0,0 +1 @@ +ALTER TABLE scene_markers ADD COLUMN end_seconds FLOAT; \ No newline at end of file