From fbb73efde0058a8b97fd4c7c4387775f4adb3ecf Mon Sep 17 00:00:00 2001 From: Christian Benincasa Date: Tue, 4 Jun 2024 07:23:46 -0400 Subject: [PATCH] Improve 'first run' and 'legacy migration' handling / logic. Fixes #474 (#476) The new logic depends on a new boolean in the settings.json file. If Tunarr detects that the settings.json file did not exist at initial startup, it will set 'freshSettings' to true. On subsequent runs, it will explicitly set this to 'false'. This bit is used to determine whether a legacy migration should be attempted. This new logic allows for the Tunarr database _directory_ to exist at startup (a requirement for bind mounting with containers) but still be able to determine if this is a 'first run'. Also includes: * Attaching className to child loggers, for additional logging context in production env * Logging improvement in the legacy migrator to print actual errors * waitAfterEachMs option in asyncPool * support for background fixer tasks to not delay server startup too long --- Dockerfile | 19 ++++++++-- server/src/dao/channelDb.ts | 4 +- .../dao/legacy_migration/legacyDbMigration.ts | 20 +++++----- .../dao/legacy_migration/metadataBackfill.ts | 9 +++-- server/src/dao/settings.ts | 29 +++++++++++---- server/src/ffmpeg/ffmpeg.ts | 1 + server/src/server.ts | 37 ++++++++++--------- .../fixers/BackfillProgramExternalIds.ts | 4 +- .../tasks/fixers/backfillProgramGroupings.ts | 5 ++- server/src/tasks/fixers/fixer.ts | 3 ++ server/src/tasks/fixers/index.ts | 23 +++++++++--- server/src/util/asyncPool.ts | 14 ++++++- server/src/util/logging/LoggerFactory.ts | 12 +++--- 13 files changed, 123 insertions(+), 57 deletions(-) diff --git a/Dockerfile b/Dockerfile index 07e14312e..bcd5b2126 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,6 +28,7 @@ RUN corepack enable EXPOSE 8000 RUN ln -s /usr/local/bin/ffmpeg /usr/bin/ffmpeg ENTRYPOINT [ "node" ] +CMD [ "/tunarr/bundle.js" ] # Add Tunarr sources FROM ffmpeg-base as sources @@ -85,9 +86,21 @@ COPY 
--from=build-server /tunarr/server/build /tunarr/server/build # user, such as volume mapping their legacy DBs, while not interrupting the # other assumptions that Tunarr makes about its working directory RUN ln -s /tunarr/server/build/bundle.js /tunarr/bundle.js -CMD [ "/tunarr/bundle.js" ] +# CMD [ "/tunarr/bundle.js" ] ### Begin server run ### Full stack ### -FROM server AS full-stack -COPY --from=build-full-stack /tunarr/web/dist /tunarr/server/build/web \ No newline at end of file +FROM ffmpeg-base AS full-stack +# Duplicate the COPY statements from server build to ensure we don't bundle +# twice, needlessly +COPY --from=prod-deps /tunarr/node_modules /tunarr/node_modules +COPY --from=prod-deps /tunarr/server/node_modules /tunarr/server/node_modules +COPY --from=build-full-stack /tunarr/types /tunarr/types +COPY --from=build-full-stack /tunarr/shared /tunarr/shared +COPY --from=build-full-stack /tunarr/server/package.json /tunarr/server/package.json +COPY --from=build-full-stack /tunarr/server/build /tunarr/server/build +COPY --from=build-full-stack /tunarr/web/dist /tunarr/server/build/web +# Create a symlink to the bundle at /tunarr. 
This simplifies things for the +# user, such as volume mapping their legacy DBs, while not interrupting the +# other assumptions that Tunarr makes about its working directory +RUN ln -s /tunarr/server/build/bundle.js /tunarr/bundle.js \ No newline at end of file diff --git a/server/src/dao/channelDb.ts b/server/src/dao/channelDb.ts index 484b4c1f8..76bb14182 100644 --- a/server/src/dao/channelDb.ts +++ b/server/src/dao/channelDb.ts @@ -711,7 +711,7 @@ export class ChannelDB { where: { content: { $in: chunk } }, }); }, - 2, + { concurrency: 2 }, ); const allCustomShowContent: CustomShowContent[] = []; @@ -822,7 +822,7 @@ export class ChannelDB { return this.saveLineup(channel.uuid, { ...lineup, items: newLineup }); } }, - 2, + { concurrency: 2 }, ); for await (const updateResult of ops) { diff --git a/server/src/dao/legacy_migration/legacyDbMigration.ts b/server/src/dao/legacy_migration/legacyDbMigration.ts index 0b5f80224..00672aaf2 100644 --- a/server/src/dao/legacy_migration/legacyDbMigration.ts +++ b/server/src/dao/legacy_migration/legacyDbMigration.ts @@ -138,7 +138,7 @@ export class LegacyDbMigrator { }; } } catch (e) { - this.logger.error('Unable to migrate HDHR settings', e); + this.logger.error(e, 'Unable to migrate HDHR settings'); } } @@ -167,7 +167,7 @@ export class LegacyDbMigrator { }; } } catch (e) { - this.logger.error('Unable to migrate XMLTV settings', e); + this.logger.error(e, 'Unable to migrate XMLTV settings'); } } @@ -270,7 +270,7 @@ export class LegacyDbMigrator { }; } } catch (e) { - this.logger.error('Unable to migrate Plex settings', e); + this.logger.error(e, 'Unable to migrate Plex settings'); } } @@ -341,7 +341,7 @@ export class LegacyDbMigrator { await em.persistAndFlush(entities); } } catch (e) { - this.logger.error('Unable to migrate Plex server settings', e); + this.logger.error(e, 'Unable to migrate Plex server settings'); } } @@ -443,7 +443,7 @@ export class LegacyDbMigrator { }; } } catch (e) { - this.logger.error('Unable to 
migrate ffmpeg settings', e); + this.logger.error(e, 'Unable to migrate ffmpeg settings'); } } @@ -455,7 +455,7 @@ export class LegacyDbMigrator { clientId: clientId['clientId'] as string, }; } catch (e) { - this.logger.error('Unable to migrate client ID', e); + this.logger.error(e, 'Unable to migrate client ID'); } const libraryMigrator = new LegacyLibraryMigrator(); @@ -468,7 +468,7 @@ export class LegacyDbMigrator { 'custom-shows', ); } catch (e) { - this.logger.error('Unable to migrate all custom shows', e); + this.logger.error(e, 'Unable to migrate all custom shows'); } } @@ -480,7 +480,7 @@ export class LegacyDbMigrator { 'filler', ); } catch (e) { - this.logger.error('Unable to migrate all filler shows', e); + this.logger.error(e, 'Unable to migrate all filler shows'); } } @@ -508,7 +508,7 @@ export class LegacyDbMigrator { }), ); } catch (e) { - this.logger.error('Unable to migrate channels', e); + this.logger.error(e, 'Unable to migrate channels'); } } @@ -517,7 +517,7 @@ export class LegacyDbMigrator { this.logger.debug('Migrating cached images'); await this.migrateCachedImages(); } catch (e) { - this.logger.error('Unable to migrate cached images', e); + this.logger.error(e, 'Unable to migrate cached images'); } } diff --git a/server/src/dao/legacy_migration/metadataBackfill.ts b/server/src/dao/legacy_migration/metadataBackfill.ts index 3a1393622..4c3a56711 100644 --- a/server/src/dao/legacy_migration/metadataBackfill.ts +++ b/server/src/dao/legacy_migration/metadataBackfill.ts @@ -26,7 +26,10 @@ import { import { ProgramGroupingExternalId } from '../entities/ProgramGroupingExternalId'; export class LegacyMetadataBackfiller { - private logger = LoggerFactory.child({ caller: import.meta }); + private logger = LoggerFactory.child({ + caller: import.meta, + className: LegacyMetadataBackfiller.name, + }); // It requires valid PlexServerSettings, program metadata, etc async backfillParentMetadata() { @@ -114,7 +117,7 @@ export class LegacyMetadataBackfiller { 
uuid: grandparentUUID, }); if (!isNull(existingGrandparent)) { - this.logger.debug('Using existing grandparent grouping!'); + this.logger.trace('Using existing grandparent grouping!'); updatedGrandparent = true; if (type === ProgramType.Episode) { existingGrandparent.showEpisodes.add( @@ -142,7 +145,7 @@ export class LegacyMetadataBackfiller { uuid: parentUUID, }); if (!isNull(existingParent)) { - this.logger.debug('Using existing parent!'); + this.logger.trace('Using existing parent!'); updatedParent = true; if (type === ProgramType.Episode) { existingParent.seasonEpisodes.add(em.getReference(Program, uuid)); diff --git a/server/src/dao/settings.ts b/server/src/dao/settings.ts index f0b95a01d..6b654cbf0 100644 --- a/server/src/dao/settings.ts +++ b/server/src/dao/settings.ts @@ -56,9 +56,15 @@ export const SettingsSchema = z.object({ export type Settings = z.infer; export const MigrationStateSchema = z.object({ - legacyMigration: z.boolean(), + legacyMigration: z + .boolean() + .default(false) + .describe('Whether a legacy migration was performed'), + isFreshSettings: z.boolean().default(true).optional(), }); +export type MigrationState = z.infer; + export const SettingsFileSchema = z.object({ version: z.number(), migration: MigrationStateSchema, @@ -74,7 +80,7 @@ export const SettingsFileSchema = z.object({ export type SettingsFile = z.infer; -export const defaultSchema = (dbBasePath: string): SettingsFile => ({ +export const defaultSettings = (dbBasePath: string): SettingsFile => ({ version: 1, migration: { legacyMigration: false, @@ -118,6 +124,10 @@ export class SettingsDB extends ITypedEventEmitter { return !this.db.data.migration.legacyMigration; } + get migrationState(): DeepReadonly { + return this.db.data.migration; + } + clientId(): string { return this.db.data.settings.clientId; } @@ -200,9 +210,9 @@ export const getSettings = once((dbPath?: string) => { const actualPath = dbPath ?? 
path.resolve(globalOptions().databaseDirectory, 'settings.json'); - const needsFlush = !existsSync(actualPath); + const freshSettings = !existsSync(actualPath); - const defaultValue = defaultSchema(globalOptions().databaseDirectory); + const defaultValue = defaultSettings(globalOptions().databaseDirectory); // Load this synchronously, but then give the DB instance an async version const db = new LowSync( new SyncSchemaBackedDbAdapter(SettingsFileSchema, actualPath, defaultValue), ); db.read(); - if (needsFlush) { - db.write(); - } + db.update((data) => { + data.migration.isFreshSettings = freshSettings; + // Redefine this variable... it came before "isFreshSettings". + // If this is a fresh run, mark legacyMigration as false + if (freshSettings) { + data.migration.legacyMigration = false; + } + }); settingsDbInstance = new SettingsDB( actualPath, diff --git a/server/src/ffmpeg/ffmpeg.ts b/server/src/ffmpeg/ffmpeg.ts index 7880f9a6f..efeb334e3 100644 --- a/server/src/ffmpeg/ffmpeg.ts +++ b/server/src/ffmpeg/ffmpeg.ts @@ -826,6 +826,7 @@ export class FFMPEG extends (events.EventEmitter as new () => TypedEventEmitter< return; } + // TODO: Redact Plex tokens here this.logger.debug(`Starting ffmpeg with args: "${ffmpegArgs.join(' ')}"`); this.ffmpeg = spawn(this.ffmpegPath, ffmpegArgs, { diff --git a/server/src/server.ts b/server/src/server.ts index 3b95eae1f..0d4e120ac 100644 --- a/server/src/server.ts +++ b/server/src/server.ts @@ -37,19 +37,18 @@ import { LoggerFactory } from './util/logging/LoggerFactory.js'; const currentDirectory = dirname(filename(import.meta.url)); +/** + * Initializes the Tunarr "database" directory at the configured location, including + * subdirectories + * @returns True if an existing database directory was found + */ async function initDbDirectories() { // Early init, have to use the non-settings-based root Logger const logger = LoggerFactory.root; const opts = 
serverOptions(); const hasTunarrDb = fs.existsSync(opts.databaseDirectory); - const hasLegacyDb = fs.existsSync(path.resolve(process.cwd(), '.dizquetv')); if (!hasTunarrDb) { logger.debug(`Existing database at ${opts.databaseDirectory} not found`); - if (hasLegacyDb) { - logger.info( - `DB configured at location ${opts.databaseDirectory} was not found, but a legacy .dizquetv database was located. A migration will be attempted`, - ); - } fs.mkdirSync(opts.databaseDirectory, { recursive: true }); await getSettings().flush(); } @@ -69,11 +68,22 @@ async function initDbDirectories() { fs.mkdirSync(path.join(process.cwd(), 'streams')); } - return !hasTunarrDb && hasLegacyDb; + return hasTunarrDb; +} + +function hasLegacyDizquetvDirectory() { + const logger = LoggerFactory.root; + const legacyDbLocation = path.resolve(process.cwd(), '.dizquetv'); + logger.info(`Searching for legacy dizquetv directory at ${legacyDbLocation}`); + const hasLegacyDb = fs.existsSync(legacyDbLocation); + if (hasLegacyDb) { + logger.info(`A legacy .dizquetv database was located.`); + } + return hasLegacyDb; } export async function initServer(opts: ServerOptions) { - const hadLegacyDb = await initDbDirectories(); + await initDbDirectories(); const settingsDb = getSettings(); LoggerFactory.initialize(settingsDb); @@ -84,20 +94,13 @@ export async function initServer(opts: ServerOptions) { const ctx = serverContext(); if ( - hadLegacyDb && - (ctx.settings.needsLegacyMigration() || opts.force_migration) + (ctx.settings.migrationState.isFreshSettings || opts.force_migration) && + hasLegacyDizquetvDirectory() ) { logger.info('Migrating from legacy database folder...'); await new LegacyDbMigrator().migrateFromLegacyDb(settingsDb).catch((e) => { logger.error('Failed to migrate from legacy DB: %O', e); }); - } else if (ctx.settings.needsLegacyMigration()) { - // Mark the settings as if we migrated, even when there were no - // legacy settings present. 
This will prevent us from trying - // again on subsequent runs - await ctx.settings.updateBaseSettings('migration', { - legacyMigration: true, - }); } scheduleJobs(ctx); diff --git a/server/src/tasks/fixers/BackfillProgramExternalIds.ts b/server/src/tasks/fixers/BackfillProgramExternalIds.ts index 7318f181f..4b050073f 100644 --- a/server/src/tasks/fixers/BackfillProgramExternalIds.ts +++ b/server/src/tasks/fixers/BackfillProgramExternalIds.ts @@ -16,6 +16,8 @@ import { PlexTerminalMedia } from '@tunarr/types/plex'; export class BackfillProgramExternalIds extends Fixer { #logger = LoggerFactory.child({ caller: import.meta }); + canRunInBackground: boolean = true; + async runInternal(): Promise { const em = getEm(); @@ -64,7 +66,7 @@ export class BackfillProgramExternalIds extends Fixer { program, plexConnections[program.externalSourceId], ), - 2, + { concurrency: 1, waitAfterEachMs: 50 }, )) { if (result.type === 'error') { this.#logger.error( diff --git a/server/src/tasks/fixers/backfillProgramGroupings.ts b/server/src/tasks/fixers/backfillProgramGroupings.ts index a910d6475..895f8b071 100644 --- a/server/src/tasks/fixers/backfillProgramGroupings.ts +++ b/server/src/tasks/fixers/backfillProgramGroupings.ts @@ -28,7 +28,10 @@ import { LoggerFactory } from '../../util/logging/LoggerFactory'; import Fixer from './fixer'; export class BackfillProgramGroupings extends Fixer { - private logger = LoggerFactory.child({ caller: import.meta }); + private logger = LoggerFactory.child({ + caller: import.meta, + className: BackfillProgramGroupings.name, + }); protected async runInternal(em: EntityManager): Promise { const plexServers = await em.findAll(PlexServerSettings); diff --git a/server/src/tasks/fixers/fixer.ts b/server/src/tasks/fixers/fixer.ts index 257285831..7c1c4affa 100644 --- a/server/src/tasks/fixers/fixer.ts +++ b/server/src/tasks/fixers/fixer.ts @@ -2,6 +2,9 @@ import { EntityManager } from '@mikro-orm/better-sqlite'; import { withDb } from 
'../../dao/dataSource.js'; export default abstract class Fixer { + // True if the fixed data isn't required for proper server functioning + canRunInBackground: boolean = false; + async run() { return withDb((em) => this.runInternal(em)); } diff --git a/server/src/tasks/fixers/index.ts b/server/src/tasks/fixers/index.ts index 9096729bd..1878fe45a 100644 --- a/server/src/tasks/fixers/index.ts +++ b/server/src/tasks/fixers/index.ts @@ -27,14 +27,25 @@ const allFixers: Fixer[] = values(FixersByName); export const runFixers = async () => { for (const fixer of allFixers) { try { - LoggerFactory.root.debug('Running fixer %s', fixer.constructor.name); - await fixer.run(); - } catch (e) { - LoggerFactory.root.error( 'Fixer %s failed to run %O', + LoggerFactory.root.debug( 'Running fixer %s [background = %O]', fixer.constructor.name, - e, + fixer.canRunInBackground, ); + const fixerPromise = fixer.run(); + if (!fixer.canRunInBackground) { + await fixerPromise; + } else { + fixerPromise.catch((e) => { + logFixerError(fixer.constructor.name, e); + }); + } + } catch (e) { + logFixerError(fixer.constructor.name, e); } } }; + +function logFixerError(fixer: string, error: unknown) { + LoggerFactory.root.error(error, 'Fixer %s failed to run', fixer); +} diff --git a/server/src/util/asyncPool.ts b/server/src/util/asyncPool.ts index b79884703..dc36dbe0b 100644 --- a/server/src/util/asyncPool.ts +++ b/server/src/util/asyncPool.ts @@ -1,3 +1,10 @@ +import { wait } from './index.js'; + +type AsyncPoolOpts = { + concurrency: number; + waitAfterEachMs?: number; +}; + // Based on https://github.com/rxaviers/async-pool // Notable changes: // 1. 
Types @@ -5,7 +12,7 @@ export async function* asyncPool( iterable: Iterable, iteratorFn: (item: T, iterable: Iterable) => PromiseLike | R, - concurrency: number, + opts: AsyncPoolOpts, ): AsyncGenerator> { const executing = new Set]>>(); @@ -28,6 +35,9 @@ export async function* asyncPool( const promise = (async () => { try { const r = await iteratorFn(item, iterable); + if (opts.waitAfterEachMs && opts.waitAfterEachMs > 0) { + await wait(opts.waitAfterEachMs); + } return [item, r] as const; } catch (e) { throw { @@ -39,7 +49,7 @@ export async function* asyncPool( })().finally(() => executing.delete(promise)); executing.add(promise); - if (executing.size >= concurrency) { + if (executing.size >= opts.concurrency) { yield await consume(); } } diff --git a/server/src/util/logging/LoggerFactory.ts b/server/src/util/logging/LoggerFactory.ts index d70ac4d40..025abc4e8 100644 --- a/server/src/util/logging/LoggerFactory.ts +++ b/server/src/util/logging/LoggerFactory.ts @@ -152,12 +152,14 @@ class LoggerFactoryImpl { ); } - child(opts: { caller?: ImportMeta } & Bindings = {}) { - const { caller, ...rest } = opts; - const newChild = this.rootLogger.child({ + child(opts: { caller?: ImportMeta; className?: string } & Bindings = {}) { + const { caller, className, ...rest } = opts; + const childOpts = { ...rest, - caller: isProduction ? undefined : caller ? getCaller(caller) : undefined, - }); + caller: isProduction ? className : caller ? getCaller(caller) : undefined, + className: isProduction ? undefined : className, // Don't include this twice in production + }; + const newChild = this.rootLogger.child(childOpts); this.children.push(newChild); return newChild; }