From 27cde2c3cf47e09df8db1dc2541a4d20f98dc123 Mon Sep 17 00:00:00 2001
From: Rory Powell
Date: Mon, 24 Oct 2022 15:28:43 +0100
Subject: [PATCH] Move backup processing into pro

---
 packages/server/src/app.ts                    |  15 +-
 packages/server/src/sdk/app/backups/backup.ts | 145 ------------------
 packages/server/src/sdk/app/backups/index.ts  |   2 -
 3 files changed, 14 insertions(+), 148 deletions(-)
 delete mode 100644 packages/server/src/sdk/app/backups/backup.ts

diff --git a/packages/server/src/app.ts b/packages/server/src/app.ts
index 776adb602d..330adadd3d 100644
--- a/packages/server/src/app.ts
+++ b/packages/server/src/app.ts
@@ -38,6 +38,7 @@ import {
 import { watch } from "./watch"
 import { initialise as initialiseWebsockets } from "./websocket"
 import sdk from "./sdk"
+import * as pro from "@budibase/pro"
 
 const app = new Koa()
 
@@ -103,13 +104,25 @@ server.on("close", async () => {
   }
 })
 
+const initPro = async () => {
+  await pro.init({
+    backups: {
+      processing: {
+        exportAppFn: sdk.backups.exportApp,
+        importAppFn: sdk.backups.importApp,
+        statsFn: sdk.backups.calculateBackupStats,
+      },
+    },
+  })
+}
+
 module.exports = server.listen(env.PORT || 0, async () => {
   console.log(`Budibase running on ${JSON.stringify(server.address())}`)
   env._set("PORT", server.address().port)
   eventEmitter.emitPort(env.PORT)
   fileSystem.init()
   await redis.init()
-  await sdk.backups.init()
+  await initPro()
 
   // run migrations on startup if not done via http
   // not recommended in a clustered environment
diff --git a/packages/server/src/sdk/app/backups/backup.ts b/packages/server/src/sdk/app/backups/backup.ts
deleted file mode 100644
index ac85b2940f..0000000000
--- a/packages/server/src/sdk/app/backups/backup.ts
+++ /dev/null
@@ -1,145 +0,0 @@
-import { backups } from "@budibase/pro"
-import { db as dbCore, objectStore, tenancy } from "@budibase/backend-core"
-import {
-  AppBackupQueueData,
-  AppBackupStatus,
-  AppBackupTrigger,
-  AppBackupType,
-} from "@budibase/types"
-import { exportApp } from "./exports"
-import { importApp } from "./imports"
-import { calculateBackupStats } from "./statistics"
-import { Job } from "bull"
-import fs from "fs"
-import env from "../../../environment"
-
-type BackupOpts = {
-  doc?: { id: string; rev: string }
-  createdBy?: string
-}
-
-async function removeExistingApp(devId: string) {
-  const devDb = dbCore.dangerousGetDB(devId, { skip_setup: true })
-  await devDb.destroy()
-}
-
-async function runBackup(
-  name: string,
-  trigger: AppBackupTrigger,
-  tenantId: string,
-  appId: string,
-  opts?: BackupOpts
-) {
-  const devAppId = dbCore.getDevAppID(appId),
-    prodAppId = dbCore.getProdAppID(appId)
-  const timestamp = new Date().toISOString()
-  const tarPath = await exportApp(devAppId, { tar: true })
-  const contents = await calculateBackupStats(devAppId)
-  let filename = `${prodAppId}/backup-${timestamp}.tar.gz`
-  // add the tenant to the bucket path if backing up within a multi-tenant environment
-  if (env.MULTI_TENANCY) {
-    filename = `${tenantId}/${filename}`
-  }
-  const bucket = objectStore.ObjectStoreBuckets.BACKUPS
-  await objectStore.upload({
-    path: tarPath,
-    type: "application/gzip",
-    bucket,
-    filename,
-    metadata: {
-      name,
-      trigger,
-      timestamp,
-      appId: prodAppId,
-    },
-  })
-  if (opts?.doc) {
-    await backups.updateBackupStatus(
-      opts.doc.id,
-      opts.doc.rev,
-      AppBackupStatus.COMPLETE,
-      contents,
-      filename
-    )
-  } else {
-    await backups.storeAppBackupMetadata(
-      {
-        appId: prodAppId,
-        timestamp,
-        name,
-        trigger,
-        type: AppBackupType.BACKUP,
-        status: AppBackupStatus.COMPLETE,
-        contents,
-        createdBy: opts?.createdBy,
-      },
-      { filename }
-    )
-  }
-  // clear up the tarball after uploading it
-  fs.rmSync(tarPath)
-}
-
-async function importProcessor(job: Job) {
-  const data: AppBackupQueueData = job.data
-  const appId = data.appId,
-    backupId = data.import!.backupId,
-    nameForBackup = data.import!.nameForBackup,
-    createdBy = data.import!.createdBy
-  const tenantId = tenancy.getTenantIDFromAppID(appId) as string
-  tenancy.doInTenant(tenantId, async () => {
-    const devAppId = dbCore.getDevAppID(appId)
-    const { rev } = await backups.updateRestoreStatus(
-      data.docId,
-      data.docRev,
-      AppBackupStatus.PENDING
-    )
-    // initially export the current state to disk - incase something goes wrong
-    await runBackup(
-      nameForBackup,
-      AppBackupTrigger.RESTORING,
-      tenantId,
-      appId,
-      { createdBy }
-    )
-    // get the backup ready on disk
-    const { path } = await backups.downloadAppBackup(backupId)
-    // start by removing app database and contents of bucket - which will be updated
-    await removeExistingApp(devAppId)
-    let status = AppBackupStatus.COMPLETE
-    try {
-      await importApp(devAppId, dbCore.dangerousGetDB(devAppId), {
-        file: {
-          type: "application/gzip",
-          path,
-        },
-        key: path,
-      })
-    } catch (err) {
-      status = AppBackupStatus.FAILED
-    }
-    await backups.updateRestoreStatus(data.docId, rev, status)
-  })
-}
-
-async function exportProcessor(job: Job) {
-  const data: AppBackupQueueData = job.data
-  const appId = data.appId,
-    trigger = data.export!.trigger,
-    name = data.export!.name || `${trigger} - backup`
-  const tenantId = tenancy.getTenantIDFromAppID(appId) as string
-  await tenancy.doInTenant(tenantId, async () => {
-    const { rev } = await backups.updateBackupStatus(
-      data.docId,
-      data.docRev,
-      AppBackupStatus.PENDING
-    )
-    return runBackup(name, trigger, tenantId, appId, {
-      doc: { id: data.docId, rev },
-    })
-  })
-}
-
-export async function init() {
-  await backups.addAppBackupProcessors(importProcessor, exportProcessor)
-}
diff --git a/packages/server/src/sdk/app/backups/index.ts b/packages/server/src/sdk/app/backups/index.ts
index c2dd7a7b71..8e5697c53c 100644
--- a/packages/server/src/sdk/app/backups/index.ts
+++ b/packages/server/src/sdk/app/backups/index.ts
@@ -1,11 +1,9 @@
 import * as exportApps from "./exports"
 import * as importApps from "./imports"
-import * as backup from "./backup"
 import * as statistics from "./statistics"
 
 export default {
   ...exportApps,
   ...importApps,
-  ...backup,
   ...statistics,
 }
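
The mechanism here is dependency injection: rather than having @budibase/pro import the server package (which would risk a circular dependency, since the deleted backup.ts already imported from pro), the server hands its exportApp, importApp, and calculateBackupStats functions to pro.init at startup. The pro-side internals are not part of this patch; below is a minimal TypeScript sketch of what a receiving init might look like, with the function types inferred from how the deleted backup.ts used them — BackupProcessingFns, InitOpts, and the processing holder are hypothetical names, not the actual pro API.

// Hypothetical sketch only — the real @budibase/pro internals are not shown
// in this patch. Types inferred from the deleted backup.ts:
//   exportApp(devAppId, { tar: true }) resolves to a tarball path,
//   importApp(devAppId, db, { file, key }) restores an exported app,
//   calculateBackupStats(devAppId) produces the backup "contents" summary.
type BackupProcessingFns = {
  exportAppFn: (appId: string, opts?: { tar?: boolean }) => Promise<string>
  importAppFn: (appId: string, db: unknown, template: unknown) => Promise<unknown>
  statsFn: (appId: string) => Promise<unknown>
}

type InitOpts = {
  backups?: { processing?: BackupProcessingFns }
}

// Stored at init time; the import/export queue processors that moved out of
// packages/server would read from here instead of importing the server SDK.
let processing: BackupProcessingFns | undefined

export async function init(opts: InitOpts) {
  processing = opts.backups?.processing
  // ...presumably followed by the queue registration the deleted file
  // used to perform:
  // await backups.addAppBackupProcessors(importProcessor, exportProcessor)
}

Whatever the exact pro-side shape, the deleted backup.ts above documents what the relocated processors do: take a safety export of the current app before a restore, download and import the selected backup, and upload export tarballs with their metadata and statistics.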