Watch 1 · Star 0 · Fork 0
Mirror of the upstream repository, synced 2024-06-02 02:25:17 +12:00

Getting the import system to carry out a backup of the app before the restore.

This commit is contained in:
mike12345567 2022-10-20 19:07:10 +01:00
parent 0478d9b154
commit 4716eaaab5
2 changed files with 93 additions and 54 deletions

View file

@ -1,6 +1,11 @@
import { backups } from "@budibase/pro"
import { db as dbCore, objectStore, tenancy } from "@budibase/backend-core"
import { AppBackupQueueData, AppBackupStatus } from "@budibase/types"
import {
AppBackupQueueData,
AppBackupStatus,
AppBackupTrigger,
AppBackupType,
} from "@budibase/types"
import { exportApp } from "./exports"
import { importApp } from "./imports"
import { calculateBackupStats } from "../statistics"
@ -8,19 +13,96 @@ import { Job } from "bull"
import fs from "fs"
import env from "../../../environment"
// Optional settings passed through to runBackup.
type BackupOpts = {
// Existing backup document (id + CouchDB revision) to mark complete once the
// upload finishes; when absent, fresh backup metadata is stored instead.
doc?: { id: string; rev: string }
// User ID recorded as the creator of newly stored backup metadata.
createdBy?: string
}
/**
 * Drops an app's dev database ahead of a restore.
 * `skip_setup` obtains the handle without attempting to create the database
 * when it is missing — we only ever want to destroy it here.
 */
async function removeExistingApp(devId: string) {
  await dbCore.dangerousGetDB(devId, { skip_setup: true }).destroy()
}
/**
 * Exports an app to a tarball, uploads it to the backups object-store bucket,
 * and records the outcome — either by completing an existing backup document
 * or by storing fresh backup metadata.
 *
 * @param name     human-readable backup name stored in the object metadata
 * @param trigger  what initiated the backup (publish/manual/scheduled/restoring)
 * @param tenantId tenant owning the app; prefixed onto the object key when
 *                 running multi-tenant
 * @param appId    app to back up (dev and prod IDs are both derived from it)
 * @param opts     optional existing doc to update and/or the creating user's ID
 */
async function runBackup(
  name: string,
  trigger: AppBackupTrigger,
  tenantId: string,
  appId: string,
  opts?: BackupOpts
) {
  const devAppId = dbCore.getDevAppID(appId),
    prodAppId = dbCore.getProdAppID(appId)
  const timestamp = new Date().toISOString()
  // export the dev app to a tarball on disk and gather content statistics
  const tarPath = await exportApp(devAppId, { tar: true })
  const contents = await calculateBackupStats(devAppId)
  let filename = `${prodAppId}/backup-${timestamp}.tar.gz`
  // add the tenant to the bucket path if backing up within a multi-tenant environment
  if (env.MULTI_TENANCY) {
    // BUG FIX: was `${tenantId}/$(unknown)` — "$(unknown)" is shell-style
    // substitution, which a JS template literal does not interpolate, so the
    // object key would literally contain "$(unknown)". The intent is to nest
    // the already-built filename under the tenant prefix.
    filename = `${tenantId}/${filename}`
  }
  const bucket = objectStore.ObjectStoreBuckets.BACKUPS
  await objectStore.upload({
    path: tarPath,
    type: "application/gzip",
    bucket,
    filename,
    metadata: {
      name,
      trigger,
      timestamp,
      appId: prodAppId,
    },
  })
  if (opts?.doc) {
    // a backup doc already exists (export queue job) — mark it complete
    await backups.updateBackupStatus(
      opts.doc.id,
      opts.doc.rev,
      AppBackupStatus.COMPLETE,
      contents,
      filename
    )
  } else {
    // no pre-existing doc (e.g. safety backup before a restore) — store new metadata
    await backups.storeAppBackupMetadata(
      {
        appId: prodAppId,
        timestamp,
        name,
        trigger,
        type: AppBackupType.BACKUP,
        status: AppBackupStatus.COMPLETE,
        contents,
        createdBy: opts?.createdBy,
      },
      { filename }
    )
  }
  // clear up the tarball after uploading it
  fs.rmSync(tarPath)
}
// NOTE(review): this span is a scraped unified-diff hunk whose +/- markers were
// lost — the pre-change and post-change bodies of importProcessor are
// interleaved below (duplicate `tenantId` declarations, duplicate `path`
// bindings, a stray hunk header at the "@ -28,26" line), so it is NOT valid
// TypeScript as rendered. Provenance comments below are best-effort; confirm
// against the upstream commit.
async function importProcessor(job: Job) {
const data: AppBackupQueueData = job.data
const appId = data.appId,
// old (removed) version: only backupId was pulled from the job data
backupId = data.import!.backupId
const tenantId = tenancy.getTenantIDFromAppID(appId)
// new (added) version: also pulls nameForBackup/createdBy so a safety backup
// can be attributed, and casts tenantId to string
backupId = data.import!.backupId,
nameForBackup = data.import!.nameForBackup,
createdBy = data.import!.createdBy
const tenantId = tenancy.getTenantIDFromAppID(appId) as string
tenancy.doInTenant(tenantId, async () => {
const devAppId = dbCore.getDevAppID(appId)
// new code: the import is wrapped in a helper so it can be re-run for rollback
const performImport = async (path: string) => {
// initially export the current state to disk - incase something goes wrong
// new code: take a safety backup of the current app before restoring over it
await runBackup(
nameForBackup,
AppBackupTrigger.RESTORING,
tenantId,
appId,
{ createdBy }
)
// get the backup ready on disk
const { path } = await backups.downloadAppBackup(backupId)
// start by removing app database and contents of bucket - which will be updated
await removeExistingApp(devAppId)
// new code: track the restore outcome instead of always reporting COMPLETE
let status = AppBackupStatus.COMPLETE
try {
await importApp(devAppId, dbCore.dangerousGetDB(devAppId), {
file: {
type: "application/gzip",
@ -28,26 +110,10 @@ async function importProcessor(job: Job) {
},
key: path,
})
}
// old (removed) version: exported the current state inline rather than via runBackup
// initially export the current state to disk - incase something goes wrong
const backupTarPath = await exportApp(devAppId, { tar: true })
// get the backup ready on disk
const { path } = await backups.downloadAppBackup(backupId)
// start by removing app database and contents of bucket - which will be updated
await removeExistingApp(devAppId)
try {
await performImport(path)
} catch (err) {
// rollback - clear up failed import and re-import the pre-backup
await removeExistingApp(devAppId)
await performImport(backupTarPath)
status = AppBackupStatus.FAILED
}
// old (removed) version: unconditionally reported COMPLETE
await backups.updateRestoreStatus(
data.docId,
data.docRev,
AppBackupStatus.COMPLETE
)
fs.rmSync(backupTarPath)
// new (added) version: reports the tracked status (COMPLETE or FAILED)
await backups.updateRestoreStatus(data.docId, data.docRev, status)
})
}
// NOTE(review): scraped diff hunk with +/- markers lost — the function
// signature `async function exportProcessor(job: Job)` survives only inside
// the hunk header below, and the old inline export/upload code appears
// alongside the new delegation to runBackup, so this span is NOT valid
// TypeScript as rendered. Provenance comments are best-effort.
@ -56,40 +122,11 @@ async function exportProcessor(job: Job) {
const appId = data.appId,
trigger = data.export!.trigger,
name = data.export!.name || `${trigger} - backup`
// old (removed) line: no string cast on tenantId
const tenantId = tenancy.getTenantIDFromAppID(appId)
// new (added) line: cast added
const tenantId = tenancy.getTenantIDFromAppID(appId) as string
await tenancy.doInTenant(tenantId, async () => {
// old (removed) body: performed the export/upload inline — this logic appears
// to have been extracted into the shared runBackup helper by this commit
const devAppId = dbCore.getDevAppID(appId),
prodAppId = dbCore.getProdAppID(appId)
const timestamp = new Date().toISOString()
const tarPath = await exportApp(devAppId, { tar: true })
const contents = await calculateBackupStats(devAppId)
let filename = `${prodAppId}/backup-${timestamp}.tar.gz`
// add the tenant to the bucket path if backing up within a multi-tenant environment
if (env.MULTI_TENANCY) {
// NOTE(review): "$(unknown)" looks like a scrape artifact of `${filename}`
// — confirm against the upstream commit
filename = `${tenantId}/$(unknown)`
}
const bucket = objectStore.ObjectStoreBuckets.BACKUPS
await objectStore.upload({
path: tarPath,
type: "application/gzip",
bucket,
filename,
metadata: {
name,
trigger,
timestamp,
appId: prodAppId,
},
// new (added) body: delegate to runBackup, passing the queue doc to update
return runBackup(name, trigger, tenantId, appId, {
doc: { id: data.docId, rev: data.docRev },
})
// old (removed) continuation: completed the backup doc and cleaned up inline
await backups.updateBackupStatus(
data.docId,
data.docRev,
AppBackupStatus.COMPLETE,
contents,
filename
)
// clear up the tarball after uploading it
fs.rmSync(tarPath)
})
}

View file

// NOTE(review): scraped diff hunks from the types package — only the added
// lines plus a little context are visible; the enum/interface bodies are
// truncated by the hunk headers, so this span is NOT valid TypeScript as
// rendered.
@ -16,6 +16,7 @@ export enum AppBackupTrigger {
PUBLISH = "publish",
MANUAL = "manual",
SCHEDULED = "scheduled",
// new (added) trigger: marks the safety backup taken just before a restore
RESTORING = "restoring",
}
export interface AppBackupContents {
@ -59,6 +60,7 @@ export interface AppBackupQueueData {
}
import?: {
backupId: string
// new (added) field: name to give the pre-restore safety backup
nameForBackup: string
createdBy?: string
}
}