
Some work to avoid contacting Lambda or using up deployment quotas when deploying locally.

mike12345567 2020-12-03 17:45:20 +00:00
parent 3bcbb5f27c
commit a5d2e9a3b9
4 changed files with 101 additions and 70 deletions
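Both deployment targets now expose the same three hooks (preDeployment, postDeployment and deploy), so whatever drives a deployment can use the Lambda/quota-backed AWS module in the cloud and the new self-hosted module locally without other changes. A rough sketch of how a caller might pick a target is below; the SELF_HOSTED flag and the require paths are illustrative assumptions, not something this commit shows.

// Illustrative only: the flag and module paths are assumed, the commit
// does not include the code that chooses between the two targets.
const env = require("../../../environment")

function getDeploymentTarget() {
  // Self-hosted installs skip the Lambda verification and quota checks
  // entirely and push assets to a local object store instead of S3.
  return env.SELF_HOSTED ? require("./selfDeploy") : require("./awsDeploy")
}

module.exports = getDeploymentTarget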

View file

@@ -1,12 +1,7 @@
-const fs = require("fs")
-const { join } = require("../../../utilities/centralPath")
-const AwsDeploy = require("aws-sdk")
+const AWS = require("aws-sdk")
 const fetch = require("node-fetch")
-const { budibaseAppsDir } = require("../../../utilities/budibaseDir")
-const PouchDB = require("../../../db")
 const env = require("../../../environment")
-const { prepareUpload } = require("./utils")
-const { walkDir } = require("../../../utilities")
+const { deployToObjectStore } = require("./utils")
 
 /**
  * Verifies the users API key and
@@ -37,7 +32,7 @@ exports.preDeployment = async function(deployment) {
   // set credentials here, means any time we're verified we're ready to go
   if (json.credentials) {
-    AwsDeploy.config.update({
+    AWS.config.update({
       accessKeyId: json.credentials.AccessKeyId,
       secretAccessKey: json.credentials.SecretAccessKey,
       sessionToken: json.credentials.SessionToken,
@@ -80,65 +75,11 @@ exports.postDeployment = async function(deployment) {
 exports.deploy = async function(deployment) {
   const appId = deployment.getAppId()
   const { bucket, accountId } = deployment.getVerification()
-  const s3 = new AwsDeploy.S3({
+  const metadata = { accountId }
+  const s3Client = new AWS.S3({
     params: {
       Bucket: bucket,
     },
   })
-  const appAssetsPath = join(budibaseAppsDir(), appId, "public")
-  const appPages = fs.readdirSync(appAssetsPath)
-  let uploads = []
-  for (let page of appPages) {
-    // Upload HTML, CSS and JS for each page of the web app
-    walkDir(join(appAssetsPath, page), function(filePath) {
-      const appAssetUpload = prepareUpload({
-        file: {
-          path: filePath,
-          name: [...filePath.split("/")].pop(),
-        },
-        s3Key: filePath.replace(appAssetsPath, `assets/${appId}`),
-        s3,
-        metadata: { accountId },
-      })
-      uploads.push(appAssetUpload)
-    })
-  }
-  // Upload file attachments
-  const db = new PouchDB(appId)
-  let fileUploads
-  try {
-    fileUploads = await db.get("_local/fileuploads")
-  } catch (err) {
-    fileUploads = { _id: "_local/fileuploads", uploads: [] }
-  }
-  for (let file of fileUploads.uploads) {
-    if (file.uploaded) continue
-    const attachmentUpload = prepareUpload({
-      file,
-      s3Key: `assets/${appId}/attachments/${file.processedFileName}`,
-      s3,
-      metadata: { accountId },
-    })
-    uploads.push(attachmentUpload)
-    // mark file as uploaded
-    file.uploaded = true
-  }
-  db.put(fileUploads)
-  try {
-    return await Promise.all(uploads)
-  } catch (err) {
-    console.error("Error uploading budibase app assets to s3", err)
-    throw err
-  }
+  await deployToObjectStore(appId, s3Client, metadata)
 }
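A detail that keeps the shared helper simple: the S3 client is constructed with params: { Bucket: bucket }, and the AWS SDK v2 binds those params to every request made through that client, so the upload code only ever has to supply a Key and a Body. A minimal sketch with a placeholder bucket name:

const AWS = require("aws-sdk")

// "example-bucket" is a placeholder; the real bucket name comes from
// deployment.getVerification() as shown in the diff above.
const s3Client = new AWS.S3({
  params: {
    Bucket: "example-bucket",
  },
})

// No Bucket needed here, the bound params fill it in on every call.
async function uploadOne(key, body) {
  return s3Client.upload({ Key: key, Body: body }).promise()
}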

View file

@@ -1,5 +1,31 @@
-exports.preDeployment = async function(deployment) {}
+const env = require("../../../environment")
+const AWS = require("aws-sdk")
+const { deployToObjectStore } = require("./utils")
 
-exports.postDeployment = async function(deployment) {}
+const APP_BUCKET = "app-assets"
 
-exports.deploy = async function(deployment) {}
+exports.preDeployment = async function() {
+  AWS.config.update({
+    accessKeyId: env.MINIO_ACCESS_KEY,
+    secretAccessKey: env.MINIO_SECRET_KEY,
+  })
+}
+
+exports.postDeployment = async function() {
+  // we don't actively need to do anything after deployment in self hosting
+}
+
+exports.deploy = async function(deployment) {
+  const appId = deployment.getAppId()
+  var objClient = new AWS.S3({
+    endpoint: "http://localhost:9000",
+    s3ForcePathStyle: true, // needed with minio?
+    signatureVersion: "v4",
+    params: {
+      Bucket: APP_BUCKET,
+    },
+  })
+  // no metadata, aws has account ID in metadata
+  const metadata = {}
+  await deployToObjectStore(appId, objClient, metadata)
+}
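The self-hosted target reuses the same aws-sdk S3 client, just pointed at a MinIO server on localhost:9000; s3ForcePathStyle is needed because MinIO serves buckets under the URL path rather than as virtual-host subdomains. One thing the diff does not show is how the app-assets bucket gets created, so the helper below is only a sketch of how that could be done with standard S3 calls that MinIO also implements.

// Hypothetical helper, not part of this commit: make sure the bucket
// exists before the first deploy. headBucket/createBucket are standard
// S3 operations that MinIO supports.
async function ensureBucket(objClient, bucket) {
  try {
    await objClient.headBucket({ Bucket: bucket }).promise()
  } catch (err) {
    await objClient.createBucket({ Bucket: bucket }).promise()
  }
}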

View file

@@ -1,5 +1,9 @@
 const fs = require("fs")
 const sanitize = require("sanitize-s3-objectkey")
+const { walkDir } = require("../../../utilities")
+const { join } = require("../../../utilities/centralPath")
+const { budibaseAppsDir } = require("../../../utilities/budibaseDir")
+const PouchDB = require("../../../db")
 
 const CONTENT_TYPE_MAP = {
   html: "text/html",
@@ -7,11 +11,11 @@ const CONTENT_TYPE_MAP = {
   js: "application/javascript",
 }
 
-exports.prepareUpload = async function({ s3Key, metadata, s3, file }) {
+exports.prepareUpload = async function({ s3Key, metadata, client, file }) {
   const extension = [...file.name.split(".")].pop()
   const fileBytes = fs.readFileSync(file.path)
-  const upload = await s3
+  const upload = await client
     .upload({
       // windows file paths need to be converted to forward slashes for s3
       Key: sanitize(s3Key).replace(/\\/g, "/"),
@@ -29,3 +33,61 @@ exports.prepareUpload = async function({ s3Key, metadata, s3, file }) {
     key: upload.Key,
   }
 }
+
+exports.deployToObjectStore = async function(appId, objectClient, metadata) {
+  const appAssetsPath = join(budibaseAppsDir(), appId, "public")
+  const appPages = fs.readdirSync(appAssetsPath)
+  let uploads = []
+  for (let page of appPages) {
+    // Upload HTML, CSS and JS for each page of the web app
+    walkDir(join(appAssetsPath, page), function(filePath) {
+      const appAssetUpload = exports.prepareUpload({
+        file: {
+          path: filePath,
+          name: [...filePath.split("/")].pop(),
+        },
+        s3Key: filePath.replace(appAssetsPath, `assets/${appId}`),
+        client: objectClient,
+        metadata,
+      })
+      uploads.push(appAssetUpload)
+    })
+  }
+  // Upload file attachments
+  const db = new PouchDB(appId)
+  let fileUploads
+  try {
+    fileUploads = await db.get("_local/fileuploads")
+  } catch (err) {
+    fileUploads = { _id: "_local/fileuploads", uploads: [] }
+  }
+  for (let file of fileUploads.uploads) {
+    if (file.uploaded) continue
+    const attachmentUpload = exports.prepareUpload({
+      file,
+      s3Key: `assets/${appId}/attachments/${file.processedFileName}`,
+      client: objectClient,
+      metadata,
+    })
+    uploads.push(attachmentUpload)
+    // mark file as uploaded
+    file.uploaded = true
+  }
+  db.put(fileUploads)
+  try {
+    return await Promise.all(uploads)
+  } catch (err) {
+    console.error("Error uploading budibase app assets to s3", err)
+    throw err
+  }
+}
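Because deployToObjectStore only depends on the client it is handed, both targets call it in exactly the same way: the AWS path passes the bucket-scoped S3 client plus { accountId } as metadata, the self-hosted path passes the MinIO client and an empty object. Condensed from the two call sites above:

// (inside an async deploy function)
// Cloud deploy: the account ID travels as S3 object metadata.
await deployToObjectStore(appId, s3Client, { accountId })

// Self-hosted deploy: nothing extra to attach, everything stays in local MinIO.
await deployToObjectStore(appId, objClient, {})

Note that the individual prepareUpload promises are collected into an array and awaited with a single Promise.all, so one failed upload rejects the whole deployment rather than leaving it partially reported.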

View file

@@ -36,6 +36,8 @@ module.exports = {
   ENABLE_ANALYTICS: process.env.ENABLE_ANALYTICS,
   DEPLOYMENT_DB_URL: process.env.DEPLOYMENT_DB_URL,
   LOCAL_TEMPLATES: process.env.LOCAL_TEMPLATES,
+  MINIO_ACCESS_KEY: process.env.MINIO_ACCESS_KEY,
+  MINIO_SECRET_KEY: process.env.MINIO_SECRET_KEY,
   _set(key, value) {
     process.env[key] = value
     module.exports[key] = value
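Like every other entry in this module, the two MinIO keys are read straight from process.env, so a local setup just needs them exported before the server starts, or injected through the module's _set helper (handy in tests). A small sketch with example values; the require path and credentials are assumptions:

// Example credentials only; use whatever the local MinIO server was started with.
const env = require("./environment")

env._set("MINIO_ACCESS_KEY", "budibase")
env._set("MINIO_SECRET_KEY", "budibase")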