feat(parallel-backupfile-added)-v2

2025-10-05 03:59:23 +05:30
parent 5598a5657b
commit b8c0e65045
2 changed files with 139 additions and 136 deletions

View File

@@ -13,6 +13,7 @@
   "license": "ISC",
   "type": "commonjs",
   "dependencies": {
+    "archiver": "^7.0.1",
     "axios": "^1.9.0",
     "bcrypt": "^5.1.1",
     "cors": "^2.8.5",
@@ -31,6 +32,7 @@
     "zod-validation-error": "^3.4.0"
   },
   "devDependencies": {
+    "@types/archiver": "^6.0.3",
     "@types/bcrypt": "^5.0.2",
     "@types/cors": "^2.8.18",
     "@types/express": "^5.0.1",

View File

@@ -5,13 +5,27 @@ import os from "os";
 import fs from "fs";
 import { prisma } from "@repo/db/client";
 import { storage } from "../storage";
+import archiver from "archiver";
 
 const router = Router();
 
 /**
  * Create a database backup
+ *
+ * - Uses pg_dump in directory format for parallel dump to a tmp dir
+ * - Uses 'archiver' to create zip or gzipped tar stream directly to response
+ * - Supports explicit override via BACKUP_ARCHIVE_FORMAT env var ('zip' or 'tar')
+ * - Ensures cleanup of tmp dir on success/error/client disconnect
  */
+// helper to remove directory (sync to keep code straightforward)
+function safeRmDir(dir: string) {
+  try {
+    fs.rmSync(dir, { recursive: true, force: true });
+  } catch (e) {
+    /* ignore */
+  }
+}
 router.post("/backup", async (req: Request, res: Response): Promise<any> => {
   try {
     const userId = req.user?.id;
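
The pg_dump invocation itself sits in an unchanged region between these hunks, so the diff doesn't show its arguments. For context, a directory-format parallel dump generally looks like the sketch below; the connection string, job count, and argument spelling are assumptions, not the repo's actual values:

import { spawn } from "child_process";

declare const tmpDir: string; // the mkdtempSync directory from the route above

// Hypothetical sketch: pg_dump in directory format with parallel jobs.
// Directory format (--format=directory) is the only pg_dump format that
// supports --jobs, which is what makes the dump parallel.
const pgDump = spawn(
  "pg_dump",
  [
    "--format=directory",
    "--jobs=4", // dump up to 4 tables concurrently (assumed value)
    "--file", tmpDir, // write the dump into the tmp directory
    process.env.DATABASE_URL!, // assumed env var name
  ],
  { env: process.env }
);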
@@ -22,29 +36,19 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
     // create a unique tmp directory for directory-format dump
     const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dental_backup_")); // MUST
-    // choose archive extension per platform
-    const isWin = process.platform === "win32";
-    const archiveName = isWin
+    // Decide archive format
+    // BACKUP_ARCHIVE_FORMAT can be 'zip' or 'tar' (case-insensitive)
+    const forced = (process.env.BACKUP_ARCHIVE_FORMAT || "").toLowerCase();
+    const useZip =
+      forced === "zip"
+        ? true
+        : forced === "tar"
+          ? false
+          : process.platform === "win32";
+    const filename = useZip
       ? `dental_backup_${Date.now()}.zip`
       : `dental_backup_${Date.now()}.tar.gz`;
-    const archivePath = path.join(os.tmpdir(), archiveName);
-    // ensure archivePath is not inside tmpDir (very important)
-    if (archivePath.startsWith(tmpDir) || tmpDir.startsWith(archivePath)) {
-      // place archive in parent tmp to be safe
-      const safeDir = path.join(os.tmpdir(), "dental_backups");
-      try {
-        fs.mkdirSync(safeDir, { recursive: true, mode: 0o700 });
-      } catch {}
-      // recompute archivePath
-      const safeArchivePath = path.join(safeDir, archiveName);
-      // overwrite archivePath with safe location
-      // (note: might require permission to write to safeDir)
-      (global as any).__archivePathOverride = safeArchivePath;
-    }
-    const finalArchivePath =
-      (global as any).__archivePathOverride || archivePath;
 
     // Spawn pg_dump
     const pgDump = spawn(
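
Extracted as a pure function, the selection logic above reads like this (illustrative sketch only; resolveUseZip is not a helper in the repo):

// Explicit BACKUP_ARCHIVE_FORMAT wins; otherwise fall back to the platform default.
function resolveUseZip(forced: string | undefined, platform: NodeJS.Platform): boolean {
  const f = (forced ?? "").toLowerCase();
  if (f === "zip") return true; // forced zip
  if (f === "tar") return false; // forced tar.gz
  return platform === "win32"; // default: zip on Windows, tar.gz elsewhere
}

resolveUseZip("TAR", "win32"); // false: case-insensitive override beats the platform
resolveUseZip(undefined, "linux"); // false: tar.gz is the POSIX default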
@@ -70,143 +74,140 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
         },
       }
     );
-    let errorMessage = "";
+    let pgStderr = "";
     pgDump.stderr.on("data", (chunk) => {
-      errorMessage += chunk.toString();
+      pgStderr += chunk.toString();
     });
+    // handle spawn error immediately
     pgDump.on("error", (err) => {
-      try {
-        fs.rmSync(tmpDir, { recursive: true, force: true });
-      } catch {}
+      safeRmDir(tmpDir);
       console.error("Failed to start pg_dump:", err);
-      return res
-        .status(500)
-        .json({ error: "Failed to run pg_dump", details: err.message });
+      // If headers haven't been sent, respond; otherwise just end the socket
+      if (!res.headersSent) {
+        return res
+          .status(500)
+          .json({ error: "Failed to run pg_dump", details: err.message });
+      } else {
+        res.destroy(err);
+      }
     });
-    // when pg_dump ends
-    pgDump.on("close", (code) => {
+    pgDump.on("close", async (code) => {
       if (code !== 0) {
-        // cleanup tmpDir
-        try {
-          fs.rmSync(tmpDir, { recursive: true, force: true });
-        } catch {}
-        console.error("pg_dump failed:", errorMessage || `exit ${code}`);
-        return res.status(500).json({
-          error: "Backup failed",
-          details: errorMessage || `pg_dump exited with ${code}`,
-        });
+        safeRmDir(tmpDir);
+        console.error("pg_dump failed:", pgStderr || `exit ${code}`);
+        if (!res.headersSent) {
+          return res.status(500).json({
+            error: "Backup failed",
+            details: pgStderr || `pg_dump exited with ${code}`,
+          });
+        } else {
+          // headers already sent; destroy the response
+          res.destroy(new Error("pg_dump failed"));
+          return;
+        }
       }
-      // SUCCESS: create a single tar.gz archive of the dump directory (so we can stream one file)
-      let archiver;
-      let archStderr = "";
-      if (isWin) {
-        // Use PowerShell Compress-Archive on Windows (creates .zip)
-        // Use -Force to overwrite archive if it exists
-        // Protect paths by wrapping in double quotes
-        const psCmd = `Compress-Archive -Path "${tmpDir}\\*" -DestinationPath "${finalArchivePath}" -Force`;
-        archiver = spawn("powershell.exe", ["-NoProfile", "-Command", psCmd], {
-          env: process.env,
-        });
-      } else {
-        // POSIX: use tar to create gzipped tarball
-        archiver = spawn("tar", ["-czf", finalArchivePath, "-C", tmpDir, "."]);
-      }
-      archiver.stderr.on("data", (chunk) => {
-        archStderr += chunk.toString();
-      });
-      archiver.on("error", (err) => {
-        try {
-          fs.rmSync(tmpDir, { recursive: true, force: true });
-        } catch {}
-        console.error("Failed to start archiver:", err);
-        return res
-          .status(500)
-          .json({ error: "Failed to archive backup", details: err.message });
-      });
-      archiver.on("close", (archCode) => {
-        // remove tmpDir now that archive attempt finished
-        try {
-          fs.rmSync(tmpDir, { recursive: true, force: true });
-        } catch (e) {}
-        if (archCode !== 0) {
-          console.error(
-            "Archiver exited with",
-            archCode,
-            "stderr:",
-            archStderr
-          );
-          try {
-            fs.unlinkSync(finalArchivePath);
-          } catch (e) {}
-          return res.status(500).json({
-            error: "Failed to create backup archive",
-            details: archStderr || `archiver exited with code ${archCode}`,
-          });
-        }
-        // Stream the archive to the client
-        res.setHeader(
-          "Content-Disposition",
-          `attachment; filename=${path.basename(finalArchivePath)}`
-        );
-        res.setHeader(
-          "Content-Type",
-          isWin ? "application/zip" : "application/gzip"
-        );
-        const fileStream = fs.createReadStream(finalArchivePath);
-        fileStream.pipe(res);
-        // when the response finishes (success)
-        res.once("finish", async () => {
-          // cleanup archive
-          try {
-            fs.unlinkSync(finalArchivePath);
-          } catch (_) {}
-          // update metadata (fire-and-forget, but we await to log failures)
-          try {
-            await storage.createBackup(userId);
-            await storage.deleteNotificationsByType(userId, "BACKUP");
-          } catch (err) {
-            console.error("Backup saved but metadata update failed:", err);
-          }
-        });
-        // if client disconnects or error in streaming
-        res.once("close", () => {
-          // ensure archive removed
-          try {
-            fs.unlinkSync(finalArchivePath);
-          } catch (_) {}
-        });
-        fileStream.on("error", (err) => {
-          console.error("Error streaming archive:", err);
-          try {
-            fs.unlinkSync(finalArchivePath);
-          } catch (_) {}
-          if (!res.headersSent)
-            res.status(500).json({ error: "Error streaming backup" });
-        });
-      }); // archiver.on close
-    }); // pgDump.on close
+      // pg_dump succeeded; stream the archive directly to the response using archiver
+      // Set headers before piping
+      res.setHeader(
+        "Content-Disposition",
+        `attachment; filename="${filename}"`
+      );
+      res.setHeader(
+        "Content-Type",
+        useZip ? "application/zip" : "application/gzip"
+      );
+      const archive = archiver(
+        useZip ? "zip" : "tar",
+        useZip ? {} : { gzip: true, gzipOptions: { level: 6 } }
+      );
+      let archErr: string | null = null;
+      archive.on("error", (err) => {
+        archErr = err.message;
+        console.error("Archiver error:", err);
+        // attempt to respond with an error if possible
+        try {
+          if (!res.headersSent) {
+            res.status(500).json({
+              error: "Failed to create archive",
+              details: err.message,
+            });
+          } else {
+            // if streaming already started, destroy the connection
+            res.destroy(err);
+          }
+        } catch (e) {
+          // swallow
+        } finally {
+          safeRmDir(tmpDir);
+        }
+      });
+      // If the client disconnects while streaming,
+      // destroy the archiver (stop processing) and clean up tmpDir
+      res.once("close", () => {
+        try {
+          archive.destroy();
+        } catch (e) {}
+        safeRmDir(tmpDir);
+      });
+      // When streaming finishes successfully
+      res.once("finish", async () => {
+        // cleanup the tmp dir used by pg_dump
+        safeRmDir(tmpDir);
+        // update metadata (try/catch so it won't break the response flow)
+        try {
+          await storage.createBackup(userId);
+          await storage.deleteNotificationsByType(userId, "BACKUP");
+        } catch (err) {
+          console.error("Backup saved but metadata update failed:", err);
+        }
+      });
+      // Pipe the archive into the response
+      archive.pipe(res);
+      // Add the dumped directory contents to the archive root:
+      // `directory(source, dest)` with dest set to false places contents at the archive root
+      archive.directory(tmpDir + path.sep, false);
+      // finalize the archive (this starts streaming)
+      try {
+        await archive.finalize();
+      } catch (err: any) {
+        console.error("Failed to finalize archive:", err);
+        // if headers are not sent, send 500; otherwise destroy
+        try {
+          if (!res.headersSent) {
+            res.status(500).json({
+              error: "Failed to finalize archive",
+              details: String(err),
+            });
+          } else {
+            res.destroy(err);
+          }
+        } catch (e) {}
+        safeRmDir(tmpDir);
+      }
+    });
   } catch (err: any) {
-    console.error("Unexpected error:", err);
+    console.error("Unexpected error in /backup:", err);
     if (!res.headersSent) {
-      res
+      return res
         .status(500)
         .json({ message: "Internal server error", details: String(err) });
+    } else {
+      res.destroy(err);
     }
   }
 });
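
Two notes on the rewrite. First, it eliminates the intermediate archive file: the old code shelled out to Compress-Archive or tar, wrote a second copy of the backup to disk, and then streamed that file, while archiver compresses straight into the HTTP response. Second, because the download now wraps a directory-format dump rather than a plain SQL file, restoring means extracting the archive and handing the directory to pg_restore, which can also run in parallel. A hypothetical restore counterpart (paths, job count, and optional flags are assumptions, not part of this commit):

import { spawn } from "child_process";

// Sketch: restore a directory-format dump previously extracted to dumpDir.
const dumpDir = "/tmp/dental_backup_extracted"; // assumed extraction path
const pgRestore = spawn(
  "pg_restore",
  [
    "--format=directory",
    "--jobs=4", // parallel restore, mirroring the parallel dump
    "--clean", // drop objects before recreating them (optional)
    "--dbname", process.env.DATABASE_URL!, // assumed env var name
    dumpDir,
  ],
  { env: process.env }
);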