diff --git a/apps/Backend/src/routes/database-management.ts b/apps/Backend/src/routes/database-management.ts index 4c21ae6..33cd767 100644 --- a/apps/Backend/src/routes/database-management.ts +++ b/apps/Backend/src/routes/database-management.ts @@ -19,14 +19,40 @@ router.post("/backup", async (req: Request, res: Response): Promise => { return res.status(401).json({ error: "Unauthorized" }); } - const fileName = `dental_backup_${Date.now()}.dump`; - const tmpFile = path.join(os.tmpdir(), fileName); + // create a unique tmp directory for directory-format dump + const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dental_backup_")); // MUST + + // choose archive extension per platform + const isWin = process.platform === "win32"; + const archiveName = isWin + ? `dental_backup_${Date.now()}.zip` + : `dental_backup_${Date.now()}.tar.gz`; + const archivePath = path.join(os.tmpdir(), archiveName); + + // ensure archivePath is not inside tmpDir (very important) + if (archivePath.startsWith(tmpDir) || tmpDir.startsWith(archivePath)) { + // place archive in parent tmp to be safe + const safeDir = path.join(os.tmpdir(), "dental_backups"); + try { + fs.mkdirSync(safeDir, { recursive: true, mode: 0o700 }); + } catch {} + // recompute archivePath + const safeArchivePath = path.join(safeDir, archiveName); + // overwrite archivePath with safe location + // (note: might require permission to write to safeDir) + (global as any).__archivePathOverride = safeArchivePath; + } + + const finalArchivePath = + (global as any).__archivePathOverride || archivePath; // Spawn pg_dump const pgDump = spawn( "pg_dump", [ - "-Fc", // custom format + "-Fd", // DIRECTORY format (required for parallel dump) + "-j", + "4", // number of parallel jobs โ€” MUST be >0 for parallelism "--no-acl", "--no-owner", "-h", @@ -35,7 +61,7 @@ router.post("/backup", async (req: Request, res: Response): Promise => { process.env.DB_USER || "postgres", process.env.DB_NAME || "dental_db", "-f", - tmpFile, // write 
directly to temp file + tmpDir, // write parallely ], { env: { @@ -50,22 +76,105 @@ router.post("/backup", async (req: Request, res: Response): Promise => { errorMessage += chunk.toString(); }); + // handle spawn error immediately + pgDump.on("error", (err) => { + try { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } catch {} + console.error("Failed to start pg_dump:", err); + return res + .status(500) + .json({ error: "Failed to run pg_dump", details: err.message }); + }); + + // when pg_dump ends pgDump.on("close", (code) => { - if (code === 0) { - // โœ… Send only if dump succeeded + if (code !== 0) { + // cleanup tmpDir + try { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } catch {} + console.error("pg_dump failed:", errorMessage || `exit ${code}`); + return res.status(500).json({ + error: "Backup failed", + details: errorMessage || `pg_dump exited with ${code}`, + }); + } + + // SUCCESS: create a single tar.gz archive of the dump directory (so we can stream one file) + let archiver; + let archStderr = ""; + + if (isWin) { + // Use PowerShell Compress-Archive on Windows (creates .zip) + // Use -Force to overwrite archive if it exists + // Protect paths by wrapping in double quotes + const psCmd = `Compress-Archive -Path "${tmpDir}\\*" -DestinationPath "${finalArchivePath}" -Force`; + archiver = spawn("powershell.exe", ["-NoProfile", "-Command", psCmd], { + env: process.env, + }); + } else { + // POSIX: use tar to create gzipped tarball + archiver = spawn("tar", ["-czf", finalArchivePath, "-C", tmpDir, "."]); + } + + archiver.stderr.on("data", (chunk) => { + archStderr += chunk.toString(); + }); + + archiver.on("error", (err) => { + try { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } catch {} + console.error("Failed to start archiver:", err); + return res + .status(500) + .json({ error: "Failed to archive backup", details: err.message }); + }); + + archiver.on("close", (archCode) => { + // remove tmpDir now that archive 
attempt finished + try { + fs.rmSync(tmpDir, { recursive: true, force: true }); + } catch (e) {} + + if (archCode !== 0) { + console.error( + "Archiver exited with", + archCode, + "stderr:", + archStderr + ); + try { + fs.unlinkSync(finalArchivePath); + } catch (e) {} + return res.status(500).json({ + error: "Failed to create backup archive", + details: archStderr || `archiver exited with code ${archCode}`, + }); + } + + // Stream the archive to the client res.setHeader( "Content-Disposition", - `attachment; filename=${fileName}` + `attachment; filename=${path.basename(finalArchivePath)}` + ); + res.setHeader( + "Content-Type", + isWin ? "application/zip" : "application/gzip" ); - res.setHeader("Content-Type", "application/octet-stream"); - const fileStream = fs.createReadStream(tmpFile); + const fileStream = fs.createReadStream(finalArchivePath); fileStream.pipe(res); - fileStream.on("close", async () => { - fs.unlink(tmpFile, () => {}); // cleanup temp file + // when the response finishes (success) + res.once("finish", async () => { + // cleanup archive + try { + fs.unlinkSync(finalArchivePath); + } catch (_) {} - // โœ… Then, in background, update DB + // update metadata (fire-and-forget, but we await to log failures) try { await storage.createBackup(userId); await storage.deleteNotificationsByType(userId, "BACKUP"); @@ -73,31 +182,31 @@ router.post("/backup", async (req: Request, res: Response): Promise => { console.error("Backup saved but metadata update failed:", err); } }); - } else { - console.error("pg_dump failed:", errorMessage); - fs.unlink(tmpFile, () => {}); // cleanup - res.status(500).json({ - error: "Backup failed", - details: errorMessage || `pg_dump exited with code ${code}`, - }); - } - }); - pgDump.on("error", (err) => { - console.error("Failed to start pg_dump:", err); - fs.unlink(tmpFile, () => {}); - res.status(500).json({ - error: "Failed to run pg_dump", - details: err.message, - }); - }); + // if client disconnects or error in streaming + 
res.once("close", () => { + // ensure archive removed + try { + fs.unlinkSync(finalArchivePath); + } catch (_) {} + }); + + fileStream.on("error", (err) => { + console.error("Error streaming archive:", err); + try { + fs.unlinkSync(finalArchivePath); + } catch (_) {} + if (!res.headersSent) + res.status(500).json({ error: "Error streaming backup" }); + }); + }); // archiver.on close + }); // pgDump.on close } catch (err: any) { console.error("Unexpected error:", err); if (!res.headersSent) { - res.status(500).json({ - message: "Internal server error", - details: String(err), - }); + res + .status(500) + .json({ message: "Internal server error", details: String(err) }); } } }); diff --git a/apps/Frontend/src/pages/database-management-page.tsx b/apps/Frontend/src/pages/database-management-page.tsx index f189737..87d1d5d 100644 --- a/apps/Frontend/src/pages/database-management-page.tsx +++ b/apps/Frontend/src/pages/database-management-page.tsx @@ -26,6 +26,41 @@ export default function DatabaseManagementPage() { }, }); + // helper: safely extract filename from fetch Response headers + function getFileNameFromResponse(res: Response): string { + const disposition = res.headers.get("Content-Disposition") || ""; + // filename*=UTF-8''encoded%20name.zip + const starMatch = disposition.match(/filename\*\s*=\s*([^;]+)/i); + if (starMatch && starMatch[1]) { + let val = starMatch[1] + .trim() + .replace(/^UTF-8''/i, "") + .replace(/['"]/g, ""); + try { + return decodeURIComponent(val); + } catch { + return val; + } + } + + // filename="name" OR filename=name + const fileNameRegex = /filename\s*=\s*"([^"]+)"|filename\s*=\s*([^;]+)/i; + const normalMatch = disposition.match(fileNameRegex); + if (normalMatch) { + // normalMatch[1] corresponds to the quoted capture, normalMatch[2] to unquoted + const candidate = (normalMatch[1] ?? normalMatch[2] ?? 
"").trim(); + if (candidate) return candidate.replace(/['"]/g, ""); + } + + // fallback by content-type + const ct = (res.headers.get("Content-Type") || "").toLowerCase(); + const iso = new Date().toISOString().replace(/[:.]/g, "-"); + if (ct.includes("zip")) return `dental_backup_${iso}.zip`; + if (ct.includes("gzip") || ct.includes("x-gzip")) + return `dental_backup_${iso}.tar.gz`; + return `dental_backup_${iso}.dump`; + } + // ----- Backup mutation ----- const backupMutation = useMutation({ mutationFn: async () => { @@ -40,15 +75,14 @@ export default function DatabaseManagementPage() { throw new Error((errorBody as any)?.error || "Backup failed"); } + // get filename from Content-Disposition or fallback + const fileName = getFileNameFromResponse(res); + // Convert response to blob (file) const blob = await res.blob(); const url = window.URL.createObjectURL(blob); const a = document.createElement("a"); a.href = url; - const disposition = res.headers.get("Content-Disposition"); - const fileName = - disposition?.split("filename=")[1]?.replace(/"/g, "") || - `dental_backup_${new Date().toISOString()}.dump`; a.download = fileName; document.body.appendChild(a); a.click(); diff --git a/packages/db/docs/migration.md b/packages/db/docs/migration.md new file mode 100644 index 0000000..a75a05d --- /dev/null +++ b/packages/db/docs/migration.md @@ -0,0 +1,137 @@ +# ๐Ÿฆท DentalApp Database Restore Guide + +This document explains how to **restore the DentalApp PostgreSQL database** on a new PC using the latest backup file from the main PC. + +--- + +## ๐Ÿงฉ Overview + +You will: +1. Create a PostgreSQL backup on the **main PC** +2. Copy it to the **target PC** +3. 
Restore it cleanly using `pg_restore` + +--- + +## ⚙️ Prerequisites + +Before starting: +- PostgreSQL is installed on both machines (same major version recommended) +- The app is **not running** during restore +- You know the database credentials + *(from `.env` or environment variables)* + +Example: +```bash +DATABASE_URL=postgresql://postgres:mypassword@localhost:5432/dentalapp +``` + +## 🧭 Steps to Restore Database on Another PC + +### 🖥️ Step 1 — Create Backup on Main PC +- Generate the backup archive (`.zip` on Windows, `.tar.gz` on Linux) from the backup page of the app. + +### Step 2 — Copy Backup File to Target PC +- Transfer the backup file to the second PC using USB, network share, cloud, or SCP. + +- Example destination path: +C:\db_backups\backup.zip + +### 🧹 Step 3 — Prepare the Target PC +- Stop the DentalApp application to avoid database locks. +- Ensure PostgreSQL is installed and running. + +- (Optional but recommended) Drop the existing database: +``` +PGPASSWORD='mypassword' dropdb -U postgres -h localhost dentalapp +``` + +### ♻️ Step 4 — Restore the Database + +# Case 1: when you received a `.zip` archive. +-linux bash + +# 4.1) unzip to a directory +``` +unzip backup.zip -d /tmp/dental_dump_dir +``` + + +# 4.2) restore into an already-created DB named 'dentalapp' +``` +PGPASSWORD='mypassword' createdb -U postgres -h localhost -O postgres dentalapp # optional +PGPASSWORD='mypassword' pg_restore -U postgres -h localhost -d dentalapp -j 4 /tmp/dental_dump_dir + +or +PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres /tmp/dental_dump_dir +``` + +# Case 2: when you received a `.tar.gz` archive.
+-linux bash + +# 4.1) extract the tarball to a directory +``` +mkdir -p /tmp/dental_dump_dir && tar -xzf backup.tar.gz -C /tmp/dental_dump_dir +``` + + +# 4.2) restore into an already-created DB named 'dentalapp' +``` +PGPASSWORD='mypassword' createdb -U postgres -h localhost -O postgres dentalapp # optional +PGPASSWORD='mypassword' pg_restore -U postgres -h localhost -d dentalapp -j 4 /tmp/dental_dump_dir + +or +PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres /tmp/dental_dump_dir +``` + + +### ✅ Step 5 — Verify the Restore + +- Check that the tables are restored successfully: +``` +PGPASSWORD='mypassword' psql -U postgres -h localhost -d dentalapp -c "\dt" +``` + +- You should see all the application tables listed. + +### 🧩 Step 6 — Update App Configuration + +- Ensure the .env file on the target PC points to the correct database: +``` +DATABASE_URL=postgresql://postgres:mypassword@localhost:5432/dentalapp +``` + +- Then start the DentalApp application and verify that it connects and displays data correctly. + +# 🧠 Step 7 — Tips + +- Use the same PostgreSQL version as the main PC. + + +- For large databases, use parallel restore for speed: +``` +pg_restore -U postgres -j 4 -d dentalapp /tmp/dental_dump_dir +``` + +- Always keep at least one recent backup archived safely.
+ + +# If you see this error: + +- pg_restore: error: unsupported version (1.16) in file header + +- run the following commands: + +- 1) Add PGDG (official PostgreSQL) APT repo and its key, then update and install client-17 +``` +sudo apt update && sudo apt install -y wget ca-certificates gnupg lsb-release +curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo gpg --dearmor -o /usr/share/keyrings/pgdg.gpg +echo "deb [signed-by=/usr/share/keyrings/pgdg.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list +sudo apt update +sudo apt install -y postgresql-client-17 +``` + +- 2) Run pg_restore from the installed v17 binary (replace password as needed) +``` +PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres /tmp/dental_dump_dir +``` \ No newline at end of file