feat(parallel-backupfile-added)
This commit is contained in:
@@ -19,14 +19,40 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
|
||||
return res.status(401).json({ error: "Unauthorized" });
|
||||
}
|
||||
|
||||
const fileName = `dental_backup_${Date.now()}.dump`;
|
||||
const tmpFile = path.join(os.tmpdir(), fileName);
|
||||
// create a unique tmp directory for directory-format dump
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "dental_backup_")); // MUST
|
||||
|
||||
// choose archive extension per platform
|
||||
const isWin = process.platform === "win32";
|
||||
const archiveName = isWin
|
||||
? `dental_backup_${Date.now()}.zip`
|
||||
: `dental_backup_${Date.now()}.tar.gz`;
|
||||
const archivePath = path.join(os.tmpdir(), archiveName);
|
||||
|
||||
// ensure archivePath is not inside tmpDir (very important)
|
||||
if (archivePath.startsWith(tmpDir) || tmpDir.startsWith(archivePath)) {
|
||||
// place archive in parent tmp to be safe
|
||||
const safeDir = path.join(os.tmpdir(), "dental_backups");
|
||||
try {
|
||||
fs.mkdirSync(safeDir, { recursive: true, mode: 0o700 });
|
||||
} catch {}
|
||||
// recompute archivePath
|
||||
const safeArchivePath = path.join(safeDir, archiveName);
|
||||
// overwrite archivePath with safe location
|
||||
// (note: might require permission to write to safeDir)
|
||||
(global as any).__archivePathOverride = safeArchivePath;
|
||||
}
|
||||
|
||||
const finalArchivePath =
|
||||
(global as any).__archivePathOverride || archivePath;
|
||||
|
||||
// Spawn pg_dump
|
||||
const pgDump = spawn(
|
||||
"pg_dump",
|
||||
[
|
||||
"-Fc", // custom format
|
||||
"-Fd", // DIRECTORY format (required for parallel dump)
|
||||
"-j",
|
||||
"4", // number of parallel jobs — MUST be >0 for parallelism
|
||||
"--no-acl",
|
||||
"--no-owner",
|
||||
"-h",
|
||||
@@ -35,7 +61,7 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
|
||||
process.env.DB_USER || "postgres",
|
||||
process.env.DB_NAME || "dental_db",
|
||||
"-f",
|
||||
tmpFile, // write directly to temp file
|
||||
tmpDir, // write parallely
|
||||
],
|
||||
{
|
||||
env: {
|
||||
@@ -50,22 +76,105 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
|
||||
errorMessage += chunk.toString();
|
||||
});
|
||||
|
||||
// handle spawn error immediately
|
||||
pgDump.on("error", (err) => {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {}
|
||||
console.error("Failed to start pg_dump:", err);
|
||||
return res
|
||||
.status(500)
|
||||
.json({ error: "Failed to run pg_dump", details: err.message });
|
||||
});
|
||||
|
||||
// when pg_dump ends
|
||||
pgDump.on("close", (code) => {
|
||||
if (code === 0) {
|
||||
// ✅ Send only if dump succeeded
|
||||
if (code !== 0) {
|
||||
// cleanup tmpDir
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {}
|
||||
console.error("pg_dump failed:", errorMessage || `exit ${code}`);
|
||||
return res.status(500).json({
|
||||
error: "Backup failed",
|
||||
details: errorMessage || `pg_dump exited with ${code}`,
|
||||
});
|
||||
}
|
||||
|
||||
// SUCCESS: create a single tar.gz archive of the dump directory (so we can stream one file)
|
||||
let archiver;
|
||||
let archStderr = "";
|
||||
|
||||
if (isWin) {
|
||||
// Use PowerShell Compress-Archive on Windows (creates .zip)
|
||||
// Use -Force to overwrite archive if it exists
|
||||
// Protect paths by wrapping in double quotes
|
||||
const psCmd = `Compress-Archive -Path "${tmpDir}\\*" -DestinationPath "${finalArchivePath}" -Force`;
|
||||
archiver = spawn("powershell.exe", ["-NoProfile", "-Command", psCmd], {
|
||||
env: process.env,
|
||||
});
|
||||
} else {
|
||||
// POSIX: use tar to create gzipped tarball
|
||||
archiver = spawn("tar", ["-czf", finalArchivePath, "-C", tmpDir, "."]);
|
||||
}
|
||||
|
||||
archiver.stderr.on("data", (chunk) => {
|
||||
archStderr += chunk.toString();
|
||||
});
|
||||
|
||||
archiver.on("error", (err) => {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {}
|
||||
console.error("Failed to start archiver:", err);
|
||||
return res
|
||||
.status(500)
|
||||
.json({ error: "Failed to archive backup", details: err.message });
|
||||
});
|
||||
|
||||
archiver.on("close", (archCode) => {
|
||||
// remove tmpDir now that archive attempt finished
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch (e) {}
|
||||
|
||||
if (archCode !== 0) {
|
||||
console.error(
|
||||
"Archiver exited with",
|
||||
archCode,
|
||||
"stderr:",
|
||||
archStderr
|
||||
);
|
||||
try {
|
||||
fs.unlinkSync(finalArchivePath);
|
||||
} catch (e) {}
|
||||
return res.status(500).json({
|
||||
error: "Failed to create backup archive",
|
||||
details: archStderr || `archiver exited with code ${archCode}`,
|
||||
});
|
||||
}
|
||||
|
||||
// Stream the archive to the client
|
||||
res.setHeader(
|
||||
"Content-Disposition",
|
||||
`attachment; filename=${fileName}`
|
||||
`attachment; filename=${path.basename(finalArchivePath)}`
|
||||
);
|
||||
res.setHeader(
|
||||
"Content-Type",
|
||||
isWin ? "application/zip" : "application/gzip"
|
||||
);
|
||||
res.setHeader("Content-Type", "application/octet-stream");
|
||||
|
||||
const fileStream = fs.createReadStream(tmpFile);
|
||||
const fileStream = fs.createReadStream(finalArchivePath);
|
||||
fileStream.pipe(res);
|
||||
|
||||
fileStream.on("close", async () => {
|
||||
fs.unlink(tmpFile, () => {}); // cleanup temp file
|
||||
// when the response finishes (success)
|
||||
res.once("finish", async () => {
|
||||
// cleanup archive
|
||||
try {
|
||||
fs.unlinkSync(finalArchivePath);
|
||||
} catch (_) {}
|
||||
|
||||
// ✅ Then, in background, update DB
|
||||
// update metadata (fire-and-forget, but we await to log failures)
|
||||
try {
|
||||
await storage.createBackup(userId);
|
||||
await storage.deleteNotificationsByType(userId, "BACKUP");
|
||||
@@ -73,31 +182,31 @@ router.post("/backup", async (req: Request, res: Response): Promise<any> => {
|
||||
console.error("Backup saved but metadata update failed:", err);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
console.error("pg_dump failed:", errorMessage);
|
||||
fs.unlink(tmpFile, () => {}); // cleanup
|
||||
res.status(500).json({
|
||||
error: "Backup failed",
|
||||
details: errorMessage || `pg_dump exited with code ${code}`,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
pgDump.on("error", (err) => {
|
||||
console.error("Failed to start pg_dump:", err);
|
||||
fs.unlink(tmpFile, () => {});
|
||||
res.status(500).json({
|
||||
error: "Failed to run pg_dump",
|
||||
details: err.message,
|
||||
});
|
||||
});
|
||||
// if client disconnects or error in streaming
|
||||
res.once("close", () => {
|
||||
// ensure archive removed
|
||||
try {
|
||||
fs.unlinkSync(finalArchivePath);
|
||||
} catch (_) {}
|
||||
});
|
||||
|
||||
fileStream.on("error", (err) => {
|
||||
console.error("Error streaming archive:", err);
|
||||
try {
|
||||
fs.unlinkSync(finalArchivePath);
|
||||
} catch (_) {}
|
||||
if (!res.headersSent)
|
||||
res.status(500).json({ error: "Error streaming backup" });
|
||||
});
|
||||
}); // archiver.on close
|
||||
}); // pgDump.on close
|
||||
} catch (err: any) {
|
||||
console.error("Unexpected error:", err);
|
||||
if (!res.headersSent) {
|
||||
res.status(500).json({
|
||||
message: "Internal server error",
|
||||
details: String(err),
|
||||
});
|
||||
res
|
||||
.status(500)
|
||||
.json({ message: "Internal server error", details: String(err) });
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -26,6 +26,41 @@ export default function DatabaseManagementPage() {
|
||||
},
|
||||
});
|
||||
|
||||
// helper: safely extract filename from fetch Response headers
|
||||
function getFileNameFromResponse(res: Response): string {
|
||||
const disposition = res.headers.get("Content-Disposition") || "";
|
||||
// filename*=UTF-8''encoded%20name.zip
|
||||
const starMatch = disposition.match(/filename\*\s*=\s*([^;]+)/i);
|
||||
if (starMatch && starMatch[1]) {
|
||||
let val = starMatch[1]
|
||||
.trim()
|
||||
.replace(/^UTF-8''/i, "")
|
||||
.replace(/['"]/g, "");
|
||||
try {
|
||||
return decodeURIComponent(val);
|
||||
} catch {
|
||||
return val;
|
||||
}
|
||||
}
|
||||
|
||||
// filename="name" OR filename=name
|
||||
const fileNameRegex = /filename\s*=\s*"([^"]+)"|filename\s*=\s*([^;]+)/i;
|
||||
const normalMatch = disposition.match(fileNameRegex);
|
||||
if (normalMatch) {
|
||||
// normalMatch[1] corresponds to the quoted capture, normalMatch[2] to unquoted
|
||||
const candidate = (normalMatch[1] ?? normalMatch[2] ?? "").trim();
|
||||
if (candidate) return candidate.replace(/['"]/g, "");
|
||||
}
|
||||
|
||||
// fallback by content-type
|
||||
const ct = (res.headers.get("Content-Type") || "").toLowerCase();
|
||||
const iso = new Date().toISOString().replace(/[:.]/g, "-");
|
||||
if (ct.includes("zip")) return `dental_backup_${iso}.zip`;
|
||||
if (ct.includes("gzip") || ct.includes("x-gzip"))
|
||||
return `dental_backup_${iso}.tar.gz`;
|
||||
return `dental_backup_${iso}.dump`;
|
||||
}
|
||||
|
||||
// ----- Backup mutation -----
|
||||
const backupMutation = useMutation({
|
||||
mutationFn: async () => {
|
||||
@@ -40,15 +75,14 @@ export default function DatabaseManagementPage() {
|
||||
throw new Error((errorBody as any)?.error || "Backup failed");
|
||||
}
|
||||
|
||||
// get filename from Content-Disposition or fallback
|
||||
const fileName = getFileNameFromResponse(res);
|
||||
|
||||
// Convert response to blob (file)
|
||||
const blob = await res.blob();
|
||||
const url = window.URL.createObjectURL(blob);
|
||||
const a = document.createElement("a");
|
||||
a.href = url;
|
||||
const disposition = res.headers.get("Content-Disposition");
|
||||
const fileName =
|
||||
disposition?.split("filename=")[1]?.replace(/"/g, "") ||
|
||||
`dental_backup_${new Date().toISOString()}.dump`;
|
||||
a.download = fileName;
|
||||
document.body.appendChild(a);
|
||||
a.click();
|
||||
|
||||
137
packages/db/docs/migration.md
Normal file
137
packages/db/docs/migration.md
Normal file
@@ -0,0 +1,137 @@
|
||||
# 🦷 DentalApp Database Restore Guide
|
||||
|
||||
This document explains how to **restore the DentalApp PostgreSQL database** on a new PC using the latest backup file from the main PC.
|
||||
|
||||
---
|
||||
|
||||
## 🧩 Overview
|
||||
|
||||
You will:
|
||||
1. Create a PostgreSQL backup on the **main PC**
|
||||
2. Copy it to the **target PC**
|
||||
3. Restore it cleanly using `pg_restore`
|
||||
|
||||
---
|
||||
|
||||
## ⚙️ Prerequisites
|
||||
|
||||
Before starting:
|
||||
- PostgreSQL is installed on both machines (same major version recommended)
|
||||
- The app is **not running** during restore
|
||||
- You know the database credentials
|
||||
*(from `.env` or environment variables)*
|
||||
|
||||
Example:
|
||||
```bash
|
||||
DATABASE_URL=postgresql://postgres:mypassword@localhost:5432/dentalapp
|
||||
```
|
||||
|
||||
## 🧭 Steps to Restore Database on Another PC
|
||||
|
||||
### 🖥️ Step 1 — Create Backup on Main PC
|
||||
- Generate the backup file from the backup page in the app.
|
||||
|
||||
### Step 2 — Copy Backup File to Target PC
|
||||
- Transfer the backup file to the second PC using USB, network share, cloud, or SCP.
|
||||
|
||||
- Example destination path:
|
||||
C:\db_backups\backup.dump
|
||||
|
||||
### 🧹 Step 3 — Prepare the Target PC
|
||||
- Stop the DentalApp application to avoid database locks.
|
||||
- Ensure PostgreSQL is installed and running.
|
||||
|
||||
- (Optional but recommended) Drop the existing database:
|
||||
```
|
||||
PGPASSWORD='mypassword' dropdb -U postgres -h localhost dentalapp
|
||||
```
|
||||
|
||||
### ♻️ Step 4 — Restore the Database
|
||||
|
||||
# Case 1: restoring from a `.zip` archive
|
||||
- Linux (bash)
|
||||
|
||||
# 4.1) unzip to a directory
|
||||
```
|
||||
unzip backup.zip -d /tmp/dental_dump_dir
|
||||
```
|
||||
|
||||
|
||||
# 4.2) restore into an already-created DB named 'dentalapp'
|
||||
```
|
||||
PGPASSWORD='mypassword' createdb -U postgres -h localhost -O postgres dentalapp # optional
|
||||
PGPASSWORD='mypassword' pg_restore -U postgres -h localhost -d dentalapp -j 4 /tmp/dental_dump_dir
|
||||
|
||||
or
|
||||
PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres /tmp/dental_dump_dir
|
||||
```
|
||||
|
||||
# Case 2: restoring from a `.tar.gz` archive
|
||||
- Linux (bash)
|
||||
|
||||
# 4.1) extract the tarball to a directory
|
||||
```
|
||||
mkdir -p /tmp/dental_dump_dir && tar -xzf backup.tar.gz -C /tmp/dental_dump_dir
|
||||
```
|
||||
|
||||
|
||||
# 4.2) restore into an already-created DB named 'dentalapp'
|
||||
```
|
||||
PGPASSWORD='mypassword' createdb -U postgres -h localhost -O postgres dentalapp # optional
|
||||
PGPASSWORD='mypassword' pg_restore -U postgres -h localhost -d dentalapp -j 4 /tmp/dental_dump_dir
|
||||
|
||||
or
|
||||
PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres /tmp/dental_dump_dir
|
||||
```
|
||||
|
||||
|
||||
### ✅ Step 5 — Verify the Restore
|
||||
|
||||
- Check that the tables are restored successfully:
|
||||
```
|
||||
PGPASSWORD='mypassword' psql -U postgres -h localhost -d dentalapp -c "\dt"
|
||||
```
|
||||
|
||||
- You should see all the application tables listed.
|
||||
|
||||
### 🧩 Step 6 — Update App Configuration
|
||||
|
||||
- Ensure the .env file on the target PC points to the correct database:
|
||||
```
|
||||
DATABASE_URL=postgresql://postgres:mypassword@localhost:5432/dentalapp
|
||||
```
|
||||
|
||||
- Then start the DentalApp application and verify that it connects and displays data correctly.
|
||||
|
||||
# 🧠 Step 7 — Tips
|
||||
|
||||
- Use the same PostgreSQL version as the main PC.
|
||||
|
||||
|
||||
- For large databases, use parallel restore for speed:
|
||||
```
|
||||
pg_restore -U postgres -j 4 -d dentalapp backup.dump
|
||||
```
|
||||
|
||||
- Always keep at least one recent backup archived safely.
|
||||
|
||||
|
||||
# If such error came:
|
||||
|
||||
- pg_restore: error: unsupported version (1.16) in file header
|
||||
|
||||
- use cmd:
|
||||
|
||||
- 1) Add PGDG (official PostgreSQL) APT repo and its key, then update and install client-17
|
||||
```
|
||||
sudo apt update && sudo apt install -y wget ca-certificates gnupg lsb-release
|
||||
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo gpg --dearmor -o /usr/share/keyrings/pgdg.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/pgdg.gpg] http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list
|
||||
sudo apt update
|
||||
sudo apt install -y postgresql-client-17
|
||||
```
|
||||
|
||||
- 2) Run pg_restore from the installed v17 binary (replace password as needed)
|
||||
```
|
||||
PGPASSWORD='mypassword' /usr/lib/postgresql/17/bin/pg_restore -v -U postgres -h localhost -C -d postgres ./backup.dump
|
||||
```
|
||||
Reference in New Issue
Block a user