feat(ocr-payment-page): add backend routes to app

This commit is contained in:
2025-08-30 01:11:59 +05:30
parent e5271d6979
commit c5edac2ab8
19 changed files with 314 additions and 109 deletions

View File

@@ -3,14 +3,15 @@ import patientsRoutes from "./patients";
import appointmentsRoutes from "./appointments"; import appointmentsRoutes from "./appointments";
import usersRoutes from "./users"; import usersRoutes from "./users";
import staffsRoutes from "./staffs"; import staffsRoutes from "./staffs";
import pdfExtractionRoutes from "./pdfExtraction";
import claimsRoutes from "./claims"; import claimsRoutes from "./claims";
import patientDataExtractionRoutes from "./patientdataExtraction";
import insuranceCredsRoutes from "./insuranceCreds"; import insuranceCredsRoutes from "./insuranceCreds";
import documentsRoutes from "./documents"; import documentsRoutes from "./documents";
import insuranceEligibilityRoutes from "./insuranceEligibility"; import insuranceEligibilityRoutes from "./insuranceEligibility";
import paymentsRoutes from "./payments"; import paymentsRoutes from "./payments";
import databaseManagementRoutes from "./database-management"; import databaseManagementRoutes from "./database-management";
import notificationsRoutes from "./notifications"; import notificationsRoutes from "./notifications";
import paymentOcrRoutes from "./paymentOcrExtraction";
const router = Router(); const router = Router();
@@ -18,7 +19,7 @@ router.use("/patients", patientsRoutes);
router.use("/appointments", appointmentsRoutes); router.use("/appointments", appointmentsRoutes);
router.use("/users", usersRoutes); router.use("/users", usersRoutes);
router.use("/staffs", staffsRoutes); router.use("/staffs", staffsRoutes);
router.use("/pdfExtraction", pdfExtractionRoutes); router.use("/patientDataExtraction", patientDataExtractionRoutes);
router.use("/claims", claimsRoutes); router.use("/claims", claimsRoutes);
router.use("/insuranceCreds", insuranceCredsRoutes); router.use("/insuranceCreds", insuranceCredsRoutes);
router.use("/documents", documentsRoutes); router.use("/documents", documentsRoutes);
@@ -26,5 +27,6 @@ router.use("/insuranceEligibility", insuranceEligibilityRoutes);
router.use("/payments", paymentsRoutes); router.use("/payments", paymentsRoutes);
router.use("/database-management", databaseManagementRoutes); router.use("/database-management", databaseManagementRoutes);
router.use("/notifications", notificationsRoutes); router.use("/notifications", notificationsRoutes);
router.use("/payment-ocr", paymentOcrRoutes);
export default router; export default router;

View File

@@ -2,17 +2,17 @@ import { Router } from "express";
import type { Request, Response } from "express"; import type { Request, Response } from "express";
const router = Router(); const router = Router();
import multer from "multer"; import multer from "multer";
import forwardToPdfService from "../services/pdfClient"; import forwardToPatientDataExtractorService from "../services/patientDataExtractorService";
const upload = multer({ storage: multer.memoryStorage() }); const upload = multer({ storage: multer.memoryStorage() });
router.post("/extract", upload.single("pdf"), async (req: Request, res: Response): Promise<any>=> { router.post("/patientdataextract", upload.single("pdf"), async (req: Request, res: Response): Promise<any>=> {
if (!req.file) { if (!req.file) {
return res.status(400).json({ error: "No PDF file uploaded." }); return res.status(400).json({ error: "No PDF file uploaded." });
} }
try { try {
const result = await forwardToPdfService(req.file); const result = await forwardToPatientDataExtractorService(req.file);
res.json(result); res.json(result);
} catch (err) { } catch (err) {
console.error(err); console.error(err);

View File

@@ -0,0 +1,50 @@
import { Router, Request, Response } from "express";
import multer from "multer";
import { forwardToPaymentOCRService } from "../services/paymentOCRService";

const router = Router();

// Keep files in memory; the FastAPI service accepts them as multipart bytes.
const upload = multer({ storage: multer.memoryStorage() });

// Hoisted module constant: built once instead of on every request.
const ALLOWED_MIME_TYPES = new Set([
  "image/jpeg",
  "image/png",
  "image/tiff",
  "image/bmp",
  "image/jpg",
]);

/**
 * POST /payment-ocr/extract (multipart field name: "files")
 *
 * Accepts one or more images, rejects empty uploads (400) and unsupported
 * MIME types (415), then forwards the files to the Payment OCR service and
 * returns `{ rows }`. Pipeline failures surface as a 500.
 */
router.post(
  "/extract",
  upload.array("files"), // allow multiple images per request
  async (req: Request, res: Response): Promise<Response> => {
    try {
      const files = req.files as Express.Multer.File[] | undefined;
      if (!files || files.length === 0) {
        return res
          .status(400)
          .json({ error: "No image files uploaded. Use field name 'files'." });
      }
      // Basic client-side MIME guard; the OCR service validates again by extension.
      const bad = files.filter(
        (f) => !ALLOWED_MIME_TYPES.has(f.mimetype.toLowerCase())
      );
      if (bad.length) {
        return res.status(415).json({
          error: `Unsupported file types: ${bad
            .map((b) => b.originalname)
            .join(", ")}`,
        });
      }
      const rows = await forwardToPaymentOCRService(files);
      return res.json({ rows });
    } catch (err) {
      console.error(err);
      return res.status(500).json({ error: "Payment OCR extraction failed" });
    }
  }
);

export default router;

View File

@@ -9,7 +9,7 @@ export interface ExtractedData {
[key: string]: any; [key: string]: any;
} }
export default async function forwardToPdfService( export default async function forwardToPatientDataExtractorService(
file: Express.Multer.File file: Express.Multer.File
): Promise<ExtractedData> { ): Promise<ExtractedData> {
const form = new FormData(); const form = new FormData();

View File

@@ -0,0 +1,34 @@
import axios from "axios";
import FormData from "form-data";
/**
 * Forward uploaded image files to the Payment OCR FastAPI service and return
 * the extracted rows.
 *
 * @param files one Multer file or an array of them (sent as multipart "files")
 * @returns the service's `rows` payload, or [] when the response has none
 * @throws Error carrying the HTTP status and the service's `detail` message
 */
export async function forwardToPaymentOCRService(
  files: Express.Multer.File | Express.Multer.File[]
): Promise<any> {
  const arr = Array.isArray(files) ? files : [files];
  const form = new FormData();
  for (const f of arr) {
    form.append("files", f.buffer, {
      filename: f.originalname,
      contentType: f.mimetype, // image/jpeg, image/png, image/tiff, etc.
      knownLength: f.size,
    });
  }
  // Endpoint is env-configurable; defaults to the local dev service
  // (matches PORT=5003 in apps/PaymentOCRService/.env.example).
  const url =
    process.env.PAYMENT_OCR_SERVICE_URL ?? "http://localhost:5003/extract/json";
  try {
    const resp = await axios.post<{ rows: any }>(url, form, {
      headers: form.getHeaders(),
      maxBodyLength: Infinity,
      maxContentLength: Infinity,
      timeout: 120000, // OCR can be heavy; adjust as needed
    });
    return resp.data?.rows ?? [];
  } catch (err: any) {
    // Bubble up a useful error message
    const status = err?.response?.status;
    const detail = err?.response?.data?.detail || err?.message || "Unknown error";
    throw new Error(`Payment OCR request failed${status ? ` (${status})` : ""}: ${detail}`);
  }
}

View File

@@ -16,7 +16,7 @@ export default function useExtractPdfData() {
const formData = new FormData(); const formData = new FormData();
formData.append("pdf", pdfFile); formData.append("pdf", pdfFile);
const res = await apiRequest("POST", "/api/pdfExtraction/extract", formData); const res = await apiRequest("POST", "/api/patientDataExtraction/patientdataextract", formData);
if (!res.ok) throw new Error("Failed to extract PDF"); if (!res.ok) throw new Error("Failed to extract PDF");
return res.json(); return res.json();
}, },

View File

@@ -0,0 +1,3 @@
GOOGLE_APPLICATION_CREDENTIALS=google-credentials.json
HOST="0.0.0.0"
PORT="5003"

1
apps/PaymentOCRService/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
google_credentials.json
google-credentials.json

View File

@@ -1,81 +0,0 @@
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse, PlainTextResponse
from typing import List, Optional
import io
import os
from app.pipeline_adapter import (
process_images_to_rows,
rows_to_csv_bytes,
)
# FastAPI application instance; metadata feeds the auto-generated OpenAPI docs.
app = FastAPI(
    title="Medical Billing OCR API",
    description="FastAPI wrapper around the complete OCR pipeline (Google Vision + deskew + line clustering + extraction).",
    version="1.0.0",
)

# Image extensions accepted by the upload endpoints (checked before running OCR).
ALLOWED_EXTS = {".jpg", ".jpeg", ".png", ".tif", ".tiff", ".bmp"}
@app.get("/health", response_class=PlainTextResponse)
def health():
    """Plain-text liveness probe; also reports whether GCP credentials are configured."""
    has_creds = bool(os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""))
    return f"OK | GOOGLE_APPLICATION_CREDENTIALS set: {has_creds}"
@app.post("/extract/json")
async def extract_json(files: List[UploadFile] = File(...)):
    """Run the OCR pipeline on the uploaded images and return rows as JSON.

    Responses: 400 when no files are sent, 415 on unsupported extensions,
    500 when the pipeline raises.
    """
    if not files:
        raise HTTPException(status_code=400, detail="No files provided.")
    # Validate extensions early (not bulletproof, but helpful)
    bad = [f.filename for f in files if os.path.splitext(f.filename or "")[1].lower() not in ALLOWED_EXTS]
    if bad:
        raise HTTPException(
            status_code=415,
            detail=f"Unsupported file types: {', '.join(bad)}. Allowed: {', '.join(sorted(ALLOWED_EXTS))}"
        )
    # Read blobs in-memory
    blobs = []
    filenames = []
    for f in files:
        blobs.append(await f.read())
        filenames.append(f.filename or "upload.bin")
    try:
        rows = process_images_to_rows(blobs, filenames)
        # rows is a list[dict] where each dict contains the columns you already emit (Patient Name, etc.)
        return JSONResponse(content={"rows": rows})
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Processing error: {e}")
@app.post("/extract/csv")
async def extract_csv(files: List[UploadFile] = File(...), filename: Optional[str] = None):
    """Run the OCR pipeline and stream the rows back as a CSV attachment.

    `filename` (query param) overrides the default download name.
    Responses: 400 when no files are sent, 415 on unsupported extensions,
    500 when the pipeline raises.
    """
    if not files:
        raise HTTPException(status_code=400, detail="No files provided.")
    # Same early extension check as /extract/json.
    bad = [f.filename for f in files if os.path.splitext(f.filename or "")[1].lower() not in ALLOWED_EXTS]
    if bad:
        raise HTTPException(
            status_code=415,
            detail=f"Unsupported file types: {', '.join(bad)}. Allowed: {', '.join(sorted(ALLOWED_EXTS))}"
        )
    # Read blobs in-memory
    blobs = []
    filenames = []
    for f in files:
        blobs.append(await f.read())
        filenames.append(f.filename or "upload.bin")
    try:
        rows = process_images_to_rows(blobs, filenames)
        csv_bytes = rows_to_csv_bytes(rows)
        out_name = filename or "medical_billing_extract.csv"
        # StreamingResponse with Content-Disposition so browsers download the file.
        return StreamingResponse(
            io.BytesIO(csv_bytes),
            media_type="text/csv",
            headers={"Content-Disposition": f'attachment; filename="{out_name}"'}
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Processing error: {e}")

View File

@@ -2,6 +2,8 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
ALL IS GENERATED BY REPLIT:
End-to-end local pipeline (single script) End-to-end local pipeline (single script)
- One Google Vision pass per image (DOCUMENT_TEXT_DETECTION) - One Google Vision pass per image (DOCUMENT_TEXT_DETECTION)

View File

@@ -4,10 +4,7 @@ from typing import List, Dict
import pandas as pd import pandas as pd
# Import your existing functions directly from complete_pipeline.py # Import your existing functions directly from complete_pipeline.py
from complete_pipeline import ( from complete_pipeline import smart_deskew_with_lines, extract_all_clients_from_lines
smart_deskew_with_lines,
extract_all_clients_from_lines,
)
def _process_single_image_bytes(blob: bytes, display_name: str) -> List[Dict]: def _process_single_image_bytes(blob: bytes, display_name: str) -> List[Dict]:
""" """

View File

@@ -0,0 +1,168 @@
from fastapi import FastAPI, UploadFile, File, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse, PlainTextResponse
from typing import List, Optional
import io
import os
import asyncio
from dotenv import load_dotenv
load_dotenv() # loads .env (GOOGLE_APPLICATION_CREDENTIALS, HOST, PORT, etc.)
# Your adapter that calls the pipeline
from complete_pipeline_adapter import process_images_to_rows,rows_to_csv_bytes
# -------------------------------------------------
# App + concurrency controls (similar to your other app)
# -------------------------------------------------
# FastAPI application instance; metadata feeds the auto-generated OpenAPI docs.
app = FastAPI(
    title="Payment OCR Services API",
    description="FastAPI wrapper around the OCR pipeline (Google Vision + deskew + line grouping + extraction).",
    version="1.0.0",
)

# Concurrency controls: at most MAX_CONCURRENCY requests run the OCR pipeline
# at once; extra requests wait on the semaphore.
MAX_CONCURRENCY = int(os.getenv("MAX_CONCURRENCY", "2"))
semaphore = asyncio.Semaphore(MAX_CONCURRENCY)
# Job counters reported by /status; read and written only under `lock`.
active_jobs = 0
waiting_jobs = 0
lock = asyncio.Lock()

# CORS: "*" allows all origins; otherwise CORS_ORIGINS is a comma-separated list.
cors_origins = os.getenv("CORS_ORIGINS", "*")
allow_origins = ["*"] if cors_origins.strip() == "*" else [o.strip() for o in cors_origins.split(",") if o.strip()]
app.add_middleware(
    CORSMiddleware,
    allow_origins=allow_origins,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Image extensions accepted by the upload endpoints (validated before OCR).
ALLOWED_EXTS = {".jpg", ".jpeg", ".png", ".tif", ".tiff", ".bmp"}
# -------------------------------------------------
# Health + status
# -------------------------------------------------
@app.get("/health", response_class=PlainTextResponse)
def health():
    """Plain-text liveness probe; also reports whether GCP credentials are configured."""
    has_creds = bool(os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""))
    return f"OK | GOOGLE_APPLICATION_CREDENTIALS set: {has_creds}"
@app.get("/status")
async def get_status():
    """Report current OCR load: running jobs, queued jobs, and the concurrency cap.

    Reads the shared counters under `lock` so the snapshot is consistent.
    """
    async with lock:
        return {
            "active_jobs": active_jobs,
            "queued_jobs": waiting_jobs,
            "max_concurrency": MAX_CONCURRENCY,
            "status": "busy" if active_jobs > 0 or waiting_jobs > 0 else "idle",
        }
# -------------------------------------------------
# Helpers
# -------------------------------------------------
def _validate_files(files: List[UploadFile]):
    """Reject empty uploads (400) and files whose extension is not allowed (415)."""
    if not files:
        raise HTTPException(status_code=400, detail="No files provided.")
    bad = []
    for f in files:
        extension = os.path.splitext(f.filename or "")[1].lower()
        if extension not in ALLOWED_EXTS:
            bad.append(f.filename)
    if bad:
        raise HTTPException(
            status_code=415,
            detail=f"Unsupported file types: {', '.join(bad)}. Allowed: {', '.join(sorted(ALLOWED_EXTS))}"
        )
# -------------------------------------------------
# Endpoints
# -------------------------------------------------
@app.post("/extract/json")
async def extract_json(files: List[UploadFile] = File(...)):
    """Run the OCR pipeline on the uploaded images and return rows as JSON.

    Flow: validate -> count as queued -> acquire semaphore -> count as active
    -> process -> decrement. Counter updates happen under `lock`; the
    semaphore caps concurrent pipeline runs at MAX_CONCURRENCY.
    NOTE(review): if the request is cancelled while waiting on the semaphore,
    waiting_jobs is never decremented — confirm whether that leak matters here.
    """
    _validate_files(files)
    async with lock:
        global waiting_jobs
        waiting_jobs += 1
    async with semaphore:
        async with lock:
            waiting_jobs -= 1
            global active_jobs
            active_jobs += 1
        try:
            blobs = [await f.read() for f in files]
            names = [f.filename or "upload.bin" for f in files]
            rows = process_images_to_rows(blobs, names)  # calls your pipeline
            return JSONResponse(content={"rows": rows})
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Processing error: {e}")
        finally:
            # Always release the active slot, even on failure.
            async with lock:
                active_jobs -= 1
@app.post("/extract/csvtext", response_class=PlainTextResponse)
async def extract_csvtext(files: List[UploadFile] = File(...)):
    """Run the OCR pipeline and return the rows as inline CSV text (no download).

    Same concurrency bookkeeping as /extract/json: queued/active counters are
    updated under `lock`, and the semaphore caps concurrent pipeline runs.
    """
    _validate_files(files)
    async with lock:
        global waiting_jobs
        waiting_jobs += 1
    async with semaphore:
        async with lock:
            waiting_jobs -= 1
            global active_jobs
            active_jobs += 1
        try:
            blobs = [await f.read() for f in files]
            names = [f.filename or "upload.bin" for f in files]
            rows = process_images_to_rows(blobs, names)
            csv_bytes = rows_to_csv_bytes(rows)
            # Decode to str so the body is served as text/csv rather than an attachment.
            return PlainTextResponse(csv_bytes.decode("utf-8"), media_type="text/csv")
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Processing error: {e}")
        finally:
            # Always release the active slot, even on failure.
            async with lock:
                active_jobs -= 1
@app.post("/extract/csv")
async def extract_csv(files: List[UploadFile] = File(...), filename: Optional[str] = None):
    """Run the OCR pipeline and stream the rows back as a CSV attachment.

    `filename` (query param) overrides the default download name. Same
    concurrency bookkeeping as /extract/json: counters under `lock`,
    pipeline runs capped by the semaphore.
    """
    _validate_files(files)
    async with lock:
        global waiting_jobs
        waiting_jobs += 1
    async with semaphore:
        async with lock:
            waiting_jobs -= 1
            global active_jobs
            active_jobs += 1
        try:
            blobs = [await f.read() for f in files]
            names = [f.filename or "upload.bin" for f in files]
            rows = process_images_to_rows(blobs, names)
            csv_bytes = rows_to_csv_bytes(rows)
            out_name = filename or "medical_billing_extract.csv"
            # StreamingResponse with Content-Disposition so browsers download the file.
            return StreamingResponse(
                io.BytesIO(csv_bytes),
                media_type="text/csv",
                headers={"Content-Disposition": f'attachment; filename="{out_name}"'}
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Processing error: {e}")
        finally:
            # Always release the active slot, even on failure.
            async with lock:
                active_jobs -= 1
# -------------------------------------------------
# Entrypoint (same pattern as your selenium app)
# -------------------------------------------------
# -------------------------------------------------
# Entrypoint
# -------------------------------------------------
if __name__ == "__main__":
    import uvicorn

    # Fall back to the .env.example defaults so the service still boots when
    # HOST/PORT are unset; int(os.getenv("PORT")) would raise TypeError on None.
    host = os.getenv("HOST", "0.0.0.0")
    port = int(os.getenv("PORT", "5003"))
    reload_flag = os.getenv("RELOAD", "false").lower() == "true"
    uvicorn.run(app, host=host, port=port, reload=reload_flag)

View File

@@ -1,5 +1,5 @@
{ {
"name": "pdfservice", "name": "paymentocrservice",
"private": true, "private": true,
"scripts": { "scripts": {
"postinstall": "pip install -r requirements.txt", "postinstall": "pip install -r requirements.txt",

View File

@@ -1,10 +1,26 @@
fastapi annotated-types==0.7.0
uvicorn[standard] anyio==4.10.0
google-cloud-vision click==8.2.1
opencv-python-headless colorama==0.4.6
pytesseract et_xmlfile==2.0.0
pillow fastapi==0.116.1
pandas h11==0.16.0
openpyxl idna==3.10
numpy numpy==2.2.6
python-multipart google-cloud-vision>=3.10.2
opencv-python==4.12.0.88
openpyxl==3.1.5
pandas==2.3.2
pydantic==2.11.7
pydantic_core==2.33.2
python-dateutil==2.9.0.post0
python-dotenv==1.1.1
pytz==2025.2
six==1.17.0
sniffio==1.3.1
starlette==0.47.3
typing-inspection==0.4.1
typing_extensions==4.15.0
tzdata==2025.2
uvicorn==0.35.0
python-multipart==0.0.20

21
package-lock.json generated
View File

@@ -163,8 +163,17 @@
"vite": "^6.3.5" "vite": "^6.3.5"
} }
}, },
"apps/PatientDataExtractorService": {
"name": "patientdataextractorservice",
"hasInstallScript": true
},
"apps/PaymentOCRService": {
"name": "paymentocrservice",
"hasInstallScript": true
},
"apps/PdfService": { "apps/PdfService": {
"name": "pdfservice", "name": "pdfservice",
"extraneous": true,
"hasInstallScript": true "hasInstallScript": true
}, },
"apps/SeleniumService": { "apps/SeleniumService": {
@@ -9983,11 +9992,19 @@
"devOptional": true, "devOptional": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/patientdataextractorservice": {
"resolved": "apps/PatientDataExtractorService",
"link": true
},
"node_modules/pause": { "node_modules/pause": {
"version": "0.0.1", "version": "0.0.1",
"resolved": "https://registry.npmjs.org/pause/-/pause-0.0.1.tgz", "resolved": "https://registry.npmjs.org/pause/-/pause-0.0.1.tgz",
"integrity": "sha512-KG8UEiEVkR3wGEb4m5yZkVCzigAD+cVEJck2CzYZO37ZGJfctvVptVO192MwrtPhzONn6go8ylnOdMhKqi4nfg==" "integrity": "sha512-KG8UEiEVkR3wGEb4m5yZkVCzigAD+cVEJck2CzYZO37ZGJfctvVptVO192MwrtPhzONn6go8ylnOdMhKqi4nfg=="
}, },
"node_modules/paymentocrservice": {
"resolved": "apps/PaymentOCRService",
"link": true
},
"node_modules/pdfjs-dist": { "node_modules/pdfjs-dist": {
"version": "3.11.174", "version": "3.11.174",
"resolved": "https://registry.npmjs.org/pdfjs-dist/-/pdfjs-dist-3.11.174.tgz", "resolved": "https://registry.npmjs.org/pdfjs-dist/-/pdfjs-dist-3.11.174.tgz",
@@ -10001,10 +10018,6 @@
"path2d-polyfill": "^2.0.1" "path2d-polyfill": "^2.0.1"
} }
}, },
"node_modules/pdfservice": {
"resolved": "apps/PdfService",
"link": true
},
"node_modules/perfect-debounce": { "node_modules/perfect-debounce": {
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz", "resolved": "https://registry.npmjs.org/perfect-debounce/-/perfect-debounce-1.0.0.tgz",

View File

@@ -11,8 +11,8 @@
"db:generate": "prisma generate --schema=packages/db/prisma/schema.prisma && ts-node packages/db/scripts/patch-zod-buffer.ts", "db:generate": "prisma generate --schema=packages/db/prisma/schema.prisma && ts-node packages/db/scripts/patch-zod-buffer.ts",
"db:migrate": "dotenv -e packages/db/.env -- prisma migrate dev --schema=packages/db/prisma/schema.prisma", "db:migrate": "dotenv -e packages/db/.env -- prisma migrate dev --schema=packages/db/prisma/schema.prisma",
"db:seed": "prisma db seed --schema=packages/db/prisma/schema.prisma", "db:seed": "prisma db seed --schema=packages/db/prisma/schema.prisma",
"setup:env": "shx cp packages/db/prisma/.env.example packages/db/prisma/.env && shx cp apps/Frontend/.env.example apps/Frontend/.env && shx cp apps/Backend/.env.example apps/Backend/.env", "setup:env": "shx cp packages/db/prisma/.env.example packages/db/prisma/.env && shx cp apps/Frontend/.env.example apps/Frontend/.env && shx cp apps/Backend/.env.example apps/Backend/.env && shx cp apps/PaymentOCRService/.env.example apps/PaymentOCRService/.env",
"postinstall": "cd apps/PdfService && npm run postinstall" "postinstall": "npm --prefix apps/PatientDataExtractorService run postinstall && npm --prefix apps/PaymentOCRService run postinstall"
}, },
"prisma": { "prisma": {
"seed": "ts-node packages/db/prisma/seed.ts" "seed": "ts-node packages/db/prisma/seed.ts"