process_images.py

#!/usr/bin/env python3
"""
Image processing script for OCR and entity extraction using an OpenAI-compatible API.
Processes images from the Downloads folder and extracts structured data.
"""
import argparse
import base64
import concurrent.futures
import json
import mimetypes
import os
import re
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

from dotenv import load_dotenv
from openai import OpenAI
from tqdm import tqdm


@dataclass
class ProcessingResult:
    """Structure for processing results"""
    filename: str
    success: bool
    data: Optional[Dict] = None
    error: Optional[str] = None


class ImageProcessor:
    """Process images using OpenAI-compatible vision API"""

    def __init__(self, api_url: str, api_key: str, model: str = "gpt-4o",
                 index_file: str = "processing_index.json",
                 downloads_dir: Optional[str] = None):
        self.client = OpenAI(api_key=api_key, base_url=api_url)
        self.model = model
        self.downloads_dir = Path(downloads_dir) if downloads_dir else Path.home() / "Downloads"
        self.index_file = index_file
        self.processed_files = self.load_index()

    def load_index(self) -> set:
        """Load the index of already processed files"""
        if os.path.exists(self.index_file):
            try:
                with open(self.index_file, 'r') as f:
                    data = json.load(f)
                return set(data.get('processed_files', []))
            except Exception as e:
                print(f"⚠️ Warning: Could not load index file: {e}")
                return set()
        return set()

    def save_index(self, failed_files=None):
        """Save the current index of processed files"""
        data = {
            'processed_files': sorted(self.processed_files),
            # Store a timestamp so the field matches its name
            'last_updated': datetime.now().isoformat()
        }
        if failed_files:
            data['failed_files'] = failed_files
        with open(self.index_file, 'w') as f:
            json.dump(data, f, indent=2)

    def mark_processed(self, filename: str):
        """Mark a file as processed and update index"""
        self.processed_files.add(filename)
        self.save_index()
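
    # The index file written above looks roughly like this (illustrative sketch
    # based on the fields used in save_index(); "failed_files" only appears when
    # failures are passed in, and the file names below are hypothetical examples):
    #
    # {
    #   "processed_files": ["scan_001.jpg", "letters/scan_002.png"],
    #   "last_updated": "2024-01-01T12:00:00",
    #   "failed_files": [{"filename": "scan_003.jpg", "error": "..."}]
    # }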

    def get_image_files(self) -> List[Path]:
        """Get all image files from Downloads folder (recursively)"""
        image_extensions = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp'}
        image_files = []
        for ext in image_extensions:
            image_files.extend(self.downloads_dir.glob(f'**/*{ext}'))
            image_files.extend(self.downloads_dir.glob(f'**/*{ext.upper()}'))
        # Deduplicate in case both globs match the same file on a case-insensitive filesystem
        return sorted(set(image_files))

    def get_relative_path(self, file_path: Path) -> str:
        """Get relative path from downloads directory for unique indexing"""
        try:
            return str(file_path.relative_to(self.downloads_dir))
        except ValueError:
            # If file is not relative to downloads_dir, use full path
            return str(file_path)

    def get_unprocessed_files(self) -> List[Path]:
        """Get only files that haven't been processed yet"""
        all_files = self.get_image_files()
        return [f for f in all_files if self.get_relative_path(f) not in self.processed_files]

    def encode_image(self, image_path: Path) -> str:
        """Encode image to base64"""
        with open(image_path, 'rb') as f:
            return base64.b64encode(f.read()).decode('utf-8')

    def get_system_prompt(self) -> str:
        """Get the system prompt for structured extraction"""
        return """You are an expert OCR and document analysis system.
Extract ALL text from the image in READING ORDER to create a digital twin of the document.
IMPORTANT: Transcribe text exactly as it appears on the page, from top to bottom, left to right, including:
- All printed text
- All handwritten text (inline where it appears)
- Stamps and annotations (inline where they appear)
- Signatures (note location)
Preserve the natural reading flow. Mix printed and handwritten text together in the order they appear.
Return ONLY valid JSON in this exact structure:
{
  "document_metadata": {
    "page_number": "string or null",
    "document_number": "string or null",
    "date": "string or null",
    "document_type": "string or null",
    "has_handwriting": true/false,
    "has_stamps": true/false
  },
  "full_text": "Complete text transcription in reading order. Include ALL text - printed, handwritten, stamps, etc. - exactly as it appears from top to bottom.",
  "text_blocks": [
    {
      "type": "printed|handwritten|stamp|signature|other",
      "content": "text content",
      "position": "top|middle|bottom|header|footer|margin"
    }
  ],
  "entities": {
    "people": ["list of person names"],
    "organizations": ["list of organizations"],
    "locations": ["list of locations"],
    "dates": ["list of dates found"],
    "reference_numbers": ["list of any reference/ID numbers"]
  },
  "additional_notes": "Any observations about document quality, redactions, damage, etc."
}"""

    def process_image(self, image_path: Path) -> ProcessingResult:
        """Process a single image through the API"""
        try:
            # Encode image and pick a MIME type matching the file extension
            base64_image = self.encode_image(image_path)
            mime_type = mimetypes.guess_type(str(image_path))[0] or "image/jpeg"
            # Make API call using OpenAI client
            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {
                        "role": "system",
                        "content": self.get_system_prompt()
                    },
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": "Extract all text and entities from this image. Return only valid JSON."
                            },
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:{mime_type};base64,{base64_image}"
                                }
                            }
                        ]
                    }
                ],
                max_tokens=4096,
                temperature=0.1
            )
            # Parse response
            content = response.choices[0].message.content
            # Robust JSON extraction
            content = content.strip()
            # 1. Try to find JSON between markdown code fences
            json_match = re.search(r'```(?:json)?\s*\n(.*?)\n```', content, re.DOTALL)
            if json_match:
                content = json_match.group(1).strip()
            else:
                # 2. Try to find JSON between curly braces
                json_match = re.search(r'\{.*\}', content, re.DOTALL)
                if json_match:
                    content = json_match.group(0).strip()
                else:
                    # 3. Strip markdown fences manually
                    if content.startswith('```json'):
                        content = content[7:]
                    elif content.startswith('```'):
                        content = content[3:]
                    if content.endswith('```'):
                        content = content[:-3]
                    content = content.strip()
            # Try to parse JSON
            try:
                extracted_data = json.loads(content)
            except json.JSONDecodeError as e:
                # Try to salvage by finding the first complete JSON object
                try:
                    # Find first { and matching }
                    start = content.find('{')
                    if start == -1:
                        raise ValueError("No JSON object found")
                    brace_count = 0
                    end = start
                    for i in range(start, len(content)):
                        if content[i] == '{':
                            brace_count += 1
                        elif content[i] == '}':
                            brace_count -= 1
                            if brace_count == 0:
                                end = i + 1
                                break
                    if end > start:
                        content = content[start:end]
                        extracted_data = json.loads(content)
                    else:
                        raise ValueError("Could not find complete JSON object")
                except Exception:
                    # If we can't salvage it, raise the original error
                    raise e
            return ProcessingResult(
                filename=self.get_relative_path(image_path),
                success=True,
                data=extracted_data
            )
        except Exception as e:
            return ProcessingResult(
                filename=self.get_relative_path(image_path),
                success=False,
                error=str(e)
            )

    def process_all(self, max_workers: int = 5, limit: Optional[int] = None,
                    resume: bool = True) -> List[ProcessingResult]:
        """Process all images with parallel processing"""
        if resume:
            image_files = self.get_unprocessed_files()
            total_files = len(self.get_image_files())
            already_processed = len(self.processed_files)
            print(f"Found {total_files} total image files")
            print(f"Already processed: {already_processed}")
            print(f"Remaining to process: {len(image_files)}")
        else:
            image_files = self.get_image_files()
            print(f"Found {len(image_files)} image files to process")
        if limit:
            image_files = image_files[:limit]
            print(f"Limited to {limit} files for this run")
        if not image_files:
            print("No files to process!")
            return []
        results = []
        failed_files = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {executor.submit(self.process_image, img): img for img in image_files}
            with tqdm(total=len(image_files), desc="Processing images") as pbar:
                for future in concurrent.futures.as_completed(futures):
                    result = future.result()
                    results.append(result)
                    if result.success:
                        # Save individual result to file
                        self.save_individual_result(result)
                        tqdm.write(f"✅ Processed: {result.filename}")
                    else:
                        # Track failed files
                        failed_files.append({
                            'filename': result.filename,
                            'error': result.error
                        })
                        tqdm.write(f"❌ Failed: {result.filename} - {result.error}")
                    # Mark as processed regardless of success/failure
                    self.mark_processed(result.filename)
                    pbar.update(1)
        # Save failed files to index for reference
        if failed_files:
            self.save_index(failed_files=failed_files)
            print(f"\n⚠️ {len(failed_files)} files failed - logged in {self.index_file}")
        return results

    def save_individual_result(self, result: ProcessingResult):
        """Save individual result to ./results/folder/imagename.json"""
        # Create output path mirroring the source structure
        result_path = Path("./results") / result.filename
        result_path = result_path.with_suffix('.json')
        # Create parent directories
        result_path.parent.mkdir(parents=True, exist_ok=True)
        # Save the extracted data
        with open(result_path, 'w', encoding='utf-8') as f:
            json.dump(result.data, f, indent=2, ensure_ascii=False)

    def save_results(self, results: List[ProcessingResult], output_file: str = "processed_results.json"):
        """Save summary results to JSON file"""
        output_data = {
            "total_processed": len(results),
            "successful": sum(1 for r in results if r.success),
            "failed": sum(1 for r in results if not r.success),
            "results": [asdict(r) for r in results]
        }
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(output_data, f, indent=2, ensure_ascii=False)
        print(f"\n✅ Summary saved to {output_file}")
        print(f"   Individual results saved to ./results/")
        print(f"   Successful: {output_data['successful']}")
        print(f"   Failed: {output_data['failed']}")


def main():
    # Load environment variables
    load_dotenv()
    parser = argparse.ArgumentParser(description="Process images with OCR and entity extraction")
    parser.add_argument("--api-url", help="OpenAI-compatible API base URL (default: from .env or OPENAI_API_URL)")
    parser.add_argument("--api-key", help="API key (default: from .env or OPENAI_API_KEY)")
    parser.add_argument("--model", help="Model name (default: from .env, OPENAI_MODEL, or meta-llama/Llama-4-Maverick-17B-128E-Instruct)")
    parser.add_argument("--workers", type=int, default=5, help="Number of parallel workers (default: 5)")
    parser.add_argument("--limit", type=int, help="Limit number of images to process (for testing)")
    parser.add_argument("--output", default="processed_results.json", help="Output JSON file")
    parser.add_argument("--index", default="processing_index.json", help="Index file to track processed files")
    parser.add_argument("--downloads-dir", default="./downloads", help="Directory containing images (default: ./downloads)")
    parser.add_argument("--no-resume", action="store_true", help="Process all files, ignoring index")
    args = parser.parse_args()
    # Get values from args or environment variables
    api_url = args.api_url or os.getenv("OPENAI_API_URL", "http://...")
    api_key = args.api_key or os.getenv("OPENAI_API_KEY", "abcd1234")
    model = args.model or os.getenv("OPENAI_MODEL", "meta-llama/Llama-4-Maverick-17B-128E-Instruct")
    processor = ImageProcessor(
        api_url=api_url,
        api_key=api_key,
        model=model,
        index_file=args.index,
        downloads_dir=args.downloads_dir
    )
    results = processor.process_all(
        max_workers=args.workers,
        limit=args.limit,
        resume=not args.no_resume
    )
    processor.save_results(results, args.output)


if __name__ == "__main__":
    main()
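
# Example invocation (illustrative; the flags are defined in main() above and the
# environment variables mirror the os.getenv() lookups; third-party dependencies
# are openai, tqdm, and python-dotenv):
#
#   pip install openai tqdm python-dotenv
#   export OPENAI_API_URL=...   # or put these three in a .env file
#   export OPENAI_API_KEY=...
#   export OPENAI_MODEL=...
#   python process_images.py --downloads-dir ./downloads --workers 5 --limit 10
#
# Per-image JSON lands under ./results/, a summary goes to processed_results.json,
# and processing_index.json tracks processed files so interrupted runs can resume.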