analyze_documents.py

#!/usr/bin/env python3
"""
Document analysis script using LLM to generate summaries and key insights.
Groups pages into documents (like .eleventy.js) and analyzes each one.
"""
import os
import json
import re
from pathlib import Path
from typing import Dict, List, Optional
from collections import defaultdict

from openai import OpenAI
from tqdm import tqdm
from dotenv import load_dotenv


class DocumentAnalyzer:
    """Analyze grouped documents using LLM"""

    def __init__(self, api_url: str, api_key: str, model: str = "gpt-4o"):
        self.client = OpenAI(api_key=api_key, base_url=api_url)
        self.model = model
        self.results_dir = Path("./results")
        self.analyses_file = Path("./analyses.json")

    def normalize_doc_num(self, doc_num: Optional[str]) -> Optional[str]:
        """Normalize document number to handle LLM variations"""
        if not doc_num:
            return None
        # str.replace() treats these patterns as literal text; use re.sub() to apply them as regexes
        normalized = re.sub(r'[^a-z0-9-]', '-', str(doc_num).lower())
        normalized = re.sub(r'-+', '-', normalized)
        return normalized.strip('-')
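    # Rough illustration with a made-up document number (not from the corpus):
    #   normalize_doc_num("Exhibit 12-A (rev. 2)")  ->  "exhibit-12-a-rev-2"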

    def load_and_group_documents(self) -> List[Dict]:
        """Load all JSON files and group into documents (matching .eleventy.js logic)"""
        pages = []

        # Recursively read all JSON files
        for json_file in self.results_dir.glob("**/*.json"):
            try:
                with open(json_file, 'r', encoding='utf-8') as f:
                    content = json.load(f)
                relative_path = json_file.relative_to(self.results_dir)
                pages.append({
                    'path': str(relative_path),
                    'filename': json_file.stem,
                    'folder': str(relative_path.parent) if relative_path.parent != Path('.') else 'root',
                    **content
                })
            except Exception as e:
                print(f"Warning: Could not load {json_file}: {e}")

        print(f"Loaded {len(pages)} pages")

        # Group by normalized document number
        document_map = defaultdict(list)
        for page in pages:
            doc_num = page.get('document_metadata', {}).get('document_number')
            if not doc_num:
                # Use filename as fallback
                normalized = self.normalize_doc_num(page['filename']) or page['filename']
            else:
                normalized = self.normalize_doc_num(doc_num)
            document_map[normalized].append(page)

        # Helper to extract numeric page number
        def get_page_num(page):
            page_num = page.get('document_metadata', {}).get('page_number', 0) or 0
            if isinstance(page_num, int):
                return page_num
            # Handle formats like "24 of 66" or "24/66"
            if isinstance(page_num, str):
                # Extract first number
                match = re.search(r'(\d+)', page_num)
                if match:
                    return int(match.group(1))
            return 0

        # Convert to sorted documents
        documents = []
        for normalized_num, doc_pages in document_map.items():
            # Sort pages by page number
            doc_pages.sort(key=get_page_num)

            # Get metadata
            first_page = doc_pages[0]
            raw_doc_nums = list(set(
                p.get('document_metadata', {}).get('document_number')
                for p in doc_pages
                if p.get('document_metadata', {}).get('document_number')
            ))

            # Combine full text from all pages
            full_text = '\n\n--- PAGE BREAK ---\n\n'.join(
                p.get('full_text', '') for p in doc_pages
            )

            # Collect all entities
            all_entities = {
                'people': set(),
                'organizations': set(),
                'locations': set(),
                'dates': set(),
                'reference_numbers': set()
            }
            for page in doc_pages:
                if 'entities' in page and page['entities']:
                    for key in all_entities.keys():
                        if key in page['entities'] and page['entities'][key]:
                            all_entities[key].update(page['entities'][key])

            documents.append({
                'unique_id': normalized_num,
                'document_number': raw_doc_nums[0] if len(raw_doc_nums) == 1 else normalized_num,
                'page_count': len(doc_pages),
                'full_text': full_text,
                'document_metadata': first_page.get('document_metadata', {}),
                'entities': {k: sorted(list(v)) for k, v in all_entities.items()}
            })

        print(f"Grouped into {len(documents)} documents")
        return sorted(documents, key=lambda d: d['document_number'])
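    # Expected shape of each per-page JSON file in ./results (inferred from the lookups above):
    # {
    #   "document_metadata": {"document_number": "...", "page_number": "24 of 66", ...},
    #   "full_text": "...",
    #   "entities": {"people": [...], "organizations": [...], "locations": [...],
    #                "dates": [...], "reference_numbers": [...]}
    # }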

    def get_analysis_prompt(self) -> str:
        """Get the system prompt for document analysis"""
        return """You are an expert legal document analyst specializing in court documents, depositions, and legal filings.

Analyze the provided document and return a concise summary with key insights.

Your analysis should include:
1. **Document Type**: What kind of document is this? (deposition, court filing, letter, email, affidavit, etc.)
2. **Key Topics**: What are the main subjects/topics discussed? (2-3 bullet points)
3. **Key People**: Who are the most important people mentioned and their roles?
4. **Significance**: Why is this document potentially important? What does it reveal or establish?
5. **Summary**: A 2-3 sentence summary of the document's content

Be factual, concise, and focus on what makes this document notable or significant.

Return ONLY valid JSON in this format:
{
  "document_type": "string",
  "key_topics": ["topic1", "topic2", "topic3"],
  "key_people": [
    {"name": "person name", "role": "their role or significance in this doc"}
  ],
  "significance": "Why this document matters (1-2 sentences)",
  "summary": "Brief summary (2-3 sentences)"
}"""

    def analyze_document(self, document: Dict) -> Optional[Dict]:
        """Analyze a single document using LLM"""
        try:
            # Limit text length for API (keep first ~8000 chars if too long)
            full_text = document['full_text']
            if len(full_text) > 8000:
                full_text = full_text[:8000] + "\n\n[... document continues ...]"

            response = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {
                        "role": "system",
                        "content": self.get_analysis_prompt()
                    },
                    {
                        "role": "user",
                        "content": f"Analyze this document:\n\n{full_text}"
                    }
                ],
                temperature=0.2,
                max_tokens=1000
            )

            content = response.choices[0].message.content.strip()

            # Extract JSON
            json_match = re.search(r'```(?:json)?\s*\n(.*?)\n```', content, re.DOTALL)
            if json_match:
                content = json_match.group(1).strip()
            else:
                json_match = re.search(r'\{.*\}', content, re.DOTALL)
                if json_match:
                    content = json_match.group(0).strip()

            analysis = json.loads(content)

            return {
                'document_id': document['unique_id'],
                'document_number': document['document_number'],
                'page_count': document['page_count'],
                'analysis': analysis
            }
        except Exception as e:
            print(f"Error analyzing document {document['document_number']}: {e}")
            return None

    def analyze_all(self, limit: Optional[int] = None) -> List[Dict]:
        """Analyze all documents"""
        print("=" * 60)
        print("DOCUMENT ANALYSIS")
        print("=" * 60)

        # Load existing analyses to resume
        existing_analyses = {}
        if self.analyses_file.exists():
            try:
                with open(self.analyses_file, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                existing_analyses = {a['document_id']: a for a in data.get('analyses', [])}
                print(f"Found {len(existing_analyses)} existing analyses")
            except Exception as e:
                print(f"Could not load existing analyses: {e}")

        documents = self.load_and_group_documents()
        if limit:
            documents = documents[:limit]
            print(f"Limited to {limit} documents for this run")

        analyses = []
        skipped = 0
        for doc in tqdm(documents, desc="Analyzing documents"):
            # Skip if already analyzed
            if doc['unique_id'] in existing_analyses:
                analyses.append(existing_analyses[doc['unique_id']])
                skipped += 1
                continue

            analysis = self.analyze_document(doc)
            if analysis:
                analyses.append(analysis)
                # Save incrementally
                self.save_analyses(analyses)

        print(f"\n✅ Analyzed {len(analyses) - skipped} new documents")
        print(f"   Skipped {skipped} already-analyzed documents")
        print(f"   Total analyses: {len(analyses)}")
        return analyses

    def save_analyses(self, analyses: List[Dict]):
        """Save analyses to JSON file"""
        output = {
            'total': len(analyses),
            'analyses': analyses
        }
        with open(self.analyses_file, 'w', encoding='utf-8') as f:
            json.dump(output, f, indent=2, ensure_ascii=False)
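    # Resulting analyses.json shape (a sketch, derived from analyze_document()'s return value;
    # the counts and IDs below are placeholders):
    # {
    #   "total": 2,
    #   "analyses": [
    #     {"document_id": "...", "document_number": "...", "page_count": 3, "analysis": {...}},
    #     ...
    #   ]
    # }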

def main():
    load_dotenv()

    import argparse
    parser = argparse.ArgumentParser(description="Analyze documents using LLM")
    parser.add_argument("--api-url", help="OpenAI-compatible API base URL")
    parser.add_argument("--api-key", help="API key")
    parser.add_argument("--model", help="Model name")
    parser.add_argument("--limit", type=int, help="Limit number of documents to analyze")
    parser.add_argument("--force", action="store_true", help="Re-analyze all documents (ignore existing)")
    args = parser.parse_args()

    api_url = args.api_url or os.getenv("OPENAI_API_URL")
    api_key = args.api_key or os.getenv("OPENAI_API_KEY")
    model = args.model or os.getenv("OPENAI_MODEL", "gpt-4o")

    analyzer = DocumentAnalyzer(api_url, api_key, model)

    # Clear existing if force flag
    if args.force and analyzer.analyses_file.exists():
        analyzer.analyses_file.unlink()
        print("Removed existing analyses (--force mode)")

    analyses = analyzer.analyze_all(limit=args.limit)
    analyzer.save_analyses(analyses)
    print(f"\n✅ Saved analyses to {analyzer.analyses_file}")


if __name__ == "__main__":
    main()