waybackproxy.py

#!/usr/bin/env python
import base64, lrudict, re, socket, socketserver, sys, threading, urllib.request, urllib.error, urllib.parse

from config import *
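
# Settings come from config.py via the wildcard import above. A minimal sketch of
# the expected names (the values shown here are only illustrative assumptions):
#   LISTEN_PORT = 8888            # TCP port the proxy listens on
#   DATE = '19990101'             # Wayback Machine date code to browse as
#   GEOCITIES_FIX = True          # rewrite www.geocities.com to www.oocities.org
#   QUICK_IMAGES = 1              # 0 = off, 1 = web.archive.org asset URLs, 2 = user:pass date codes
#   CONTENT_TYPE_ENCODING = False # keep ";charset=..." in Content-Type headers
#   SILENT = False                # suppress log output from _print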

# internal LRU dictionary for preserving URLs on redirect
date_cache = lrudict.LRUDict(maxduration=60, maxsize=1024)


class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCPServer with ThreadingMixIn added."""
    pass


class Handler(socketserver.BaseRequestHandler):
    """Main request handler."""

    def handle(self):
        """Handle a request."""
        global DATE

        # readline is pretty convenient
        f = self.request.makefile()

        # read request line
        reqline = line = f.readline()
        split = line.rstrip('\r\n').split(' ')
        http_version = len(split) > 2 and split[2] or 'HTTP/0.9'
        if split[0] != 'GET':
            # only GET is implemented
            return self.error_page(http_version, 501, 'Not Implemented')

        # parse the URL
        request_url = archived_url = split[1]
        parsed = urllib.parse.urlparse(request_url)

        # make a path
        path = parsed.path
        if parsed.query != '': path += '?' + parsed.query
        if path == '': path = '/'

        # get the hostname for later
        host = parsed.netloc.split(':')
        hostname = host[0]

        # read out the headers, saving the PAC file host
        pac_host = '" + location.host + ":' + str(LISTEN_PORT) # may not actually work
        effective_date = DATE
        auth = None
        while line.rstrip('\r\n') != '':
            line = f.readline()
            ll = line.lower()
            if ll[:6] == 'host: ':
                pac_host = line[6:].rstrip('\r\n')
                if ':' not in pac_host: # who would run this on port 80 anyway?
                    pac_host += ':80'
            elif ll[:21] == 'x-waybackproxy-date: ':
                # API for a personal project of mine
                effective_date = line[21:].rstrip('\r\n')
            elif ll[:21] == 'authorization: basic ':
                # asset date code passed as username:password
                auth = base64.b64decode(ll[21:]).decode('ascii', 'ignore') # decode to text so the string handling below works

        try:
            if path in ('/proxy.pac', '/wpad.dat', '/wpad.da'):
                # PAC file to bypass QUICK_IMAGES requests
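                # The generated PAC tells the browser to fetch plain
                # web.archive.org/web/ URLs (QUICK_IMAGES assets) directly from
                # the archive, while "if_" playback-frame URLs and every other
                # site still go through this proxy.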
                pac = http_version.encode('ascii', 'ignore') + b''' 200 OK\r\n'''
                pac += b'''Content-Type: application/x-ns-proxy-autoconfig\r\n'''
                pac += b'''\r\n'''
                pac += b'''function FindProxyForURL(url, host)\r\n'''
                pac += b'''{\r\n'''
                pac += b''' if (shExpMatch(url, "http://web.archive.org/web/*") && !shExpMatch(url, "http://web.archive.org/web/??????????????if_/*"))\r\n'''
                pac += b''' {\r\n'''
                pac += b''' return "DIRECT";\r\n'''
                pac += b''' }\r\n'''
                pac += b''' return "PROXY ''' + pac_host.encode('ascii', 'ignore') + b'''";\r\n'''
                pac += b'''}\r\n'''
                self.request.sendall(pac)
                return
            elif hostname == 'web.archive.org' or auth:
                if path[:5] != '/web/':
                    # launch settings
                    return self.handle_settings(parsed.query)
                else:
                    # pass-through requests to web.archive.org
                    # required for QUICK_IMAGES

                    # did we get an username:password with an asset date code?
                    if auth:
                        request_url = 'http://web.archive.org/web/{0}/{1}'.format(auth.replace(':', ''), archived_url)
                    else:
                        archived_url = '/'.join(request_url.split('/')[5:])

                    _print('[>] [QI] {0}'.format(archived_url))
                    try:
                        conn = urllib.request.urlopen(request_url)
                    except urllib.error.HTTPError as e:
                        if e.code == 404:
                            # Try this file on another date, might be redundant
                            return self.redirect_page(http_version, archived_url)
                        else:
                            raise e
            elif GEOCITIES_FIX and hostname == 'www.geocities.com':
                # apply GEOCITIES_FIX and pass it through
                _print('[>] {0}'.format(archived_url))
                split = archived_url.split('/')
                hostname = split[2] = 'www.oocities.org'
                request_url = '/'.join(split)
                conn = urllib.request.urlopen(request_url)
            else:
                # get from Wayback
                _print('[>] {0}'.format(archived_url))

                # get cached date for redirects
                effective_date = date_cache.get(effective_date + '\x00' + archived_url, effective_date)

                request_url = 'http://web.archive.org/web/{0}/{1}'.format(effective_date, archived_url)
                conn = urllib.request.urlopen(request_url)
        except urllib.error.HTTPError as e:
            # an error has been found

            # 403 or 404 => heuristically determine the static URL for some redirect scripts
            if e.code in (403, 404):
                match = re.search('''(?:\?|&)(?:target|trg|dest(?:ination)?|to|go)?(?:url)?=(http[^&]+)''', archived_url, re.IGNORECASE)
                if not match:
                    match = re.search('''/(?:target|trg|dest(?:ination)?|to|go)?(?:url)?/(http.+)''', archived_url, re.IGNORECASE)
                if match:
                    # we found it
                    new_url = urllib.parse.unquote_plus(match.group(1))
                    _print('[r]', new_url)
                    return self.redirect_page(http_version, new_url)

            _print('[!] {0} {1}'.format(e.code, e.reason))
            return self.error_page(http_version, e.code, e.reason)

        # get content type
        content_type = conn.info().get('Content-Type')
        if content_type == None: content_type = 'text/html'
        if not CONTENT_TYPE_ENCODING and content_type.find(';') > -1: content_type = content_type[:content_type.find(';')]

        # set the mode: [0]wayback [1]oocities
        mode = 0
        if GEOCITIES_FIX and hostname in ['www.oocities.org', 'www.oocities.com']: mode = 1

        if 'text/html' in content_type: # HTML
            # Some dynamically generated links may end up pointing to
            # web.archive.org. Correct that by redirecting the Wayback
            # portion of the URL away if it ends up being HTML consumed
            # through the QUICK_IMAGES interface.
            if hostname == 'web.archive.org':
                conn.close()
                return self.redirect_page(http_version, '/'.join(request_url.split('/')[5:]), 301)

            # consume all data
            data = conn.read()

            # patch the page
            if mode == 0: # wayback
                if b'<title>Wayback Machine</title>' in data:
                    match = re.search(b'<iframe id="playback" src="((?:(?:http(?:s)?:)?//web.archive.org)?/web/[^"]+)"', data)
                    if match:
                        # media playback iframe
                        # Some websites (especially ones that use frames)
                        # inexplicably render inside a media playback iframe.
                        # In that case, a simple redirect would result in a
                        # redirect loop. Download the URL and render it instead.
                        request_url = match.group(1).decode('ascii', 'ignore')
                        archived_url = '/'.join(request_url.split('/')[5:])
                        print('[f]', archived_url)
                        try:
                            conn = urllib.request.urlopen(request_url)
                        except urllib.error.HTTPError as e:
                            _print('[!]', e.code, e.reason)
                            return self.error_page(http_version, e.code, e.reason)
                        content_type = conn.info().get('Content-Type')
                        if not CONTENT_TYPE_ENCODING and content_type.find(';') > -1: content_type = content_type[:content_type.find(';')]
                        data = conn.read()

                if b'<title></title>' in data and b'<h1><span>Internet Archive\'s Wayback Machine</span></h1>' in data:
                    match = re.search(b'<p class="impatient"><a href="(?:(?:http(?:s)?:)?//web\.archive\.org)?/web/([^/]+)/([^"]+)">Impatient\?</a></p>', data)
                    if match:
                        # wayback redirect page, follow it
                        match2 = re.search(b'<p class="code shift red">Got an HTTP ([0-9]+)', data)
                        try:
                            redirect_code = int(match2.group(1))
                        except:
                            redirect_code = 302
                        archived_url = match.group(2).decode('ascii', 'ignore')
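                        # remember which snapshot date Wayback redirected to, so the
                        # follow-up request for this URL resolves to the same snapshot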
                        date_cache[effective_date + '\x00' + archived_url] = match.group(1).decode('ascii', 'ignore')
                        print('[r]', archived_url)
                        return self.redirect_page(http_version, archived_url, redirect_code)

                # pre-toolbar scripts and CSS
                data = re.sub(b'<script src="//archive\.org/(?:.*)<!-- End Wayback Rewrite JS Include -->', b'', data, flags=re.S)
                # toolbar
                data = re.sub(b'<!-- BEGIN WAYBACK TOOLBAR INSERT -->(?:.*)<!-- END WAYBACK TOOLBAR INSERT -->', b'', data, flags=re.S)
                # comments on footer
                data = re.sub(b'\n<!--\n FILE ARCHIVED (?:.*)$', b'', data, flags=re.S)
                # fix base tag
                data = re.sub(b'(<base (?:[^>]*)href=(?:["\'])?)(?:(?:http(?:s)?:)?//web.archive.org)?/web/(?:[^/]+)/', b'\\1', data, flags=re.I + re.S)
                # remove extraneous :80 from links
                data = re.sub(b'((?:(?:http(?:s)?:)?//web.archive.org)?/web/)([^/]+)/([^:]+)://([^:]+):80/', b'\\1\\2/\\3://\\4/', data)

                # fix links
                if QUICK_IMAGES:
                    # QUICK_IMAGES works by intercepting asset URLs (those
                    # with a date code ending in im_, js_...) and letting the
                    # proxy pass them through. This may reduce load time
                    # because Wayback doesn't have to hunt down the closest
                    # copy of that asset to DATE, as those URLs have specific
                    # date codes. This taints the HTML with web.archive.org
                    # URLs. QUICK_IMAGES=2 uses the original URLs with an added
                    # username:password, which taints less but is not supported
                    # by all browsers - IE6 notably kills the whole page if it
                    # sees an iframe pointing to an invalid URL.
                    data = re.sub(b'(?:(?:http(?:s)?:)?//web.archive.org)?/web/([0-9]+)([a-z]+_)/([^:]+)://',
                        QUICK_IMAGES == 2 and b'\\3://\\1:\\2@' or b'http://web.archive.org/web/\\1\\2/\\3://', data)
                    data = re.sub(b'(?:(?:http(?:s)?:)?//web.archive.org)?/web/([0-9]+)/', b'', data)
                else:
                    #data = re.sub(b'(?:(?:http(?:s)?:)?//web.archive.org)?/web/([^/]+)/', b'', data)
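                    # Strip the /web/<date>/ prefix from rewritten links, but note the
                    # per-link date code Wayback supplied in date_cache so a later request
                    # for that URL can be pinned to the same snapshot (see add_to_date_cache).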
                    def add_to_date_cache(match):
                        orig_url = match.group(2)
                        new_date = match.group(1)
                        if len(new_date) > 14: # only cache asset URLs
                            date_cache[effective_date + '\x00' + orig_url.decode('ascii', 'ignore')] = new_date.decode('ascii', 'ignore')
                        return orig_url
                    data = re.sub(b'(?:(?:http(?:s)?:)?//web.archive.org)?/web/([^/]+)/([^"\'#<>]+)', add_to_date_cache, data)
            elif mode == 1: # oocities
                # viewport/cache-control/max-width code (header)
                data = re.sub(b'^(?:.*?)\n\n', b'', data, flags=re.S)
                # archive notice and tracking code (footer)
                data = re.sub(b'<style> \n.zoomout { -webkit-transition: (?:.*)$', b'', data, flags=re.S)
                # clearly labeled snippets from Geocities
                data = re.sub(b'^(?:.*)<\!-- text above generated by server\. PLEASE REMOVE -->', b'', data, flags=re.S)
                data = re.sub(b'<\!-- following code added by server\. PLEASE REMOVE -->(?:.*)<\!-- preceding code added by server\. PLEASE REMOVE -->', b'', data, flags=re.S)
                data = re.sub(b'<\!-- text below generated by server\. PLEASE REMOVE -->(?:.*)$', b'', data, flags=re.S)
                # fix links
                data = re.sub(b'//([^.]*)\.oocities\.com/', b'//\\1.geocities.com/', data, flags=re.S)

            self.request.sendall('{0} 200 OK\r\nContent-Type: {1}\r\nETag: "{2}"\r\n\r\n'.format(http_version, content_type, request_url.replace('"', '')).encode('ascii', 'ignore'))
            self.request.sendall(data)
        else: # other data
            self.request.sendall('{0} 200 OK\r\nContent-Type: {1}\r\nETag: "{2}"\r\n\r\n'.format(http_version, content_type, request_url.replace('"', '')).encode('ascii', 'ignore'))
            while True:
                data = conn.read(1024)
                if not data: break
                self.request.sendall(data)

        self.request.close()

    def error_page(self, http_version, code, reason):
        """Generate an error page."""
        # make error page
        errorpage = '<html><head><title>{0} {1}</title></head><body><h1>{1}</h1><p>'.format(code, reason)

        # add code information
        if code == 404: # page not archived
            errorpage += 'This page may not be archived by the Wayback Machine.'
        elif code == 403: # not crawled due to robots.txt
            errorpage += 'This page was not archived due to a robots.txt block.'
        elif code == 501: # method not implemented
            errorpage += 'WaybackProxy only implements the GET method.'
        else: # another error
            errorpage += 'Unknown error. The Wayback Machine may be experiencing technical difficulties.'
        errorpage += '</p><hr><i>'
        errorpage += self.signature()
        errorpage += '</i></body></html>'

        # send error page and stop
        self.request.sendall('{0} {1} {2}\r\nContent-Type: text/html\r\nContent-Length: {3}\r\n\r\n{4}'.format(http_version, code, reason, len(errorpage), errorpage).encode('utf8', 'ignore'))
        self.request.close()

    def redirect_page(self, http_version, target, code=302):
        """Generate a redirect page."""
        # make redirect page
        redirectpage = '<html><head><title>Redirect</title><meta http-equiv="refresh" content="0;url='
        redirectpage += target
        redirectpage += '"></head><body><p>If you are not redirected, <a href="'
        redirectpage += target
        redirectpage += '">click here</a>.</p></body></html>'

        # send redirect page and stop
        self.request.sendall('{0} {1} Found\r\nLocation: {2}\r\nContent-Type: text/html\r\nContent-Length: {3}\r\n\r\n{4}'.format(http_version, code, target, len(redirectpage), redirectpage).encode('utf8', 'ignore'))
        self.request.close()

    def handle_settings(self, query):
        """Generate the settings page."""
        global DATE, GEOCITIES_FIX, QUICK_IMAGES, CONTENT_TYPE_ENCODING

        if query != '': # handle any parameters that may have been sent
            parsed = urllib.parse.parse_qs(query)
            if 'date' in parsed: DATE = parsed['date'][0]
            GEOCITIES_FIX = 'gcFix' in parsed
            QUICK_IMAGES = 'quickImages' in parsed
            CONTENT_TYPE_ENCODING = 'ctEncoding' in parsed
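            # checkboxes that are unchecked simply don't appear in the query string,
            # so the corresponding settings reset to False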

        # send the page and stop
        settingspage = 'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n'
        settingspage += '<html><head><title>WaybackProxy Settings</title></head><body><p><b>'
        settingspage += self.signature()
        settingspage += '</b></p><form method="get" action="/"><p>Date to get pages from: <input type="text" name="date" size="8" value="'
        settingspage += DATE
        settingspage += '"><br><input type="checkbox" name="gcFix"'
        if GEOCITIES_FIX: settingspage += ' checked'
        settingspage += '> Geocities Fix<br><input type="checkbox" name="quickImages"'
        if QUICK_IMAGES: settingspage += ' checked'
        settingspage += '> Quick images<br><input type="checkbox" name="ctEncoding"'
        if CONTENT_TYPE_ENCODING: settingspage += ' checked'
        settingspage += '> Encoding in Content-Type</p><p><input type="submit" value="Save"></p></form></body></html>'
        self.request.send(settingspage.encode('utf8', 'ignore'))
        self.request.close()

    def signature(self):
        """Return the server signature."""
        return 'WaybackProxy on {0}'.format(socket.gethostname())


print_lock = threading.Lock()

def _print(*args, linebreak=True):
    """Logging function."""
    if SILENT: return
    s = ' '.join([str(x) for x in args])
    print_lock.acquire()
    sys.stdout.write(linebreak and (s + '\n') or s)
    sys.stdout.flush()
    print_lock.release()
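
# Usage sketch (assumptions about the surrounding setup): run this script with a
# config.py alongside it, then point the browser's HTTP proxy setting at this
# machine and LISTEN_PORT, or use the automatic proxy configuration (PAC) URL
# http://<proxy-host>:<LISTEN_PORT>/proxy.pac served above.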
def main():
    """Starts the server."""
    server = ThreadingTCPServer(('', LISTEN_PORT), Handler)
    _print('[-] Now listening on port {0}'.format(LISTEN_PORT))
    try:
        server.serve_forever()
    except KeyboardInterrupt: # Ctrl+C to stop
        pass


if __name__ == '__main__':
    main()