#!/usr/bin/env python3

import base64, datetime, json, lrudict, re, socket, socketserver, sys, threading, urllib.request, urllib.error, urllib.parse

from config import *
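# Settings come from config.py via the star import above. This file references
# DATE, DATE_TOLERANCE, LISTEN_PORT, WAYBACK_API, SETTINGS_PAGE, GEOCITIES_FIX,
# QUICK_IMAGES, CONTENT_TYPE_ENCODING and SILENT. A minimal config.py might
# look like this sketch (illustrative values, not from the source):
#
#     DATE = '19970101'            # Wayback date code to browse at
#     DATE_TOLERANCE = 365         # days past DATE before a snapshot is rejected
#     LISTEN_PORT = 8888           # TCP port the proxy listens on
#     WAYBACK_API = True           # use the availability API to find snapshots
#     SETTINGS_PAGE = True         # serve the settings page on web.archive.org
#     GEOCITIES_FIX = True         # fetch geocities.com pages from oocities.org
#     QUICK_IMAGES = 1             # pass asset URLs straight through (0/1/2)
#     CONTENT_TYPE_ENCODING = True # keep charset in Content-Type headers
#     SILENT = False               # suppress console logging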

class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCPServer with ThreadingMixIn added."""
    pass

class Handler(socketserver.BaseRequestHandler):
    """Main request handler."""

    # Class-level caches: BaseRequestHandler.__init__ dispatches straight to
    # handle(), and a fresh Handler instance is built for every connection,
    # so instance attributes assigned in __init__ would come too late and
    # would never be shared between requests.

    # internal LRU dictionary for preserving URLs on redirect
    date_cache = lrudict.LRUDict(maxduration=86400, maxsize=1024)
    # internal LRU dictionary for date availability
    availability_cache = lrudict.LRUDict(maxduration=86400, maxsize=1024) if WAYBACK_API else None

    def handle(self):
        """Handle a request."""
        # readline is pretty convenient
        f = self.request.makefile()

        # read request line
        reqline = line = f.readline()
        split = line.rstrip().split()
        http_version = len(split) > 2 and split[2] or 'HTTP/0.9'
        if len(split) < 2 or split[0] != 'GET':
            # only GET is implemented; also bail on malformed request lines
            return self.error_page(http_version, 501, 'Not Implemented')

        # read out the headers
        request_host = None
        pac_host = '" + location.host + ":' + str(LISTEN_PORT) # may not actually work
        effective_date = DATE
        auth = None
        while line.strip() != '':
            line = f.readline()
            ll = line.lower()
            if ll[:6] == 'host: ':
                pac_host = request_host = line[6:].rstrip()
                if ':' not in pac_host: # explicitly specify port if running on port 80
                    pac_host += ':80'
            elif ll[:21] == 'x-waybackproxy-date: ':
                # API for a personal project of mine
                effective_date = line[21:].rstrip()
            elif ll[:21] == 'authorization: basic ':
                # asset date code passed as username:password
                # (decode from the original line, as base64 is case sensitive)
                auth = base64.b64decode(line[21:]).decode('ascii', 'ignore')

        # parse the URL
        pac_file_paths = ('/proxy.pac', '/wpad.dat', '/wpad.da')
        if split[1][0] == '/' and split[1] not in pac_file_paths:
            # just a path (not corresponding to a PAC file) => transparent proxy
            # Host header and therefore HTTP/1.1 are required
            if not request_host:
                return self.error_page(http_version, 400, 'Host header missing')
            archived_url = 'http://' + request_host + split[1]
        else:
            # full URL => explicit proxy
            archived_url = split[1]
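
        # For example (hypothetical requests): a transparent-proxy client sends
        # "GET /index.html HTTP/1.1" plus "Host: example.com", which becomes
        # http://example.com/index.html; an explicit-proxy client sends
        # "GET http://example.com/index.html HTTP/1.1" and the URL is used as-is.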
        request_url = archived_url
        parsed = urllib.parse.urlparse(request_url)

        # make a path
        path = parsed.path
        if parsed.query:
            path += '?' + parsed.query
        elif path == '':
            path = '/'

        # get the hostname for later
        host = parsed.netloc.split(':')
        hostname = host[0]

        # get cached date for redirects, if available
        original_date = effective_date
        effective_date = self.date_cache.get(effective_date + '\x00' + archived_url, effective_date)

        # get date from username:password, if available
        if auth:
            effective_date = auth.replace(':', '')

        # effectively handle the request
        try:
            if path in pac_file_paths:
                # PAC file to bypass QUICK_IMAGES requests if WAYBACK_API is not enabled
                pac = http_version + ''' 200 OK\r\n'''
                pac += '''Content-Type: application/x-ns-proxy-autoconfig\r\n'''
                pac += '''\r\n'''
                pac += '''function FindProxyForURL(url, host)\r\n'''
                pac += '''{\r\n'''
                if not self.availability_cache:
                    pac += '''    if (shExpMatch(url, "http://web.archive.org/web/*") && !shExpMatch(url, "http://web.archive.org/web/??????????????if_/*"))\r\n'''
                    pac += '''    {\r\n'''
                    pac += '''        return "DIRECT";\r\n'''
                    pac += '''    }\r\n'''
                pac += '''    return "PROXY ''' + pac_host + '''";\r\n'''
                pac += '''}\r\n'''
                self.request.sendall(pac.encode('ascii', 'ignore'))
                return
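
            # When the availability cache is in use, the bypass branch above is
            # skipped, so the emitted PAC file reduces to something like this
            # (pac_host shown as an illustrative value):
            #
            #     function FindProxyForURL(url, host)
            #     {
            #         return "PROXY 192.168.1.2:8888";
            #     }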
            elif hostname == 'web.archive.org':
                if path[:5] != '/web/':
                    # launch settings if enabled
                    if SETTINGS_PAGE:
                        return self.handle_settings(parsed.query)
                    else:
                        return self.error_page(http_version, 404, 'Not Found')
                else:
                    # pass requests through to web.archive.org
                    # required for QUICK_IMAGES
                    archived_url = '/'.join(request_url.split('/')[5:])
                    _print('[>] [QI] {0}'.format(archived_url))
            elif GEOCITIES_FIX and hostname == 'www.geocities.com':
                # apply GEOCITIES_FIX and pass it through
                _print('[>] {0}'.format(archived_url))
                split = archived_url.split('/')
                hostname = split[2] = 'www.oocities.org'
                request_url = '/'.join(split)
            else:
                # get from Wayback
                _print('[>] {0}'.format(archived_url))
                request_url = 'http://web.archive.org/web/{0}/{1}'.format(effective_date, archived_url)

            if self.availability_cache is not None:
                # are we requesting from Wayback?
                split = request_url.split('/')
                # if so, get the closest available date from Wayback's API,
                # to avoid archived 404 pages and other site errors
                if split[2] == 'web.archive.org':
                    # remove extraneous :80 from URL
                    if ':' in split[5]:
                        if split[7][-3:] == ':80':
                            split[7] = split[7][:-3]
                    elif split[5][-3:] == ':80':
                        split[5] = split[5][:-3]

                    # check availability LRU cache
                    availability_url = '/'.join(split[5:])
                    new_url = self.availability_cache.get(availability_url, None)
                    if new_url:
                        # in cache => replace URL immediately
                        request_url = new_url
                    else:
                        # not in cache => contact API
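                        # The availability API responds with JSON shaped roughly
                        # like this (illustrative response, not from the source;
                        # only archived_snapshots.closest.timestamp and .url are
                        # consumed below):
                        #
                        #     {"archived_snapshots": {"closest": {
                        #         "available": true, "status": "200",
                        #         "timestamp": "19970101000000",
                        #         "url": "http://web.archive.org/web/19970101000000/http://example.com/"}}}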
                        try:
                            availability = json.loads(urllib.request.urlopen('https://archive.org/wayback/available?url=' + urllib.parse.quote_plus(availability_url) + '&timestamp=' + effective_date[:14], timeout=10).read())
                            closest = availability.get('archived_snapshots', {}).get('closest', {})
                            new_date = closest.get('timestamp', None)
                        except:
                            _print('[!] Failed to fetch Wayback availability data')
                            new_date = None

                        if new_date and new_date != effective_date[:14]:
                            # returned date is different
                            new_url = closest['url']

                            # add asset tag if one is present in the original URL
                            if len(effective_date) > 14:
                                split = new_url.split('/')
                                split[4] += effective_date[14:]
                                new_url = '/'.join(split)

                            # replace URL and add it to the availability cache
                            request_url = self.availability_cache[availability_url] = new_url

            conn = urllib.request.urlopen(request_url)
        except urllib.error.HTTPError as e:
            # an error has been found
            if e.code in (403, 404, 412): # not found or tolerance exceeded
                # heuristically determine the static URL for some redirect scripts
                match = re.search('''[^/]/((?:http(?:%3A|:)(?:%2F|/)|www(?:[0-9]+)?\\.(?:[^/%]+))(?:%2F|/).+)''', archived_url, re.I)
                if not match:
                    match = re.search('''(?:\\?|&)(?:[^=]+)=((?:http(?:%3A|:)(?:%2F|/)|www(?:[0-9]+)?\\.(?:[^/%]+))?(?:%2F|/)[^&]+)''', archived_url, re.I)
                if match: # found it
                    new_url = urllib.parse.unquote_plus(match.group(1))
                    if new_url[0] != '/' and '://' not in new_url: # add protocol if the URL is absolute but missing a protocol
                        new_url = 'http://' + new_url
                    _print('[r]', new_url)
                    return self.redirect_page(http_version, new_url)
            elif e.code in (301, 302): # urllib-generated error about an infinite redirect loop
                _print('[!] Infinite redirect loop')
                return self.error_page(http_version, 508, 'Infinite Redirect Loop')

            if e.code != 412: # tolerance exceeded has its own error message above
                _print('[!] {0} {1}'.format(e.code, e.reason))

            # If the memento Link header is present, this is a website error
            # instead of a Wayback error. Pass it along if that's the case.
            if 'Link' in e.headers:
                conn = e
            else:
                return self.error_page(http_version, e.code, e.reason)

        # get content type
        content_type = conn.info().get('Content-Type')
        if content_type is None:
            content_type = 'text/html'
        elif not CONTENT_TYPE_ENCODING:
            idx = content_type.find(';')
            if idx > -1:
                content_type = content_type[:idx]

        # set the mode: [0]wayback [1]oocities
        if GEOCITIES_FIX and hostname in ('www.oocities.org', 'www.oocities.com'):
            mode = 1
        else:
            mode = 0

        # Wayback will add its HTML to anything it thinks is HTML
        guessed_content_type = conn.info().get('X-Archive-Guessed-Content-Type')
        if not guessed_content_type:
            guessed_content_type = content_type

        if 'text/html' in guessed_content_type:
            # Some dynamically generated links may end up pointing to
            # web.archive.org. Correct that by redirecting the Wayback
            # portion of the URL away if it ends up being HTML consumed
            # through the QUICK_IMAGES interface.
            if hostname == 'web.archive.org':
                conn.close()
                archived_url = '/'.join(request_url.split('/')[5:])
                _print('[r] [QI]', archived_url)
                return self.redirect_page(http_version, archived_url, 301)

            # check if the date is within tolerance
            if DATE_TOLERANCE is not None:
                match = re.search('''//web\\.archive\\.org/web/([0-9]+)''', conn.geturl())
                if match:
                    requested_date = match.group(1)
                    if self.wayback_to_datetime(requested_date) > self.wayback_to_datetime(original_date) + datetime.timedelta(DATE_TOLERANCE):
                        _print('[!]', requested_date, 'is outside the configured tolerance of', DATE_TOLERANCE, 'days')
                        conn.close()
                        return self.error_page(http_version, 412, 'Snapshot ' + requested_date + ' not available')
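
            # Example (hypothetical values): with original_date 19970101 and
            # DATE_TOLERANCE = 30, a snapshot stamped 19970215 lies past
            # 1997-01-31 and is rejected with a 412, while 19970115 passes.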

            # consume all data
            data = conn.read()

            # patch the page
            if mode == 0: # wayback
                if b'<title>Wayback Machine</title>' in data:
                    if b'<p>This URL has been excluded from the Wayback Machine.</p>' in data: # exclusion error (robots.txt?)
                        return self.error_page(http_version, 403, 'URL excluded')

                    match = re.search(b'''<iframe id="playback" src="((?:(?:https?:)?//web.archive.org)?/web/[^"]+)"''', data)
                    if match: # media playback iframe
                        # Some websites (especially ones that use frames)
                        # inexplicably render inside a media playback iframe.
                        # In that case, a simple redirect would result in a
                        # redirect loop. Download the URL and render it instead.
                        request_url = match.group(1).decode('ascii', 'ignore')
                        archived_url = '/'.join(request_url.split('/')[5:])
                        _print('[f]', archived_url)
                        try:
                            conn = urllib.request.urlopen(request_url)
                        except urllib.error.HTTPError as e:
                            _print('[!]', e.code, e.reason)
                            # If the memento Link header is present, this is a website error
                            # instead of a Wayback error. Pass it along if that's the case.
                            if 'Link' in e.headers:
                                conn = e
                            else:
                                return self.error_page(http_version, e.code, e.reason)
                        content_type = conn.info().get('Content-Type')
                        if not CONTENT_TYPE_ENCODING and content_type.find(';') > -1:
                            content_type = content_type[:content_type.find(';')]
                        data = conn.read()

                if b'<title></title>' in data and b'<h1><span>Internet Archive\'s Wayback Machine</span></h1>' in data:
                    match = re.search(b'''<p class="impatient"><a href="(?:(?:https?:)?//web\\.archive\\.org)?/web/([^/]+)/([^"]+)">Impatient\\?</a></p>''', data)
                    if match:
                        # wayback redirect page, follow it
                        match2 = re.search(b'<p class="code shift red">Got an HTTP ([0-9]+)', data)
                        try:
                            redirect_code = int(match2.group(1))
                        except:
                            redirect_code = 302
                        archived_url = match.group(2).decode('ascii', 'ignore')
                        self.date_cache[effective_date + '\x00' + archived_url] = match.group(1).decode('ascii', 'ignore')
                        _print('[r]', archived_url)
                        return self.redirect_page(http_version, archived_url, redirect_code)

                # pre-toolbar scripts and CSS
                data = re.sub(b'''<script src="//archive\\.org/.*<!-- End Wayback Rewrite JS Include -->\\r?\\n''', b'', data, flags=re.S)
                # toolbar
                data = re.sub(b'''<!-- BEGIN WAYBACK TOOLBAR INSERT -->.*<!-- END WAYBACK TOOLBAR INSERT -->''', b'', data, flags=re.S)
                # comments on footer
                data = re.sub(b'''<!--\\r?\\n FILE ARCHIVED .*$''', b'', data, flags=re.S)
                # fix base tag
                data = re.sub(b'''(<base (?:[^>]*)href=(?:["\'])?)(?:(?:https?:)?//web.archive.org)?/web/(?:[^/]+)/''', b'\\1', data, flags=re.I + re.S)
                # remove extraneous :80 from links
                data = re.sub(b'((?:(?:https?:)?//web.archive.org)?/web/)([^/]+)/([^:]+)://([^:]+):80/', b'\\1\\2/\\3://\\4/', data)

                # fix links
                if QUICK_IMAGES:
                    # QUICK_IMAGES works by intercepting asset URLs (those
                    # with a date code ending in im_, js_...) and letting the
                    # proxy pass them through. This may reduce load time
                    # because Wayback doesn't have to hunt down the closest
                    # copy of that asset to DATE, as those URLs have specific
                    # date codes. This taints the HTML with web.archive.org
                    # URLs. QUICK_IMAGES=2 uses the original URLs with an added
                    # username:password, which taints less but is not supported
                    # by all browsers - IE notably kills the whole page if it
                    # sees an iframe pointing to an invalid URL.
                    data = re.sub(b'(?:(?:https?:)?//web.archive.org)?/web/([0-9]+)([a-z]+_)/([^:]+)://',
                                  QUICK_IMAGES == 2 and b'\\3://\\1:\\2@' or b'http://web.archive.org/web/\\1\\2/\\3://', data)
                    data = re.sub(b'(?:(?:https?:)?//web.archive.org)?/web/([0-9]+)/', b'', data) # non-asset
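
                    # For illustration (hypothetical asset URL): with
                    # QUICK_IMAGES=1, /web/19970101000000im_/http://example.com/a.gif
                    # becomes http://web.archive.org/web/19970101000000im_/http://example.com/a.gif
                    # and is fetched via the QUICK_IMAGES passthrough; with
                    # QUICK_IMAGES=2 it becomes http://19970101000000:im_@example.com/a.gif,
                    # smuggling the date code in as Basic auth credentials that
                    # the header parser above turns back into 19970101000000im_.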
                else:
                    # Remove asset URLs while simultaneously adding them to the
                    # LRU cache with their respective date.
                    def add_to_date_cache(match):
                        orig_url = match.group(2)
                        self.date_cache[effective_date + '\x00' + orig_url.decode('ascii', 'ignore')] = match.group(1).decode('ascii', 'ignore')
                        return orig_url
                    data = re.sub(b'''(?:(?:https?:)?//web.archive.org)?/web/([^/]+)/([^"\\'#<>]+)''', add_to_date_cache, data)
            elif mode == 1: # oocities
                # viewport/cache-control/max-width code (header)
                data = re.sub(b'''^.*?\n\n''', b'', data, flags=re.S)
                # archive notice and tracking code (footer)
                data = re.sub(b'''<style> \n.zoomout { -webkit-transition: .*$''', b'', data, flags=re.S)
                # clearly labeled snippets from Geocities
                data = re.sub(b'''^.*<\\!-- text above generated by server\\. PLEASE REMOVE -->''', b'', data, flags=re.S)
                data = re.sub(b'''<\\!-- following code added by server\\. PLEASE REMOVE -->.*<\\!-- preceding code added by server\\. PLEASE REMOVE -->''', b'', data, flags=re.S)
                data = re.sub(b'''<\\!-- text below generated by server\\. PLEASE REMOVE -->.*$''', b'', data, flags=re.S)
                # fix links
                data = re.sub(b'''//([^.]*)\\.oocities\\.com/''', b'//\\1.geocities.com/', data, flags=re.S)

            self.send_response_headers(conn, http_version, content_type, request_url)
            self.request.sendall(data)
        else: # other data
            self.send_response_headers(conn, http_version, content_type, request_url)
            while True:
                data = conn.read(1024)
                if not data:
                    break
                self.request.sendall(data)

        self.request.close()

    def send_response_headers(self, conn, http_version, content_type, request_url):
        """Generate and send the response headers."""
        response = http_version + ' '

        # pass the error code if there is one
        if isinstance(conn, urllib.error.HTTPError):
            response += '{0} {1}'.format(conn.code, conn.reason.replace('\n', ' '))
        else:
            response += '200 OK'

        # add content type, and the ETag for caching
        response += '\r\nContent-Type: ' + content_type + '\r\nETag: "' + request_url.replace('"', '') + '"\r\n'

        # add X-Archive-Orig-* headers
        headers = conn.info()
        for header in headers:
            if header.find('X-Archive-Orig-') == 0:
                orig_header = header[15:]
                # blacklist certain headers which may alter the client
                if orig_header.lower() not in ('connection', 'location', 'content-type', 'content-length', 'etag', 'authorization', 'set-cookie'):
                    response += orig_header + ': ' + headers[header] + '\r\n'

        # finish and send the response
        response += '\r\n'
        self.request.sendall(response.encode('ascii', 'ignore'))
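
    # A typical successful response then starts out like this sketch (example
    # values; the trailing header only appears if Wayback supplied a matching
    # X-Archive-Orig-Server header):
    #
    #     HTTP/1.1 200 OK
    #     Content-Type: text/html
    #     ETag: "http://web.archive.org/web/19970101000000/http://example.com/"
    #     Server: Apache/1.1.1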

    def error_page(self, http_version, code, reason):
        """Generate an error page."""
        # make error page
        errorpage = '<html><head><title>{0} {1}</title>'.format(code, reason)
        # IE's same-origin policy throws "Access is denied." inside frames
        # loaded from a different origin. Use that to our advantage, even
        # though regular frames are also affected. IE also doesn't recognize
        # language="javascript1.4", so use 1.3 while blocking IE4 by detecting
        # the lack of screenLeft, as IE4 is quite noisy with script errors.
        errorpage += '<script language="javascript1.3">if (window.screenLeft != null) { eval(\'try { var frameElement = window.frameElement; } catch (e) { document.location.href = "about:blank"; }\'); }</script>'
        errorpage += '<script language="javascript">if (window.self != window.top && !(window.frameElement && window.frameElement.tagName == "FRAME")) { document.location.href = "about:blank"; }</script>'
        errorpage += '</head><body><h1>{0}</h1><p>'.format(reason)

        # add code information
        if code in (404, 508): # page not archived or redirect loop
            errorpage += 'This page may not be archived by the Wayback Machine.'
        elif code == 403: # not crawled due to exclusion
            errorpage += 'This page was not archived due to a Wayback Machine exclusion.'
        elif code == 501: # method not implemented
            errorpage += 'WaybackProxy only implements the GET method.'
        elif code == 412: # outside of tolerance
            errorpage += 'The earliest snapshot for this page is outside of the configured tolerance interval.'
        elif code == 400 and reason == 'Host header missing': # no host header in transparent mode
            errorpage += 'WaybackProxy\'s transparent mode requires an HTTP/1.1 compliant client.'
        else: # another error
            errorpage += 'Unknown error. The Wayback Machine may be experiencing technical difficulties.'
        errorpage += '</p><hr><i>'
        errorpage += self.signature()
        errorpage += '</i></body></html>'

        # add padding for IE
        if len(errorpage) <= 512:
            padding = '\n<!-- This comment pads the HTML so Internet Explorer displays this error page instead of its own. '
            remainder = 510 - len(errorpage) - len(padding)
            if remainder > 0:
                padding += ' ' * remainder
            padding += '-->'
            errorpage += padding

        # send error page and stop
        self.request.sendall('{0} {1} {2}\r\nContent-Type: text/html\r\nContent-Length: {3}\r\n\r\n{4}'.format(http_version, code, reason, len(errorpage), errorpage).encode('utf8', 'ignore'))
        self.request.close()

    def redirect_page(self, http_version, target, code=302):
        """Generate a redirect page."""
        # make redirect page
        redirectpage = '<html><head><title>Redirect</title><meta http-equiv="refresh" content="0;url='
        redirectpage += target
        redirectpage += '"></head><body><p>If you are not redirected, <a href="'
        redirectpage += target
        redirectpage += '">click here</a>.</p></body></html>'

        # send redirect page and stop
        self.request.sendall('{0} {1} Found\r\nLocation: {2}\r\nContent-Type: text/html\r\nContent-Length: {3}\r\n\r\n{4}'.format(http_version, code, target, len(redirectpage), redirectpage).encode('utf8', 'ignore'))
        self.request.close()

    def handle_settings(self, query):
        """Generate the settings page."""
        global DATE, DATE_TOLERANCE, GEOCITIES_FIX, QUICK_IMAGES, CONTENT_TYPE_ENCODING

        if query != '': # handle any parameters that may have been sent
            parsed = urllib.parse.parse_qs(query)
            if 'date' in parsed and DATE != parsed['date'][0]:
                DATE = parsed['date'][0]
                self.date_cache.clear()
                if self.availability_cache is not None:
                    self.availability_cache.clear()
            if 'dateTolerance' in parsed and str(DATE_TOLERANCE) != parsed['dateTolerance'][0]:
                DATE_TOLERANCE = int(parsed['dateTolerance'][0])
            GEOCITIES_FIX = 'gcFix' in parsed
            QUICK_IMAGES = 'quickImages' in parsed
            CONTENT_TYPE_ENCODING = 'ctEncoding' in parsed

        # send the page and stop
        settingspage = 'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n'
        settingspage += '<html><head><title>WaybackProxy Settings</title></head><body><p><b>'
        settingspage += self.signature()
        settingspage += '</b></p><form method="get" action="/">'
        settingspage += '<p>Date to get pages from: <input type="text" name="date" size="8" value="'
        settingspage += DATE
        settingspage += '"><p>Date tolerance: <input type="text" name="dateTolerance" size="8" value="'
        settingspage += str(DATE_TOLERANCE)
        settingspage += '"> days<br><input type="checkbox" name="gcFix"'
        if GEOCITIES_FIX:
            settingspage += ' checked'
        settingspage += '> Geocities Fix<br><input type="checkbox" name="quickImages"'
        if QUICK_IMAGES:
            settingspage += ' checked'
        settingspage += '> Quick images<br><input type="checkbox" name="ctEncoding"'
        if CONTENT_TYPE_ENCODING:
            settingspage += ' checked'
        settingspage += '> Encoding in Content-Type</p><p><input type="submit" value="Save"></p></form></body></html>'
        self.request.sendall(settingspage.encode('utf8', 'ignore'))
        self.request.close()
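
    # With SETTINGS_PAGE enabled, browsing (through the proxy) to any
    # http://web.archive.org/ URL outside /web/ serves this form; saving it
    # issues a request like this hypothetical one, handled above:
    #
    #     GET http://web.archive.org/?date=19970101&dateTolerance=30&gcFix=on HTTP/1.1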

    def signature(self):
        """Return the server signature."""
        return 'WaybackProxy on {0}'.format(socket.gethostname())

    def wayback_to_datetime(self, date):
        """Convert a Wayback format date string to a datetime.datetime object."""
        # parse the string
        year = 1995
        month = 12
        day = 31
        hour = 0
        minute = 0
        second = 0
        if len(date) > 0:
            year = int(date[:4])
        if len(date) > 4:
            month = int(date[4:6])
        if len(date) > 6:
            day = int(date[6:8])
        if len(date) > 8:
            hour = int(date[8:10])
        if len(date) > 10:
            minute = int(date[10:12])
        if len(date) > 12:
            second = int(date[12:14])

        # sanitize the numbers
        if month < 1:
            month = 1
        elif month > 12:
            month = 12
        if day < 1:
            day = 1
        elif day > 31:
            day = 31
        if hour > 23:
            hour = 23
        elif hour < 0:
            hour = 0
        if minute > 59:
            minute = 59
        elif minute < 0:
            minute = 0
        if second > 59:
            second = 59
        elif second < 0:
            second = 0

        # if the day is invalid for that month, work its way down
        try:
            dt = datetime.datetime(year, month, day, hour, minute, second) # max 31
        except ValueError:
            try:
                dt = datetime.datetime(year, month, day - 1, hour, minute, second) # max 30
            except ValueError:
                try:
                    dt = datetime.datetime(year, month, day - 2, hour, minute, second) # max 29
                except ValueError:
                    dt = datetime.datetime(year, month, day - 3, hour, minute, second) # max 28
        return dt
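
    # Worked examples of the truncation handling (derived from the defaults
    # above): wayback_to_datetime('1997') fills in the month/day defaults and
    # yields datetime(1997, 12, 31, 0, 0), while wayback_to_datetime('199702')
    # steps the default day 31 down until it is valid for February 1997,
    # yielding datetime(1997, 2, 28, 0, 0).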

print_lock = threading.Lock()

def _print(*args, linebreak=True):
    """Logging function."""
    if SILENT:
        return
    s = ' '.join([str(x) for x in args])
    with print_lock:
        sys.stdout.write(linebreak and (s + '\n') or s)
        sys.stdout.flush()

def main():
    """Starts the server."""
    server = ThreadingTCPServer(('', LISTEN_PORT), Handler)
    _print('[-] Now listening on port {0}'.format(LISTEN_PORT))
    try:
        server.serve_forever()
    except KeyboardInterrupt: # Ctrl+C to stop
        pass

if __name__ == '__main__':
    main()
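
# Usage sketch (hostname is illustrative): run `python3 waybackproxy.py` next
# to a config.py, then point a period browser at HTTP proxy 192.168.1.2 on
# LISTEN_PORT, or at the PAC URL http://192.168.1.2:8888/proxy.pac, which the
# handler above serves itself.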