Browse Source

Initial mirror from https://github.com/RipMeApp/ripme.git

This repository was automatically mirrored.
mitch donaberger 3 months ago
commit
108f60443e
100 changed files with 16070 additions and 0 deletions
  1. +15 -0    .github/ISSUE_TEMPLATE.md
  2. +27 -0    .github/PULL_REQUEST_TEMPLATE.md
  3. +65 -0    .github/workflows/gradle.yml
  4. +132 -0   .gitignore
  5. +94 -0    CONTRIBUTING.md
  6. +216 -0   README.md
  7. +21 -0    SECURITY.md
  8. +1 -0     build.bat
  9. +150 -0   build.gradle.kts
  10. +2 -0    build.sh
  11. BIN      gradle/wrapper/gradle-wrapper.jar
  12. +7 -0    gradle/wrapper/gradle-wrapper.properties
  13. +252 -0  gradlew
  14. +94 -0   gradlew.bat
  15. +31 -0   remote-branch.sh
  16. +31 -0   remote-merge.sh
  17. +296 -0  ripme.json
  18. +9 -0    settings.gradle.kts
  19. +406 -0  src/main/java/com/rarchives/ripme/App.java
  20. +536 -0  src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java
  21. +338 -0  src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java
  22. +807 -0  src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java
  23. +43 -0   src/main/java/com/rarchives/ripme/ripper/AbstractSingleFileRipper.java
  24. +253 -0  src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java
  25. +312 -0  src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java
  26. +56 -0   src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java
  27. +160 -0  src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java
  28. +21 -0   src/main/java/com/rarchives/ripme/ripper/RipperInterface.java
  29. +214 -0  src/main/java/com/rarchives/ripme/ripper/VideoRipper.java
  30. +82 -0   src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java
  31. +336 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java
  32. +69 -0   src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java
  33. +20 -0   src/main/java/com/rarchives/ripme/ripper/rippers/BaraagRipper.java
  34. +139 -0  src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java
  35. +111 -0  src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java
  36. +98 -0   src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java
  37. +294 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java
  38. +125 -0  src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java
  39. +183 -0  src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java
  40. +145 -0  src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java
  41. +149 -0  src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java
  42. +653 -0  src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java
  43. +94 -0   src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java
  44. +78 -0   src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java
  45. +267 -0  src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java
  46. +267 -0  src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java
  47. +161 -0  src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java
  48. +119 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java
  49. +178 -0  src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java
  50. +147 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FapDungeonRipper.java
  51. +154 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FapwizRipper.java
  52. +57 -0   src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java
  53. +75 -0   src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java
  54. +340 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java
  55. +315 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java
  56. +74 -0   src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java
  57. +249 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java
  58. +118 -0  src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java
  59. +87 -0   src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java
  60. +146 -0  src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java
  61. +181 -0  src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java
  62. +196 -0  src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java
  63. +76 -0   src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java
  64. +78 -0   src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java
  65. +89 -0   src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java
  66. +305 -0  src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java
  67. +164 -0  src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java
  68. +170 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java
  69. +318 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java
  70. +123 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java
  71. +57 -0   src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java
  72. +629 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java
  73. +521 -0  src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java
  74. +95 -0   src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java
  75. +84 -0   src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java
  76. +75 -0   src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java
  77. +56 -0   src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java
  78. +241 -0  src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java
  79. +119 -0  src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java
  80. +159 -0  src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java
  81. +96 -0   src/main/java/com/rarchives/ripme/ripper/rippers/MastodonRipper.java
  82. +20 -0   src/main/java/com/rarchives/ripme/ripper/rippers/MastodonXyzRipper.java
  83. +72 -0   src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java
  84. +204 -0  src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java
  85. +196 -0  src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java
  86. +71 -0   src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java
  87. +132 -0  src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java
  88. +60 -0   src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java
  89. +59 -0   src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java
  90. +130 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java
  91. +140 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java
  92. +234 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java
  93. +144 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java
  94. +82 -0   src/main/java/com/rarchives/ripme/ripper/rippers/NsfwAlbumRipper.java
  95. +135 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java
  96. +106 -0  src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java
  97. +81 -0   src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java
  98. +127 -0  src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java
  99. +21 -0   src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java
  100. +305 -0 src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java

+ 15 - 0
.github/ISSUE_TEMPLATE.md

@@ -0,0 +1,15 @@
+* Ripme version:
+* Java version: <!-- (output of `java -version`) -->
+* Operating system: <!-- (if Windows, output of `ver` or `winver`) -->
+<!-- Please do not link to content featuring underage characters even if the characters are drawn.
+ These works are still illegal in many places including much of America -->
+* Exact URL you were trying to rip when the problem occurred:
+* Please include any additional information about how to reproduce the problem:
+
+## Expected Behavior
+
+Detail the expected behavior here.
+
+## Actual Behavior
+
+Detail the actual (incorrect) behavior here. You can post log snippets or attach log files to your issue report.
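
The environment fields above can be captured straight from a terminal; a minimal sketch (on Windows `cmd`, use `ver` or `winver` as the template notes):

```bash
java -version 2>&1   # Java version; some JDKs print this to stderr
uname -a             # operating system details on Linux/macOS
```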

+ 27 - 0
.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,27 @@
+# Category
+
+This change is exactly one of the following (please change `[ ]` to `[x]`) to indicate which:
+* [ ] a bug fix (Fix #...)
+* [ ] a new Ripper
+* [ ] a refactoring
+* [ ] a style change/fix
+* [ ] a new feature
+
+
+# Description
+
+Please add details about your change here.
+
+
+# Testing
+
+Required verification:
+* [ ] I've verified that there are no regressions in `gradlew test` (there are no new failures or errors).
+* [ ] I've verified that this change works as intended.
+  * [ ] Downloads all relevant content.
+  * [ ] Downloads content from multiple pages (as necessary or appropriate).
+  * [ ] Saves content at reasonable file names (e.g. page titles or content IDs) to help easily browse downloaded content.
+* [ ] I've verified that this change did not break existing functionality (especially in the Ripper I modified).
+
+Optional but recommended:
+* [ ] I've added a unit test to cover my change.

+ 65 - 0
.github/workflows/gradle.yml

@@ -0,0 +1,65 @@
+name: CI + release
+
+on:
+  pull_request:
+  push:
+    branches:
+      - '**'
+    tags:
+      - '!**'
+
+jobs:
+  build:
+
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, windows-latest, macOS-latest]
+        java: [23]
+        include: # test old java on one os only, upload from ubuntu java-17
+          - os: ubuntu-latest
+            java: 17
+            upload: true
+
+    steps:
+
+    - uses: actions/checkout@v1
+
+    - name: Set environment CI_ variables
+      id: ci-env
+      uses: FranzDiebold/github-env-vars-action@v2
+
+    - name: Set up java
+      uses: actions/setup-java@v4.2.1
+      with:
+        java-version: ${{ matrix.java }}
+        distribution: zulu
+        cache: gradle
+
+    - name: Build with Gradle
+      run: gradle clean build -PjavacRelease=${{ matrix.java }}
+
+    - name: SHA256
+      if: matrix.upload
+      run: shasum -a 256 build/libs/*.jar
+
+    - name: upload jar as asset
+      if: matrix.upload
+      uses: actions/upload-artifact@v4
+      with:
+        name: zipped-ripme-jar
+        path: build/libs/*.jar
+
+    - name: create pre-release
+      id: create-pre-release
+      if: matrix.upload
+      uses: "marvinpinto/action-automatic-releases@latest"
+      with:
+        repo_token: "${{ secrets.GITHUB_TOKEN }}"
+        automatic_release_tag: "latest-${{ env.CI_REF_NAME_SLUG  }}"
+        prerelease: true
+        title: "development build ${{ env.CI_REF_NAME }}"
+        files: |
+            build/libs/*.jar
+
+# vim:set ts=2 sw=2 et:
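
To reproduce the upload job's build and checksum steps locally before pushing, something like the following should work (a sketch; it uses the bundled wrapper where the workflow calls a preinstalled `gradle`):

```bash
# Mirror the ubuntu-latest / java-17 upload job from the workflow above.
./gradlew clean build -PjavacRelease=17
shasum -a 256 build/libs/*.jar   # same checksum the "SHA256" step prints
```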

+ 132 - 0
.gitignore

@@ -0,0 +1,132 @@
+# Created by https://www.gitignore.io/api/java,linux,macos,maven,windows
+
+### Java ###
+# Compiled class file
+*.class
+
+# Log file
+*.log
+
+# BlueJ files
+*.ctxt
+
+# Mobile Tools for Java (J2ME)
+.mtj.tmp/
+
+# Package Files #
+*.jar
+*.war
+*.ear
+*.zip
+*.tar.gz
+*.rar
+
+# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
+hs_err_pid*
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### Maven ###
+target/
+pom.xml.tag
+pom.xml.releaseBackup
+pom.xml.versionsBackup
+pom.xml.next
+release.properties
+dependency-reduced-pom.xml
+buildNumber.properties
+.mvn/timing.properties
+
+# Avoid ignoring Maven wrapper jar file (.jar files are usually ignored)
+!/.mvn/wrapper/maven-wrapper.jar
+
+### gradle ###
+/.gradle
+/build
+# Avoid ignoring gradle wrapper jar file (.jar files are usually ignored)
+!/gradle/wrapper/gradle-wrapper.jar
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+ehthumbs.db
+ehthumbs_vista.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+### IDEs ###
+.vscode
+.idea
+.project
+local.properties
+
+### Build files
+.gradle/
+build/
+
+### Ripme ###
+ripme.log
+rips/
+.history
+ripme.jar.update
+*.swp
+!LabelsBundle*.properties
+history.json
+*.iml
+.settings/
+.classpath
+*.txt
+bin/

+ 94 - 0
CONTRIBUTING.md

@@ -0,0 +1,94 @@
+# We've moved!
+
+You can now find the latest code, issues, and releases at [RipMeApp/ripme](https://github.com/RipMeApp/ripme/).
+
+
+# Etiquette
+
+Please be polite and supportive to all users and contributors. Please be inclusive of everyone regardless of race, religion, gender identity or expression, sexual preference, or tools and platform preferences. Please be helpful and stick to the engineering facts, and avoid expressing unhelpful or off-topic opinions.
+
+
+# NSFW Content
+
+**Please tag NSFW links (links to sites with adult content) with "(NSFW)"!**
+
+Many of the sites we deal with contain NSFW (Not Safe For Work) content. Please assume any link you see is NSFW unless tagged otherwise -- i.e., SFW (Safe For Work). Please tag all links you post with either "(NSFW)" or "(SFW)" to be considerate to others who may not be browsing this repo in private or who are not interested in NSFW content.
+
+There is a helpful plugin called uMatrix available for [Firefox](https://addons.mozilla.org/en-US/firefox/addon/umatrix/) and [Chrome](https://chrome.google.com/webstore/detail/umatrix/ogfcmafjalglgifnmanfmnieipoejdcf) which allows you to block certain types of content like media and scripts.
+If you're not sure if a site might contain NSFW images or media, and you are in mixed company but want to develop a new ripper, you can block downloading images and media in the * (all sites) scope and allow requests for specific domains you trust as you go.
+Being able to browse the HTML is usually the most important part of developing or fixing a ripper, so it is not necessarily important to actually see the images load.
+
+
+# Priorities
+
+Our priorities, roughly in order of impact:
+
+* Bug fixes for popular websites (e.g. sites that have recently changed their layout and broken our rippers).
+* Bug fixes for minor websites.
+* New Rippers
+* Refactorings that make development easier
+* Style fixes
+
+
+# Issues
+
+## Bugs
+
+If you have noticed a bug in RipMe, please open an issue at [RipMeApp/ripme](https://github.com/RipMeApp/ripme/issues/new).
+
+Please include enough information that we can easily confirm the problem and verify when it is fixed. (For example: the exact URL that you tried to rip when something bad/incorrect happened.)
+
+
+## Rippers / Website Support
+
+Request support for more sites by adding a comment to [this Github issue](https://github.com/RipMeApp/ripme/issues/2068).
+
+If you're a developer, you can add your own Ripper by following the wiki guide
+[How To Create A Ripper for HTML Websites](https://github.com/ripmeapp/ripme/wiki/How-To-Create-A-Ripper-for-HTML-websites).
+
+
+# Pull Requests
+
+Before you open your pull request, please consider the following:
+
+* Please ensure your change is based on the `master` branch of this repo (i.e. https://github.com/RipMeApp/ripme.git)
+  * Please do `git pull` on the `master` branch before starting work on your bug fix.
+  * This helps avoid merge conflicts.
+* Please ensure your change includes only the minimum changes needed to fix a single issue. These are easiest to review and tend to get merged more quickly. If the scope of your PR is wider than a single issue, you may be asked to reopen your PR as multiple separate PRs.
+* Are you fixing an issue from one of the issue trackers ([RipMeApp](https://github.com/RipMeApp/ripme/issues), [RipMeApp2](https://github.com/RipMeApp2/ripme/issues), or [4pr0n](https://github.com/4pr0n/ripme/issues))? If so, please ensure that you reference the issue you are fixing in your commit message so that it will be [automatically closed](https://help.github.com/articles/closing-issues-via-commit-messages/).
+* Please ensure you verify that you did not break any functionality outside of your change or feature
+  * The CI might be broken, so please ensure that `gradlew test` shows no new errors since before your change.
+  * Keep in mind each Ripper likely supports multiple URL formats for each website, which have different content layouts (users, galleries, etc.)
+  * We deal with live websites, so things might break while we aren't looking. Websites can change and content can be deleted at any time. Our code and/or tests may need to be rewritten to fix issues.
+* Please consider adding a test to check for regressions to the Ripper you added or the bug you fixed.
+  * See e.g. `src/test/java/com/rarchives/ripme/tst/ripper/rippers/ImgurRipperTest.java`
+
+
+## Style
+
+Generally, we will regard style changes as low-priority. Please consider that the contributors don't have a lot of volunteer time to work on this project, so style changes which do not improve the functionality of the project may be ignored in favor of critical bug fixes, new features, or other tangible improvements. Additionally, changes which are difficult to review may be ignored.
+
+If you make a large or complex change, please detail what changes you made, and how they are helpful or what they fix.
+
+If you feel the need to make a style change: changes to spacing and so on are easy to review regardless of the number of lines changed if ONLY whitespace changes are present. (`git diff -w` is helpful here.) If you change spacing and layout, please avoid also moving things around or otherwise refactoring the code. **Submit refactoring changes separately from style changes.**
+
+Good style is a tool for communicating your intent with other developers of the project. We are interested in maintaining reasonably well-styled code. If a contribution is illegible, we may refuse to merge it until it has been brought up to reasonable style guidelines. If a contribution violates any of our "rules" but is still legible, it is likely to be merged anyway.
+
+Some recommendations:
+
+* Above all, be consistent!
+* Spaces, not tabs. Indents should be 4 spaces.
+* We prefer "Egyptian brackets" (in `if`, `for`, `while`, `switch`, etc.):
+  * `if (...) {`
+  * `} else if (...) {`
+  * `} else {`
+  * `}`
+* Note the spacing convention above for control flow constructs (a single space on the outside of each paren)
+* Constants in `UPPER_SNAKE_CASE` a.k.a. `CONST_CASE`
+* Class names in `PascalCase` a.k.a. `UpperCamelCase`
+* Variable names in `camelCase` a.k.a. `lowerCamelCase`
+* Do not use Hungarian notation
+* Do not use `lower_snake_case`
+* Place spaces around binary operators: `1 + 2` not `1+2`
+* Do not place spaces inside of parens: `(a + b)` not `( a + b )`
+* Use a function like VS Code's "Organize Imports" to ensure imports are committed to the repo in a consistent order no matter who writes the code.
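
As a practical check for the whitespace guidance above, `git diff -w` hides whitespace-only changes; a small sketch for confirming a branch is a pure style change (branch names are placeholders):

```bash
# If this prints nothing, the difference between master and your branch
# is whitespace-only and safe to submit as a style-only PR.
git diff -w master...HEAD
```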

+ 216 - 0
README.md

@@ -0,0 +1,216 @@
+![Notice, this repository was mirrored to here from Github](https://m1s5.c20.e2-5.dev/files/images/mirror-notice.svg)
+
+# RipMe
+
+[![Licensed under the MIT License](https://img.shields.io/badge/License-MIT-blue.svg)](/LICENSE.txt)
+[![Join the chat at https://gitter.im/RipMeApp/Lobby](https://badges.gitter.im/RipMeApp/Lobby.svg)](https://gitter.im/RipMeApp/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Subreddit](https://img.shields.io/badge/discuss-on%20reddit-blue.svg)](https://www.reddit.com/r/ripme/)
+![Badge Status](https://github.com/ripmeapp/ripme/actions/workflows/gradle.yml/badge.svg)
+[![Coverage Status](https://coveralls.io/repos/github/RipMeApp/ripme/badge.svg?branch=main)](https://coveralls.io/github/RipMeApp/ripme?branch=main)
+
+## About
+
+RipMe is an album downloader (or "ripper") for various websites. This is a cross-platform tool that runs on *your* computer. Works on Windows, Linux, and Mac.
+
+![Screenshot](https://i.imgur.com/UCQNjeg.png)
+
+## Download
+
+Download `ripme.jar` from the [latest release](https://github.com/ripmeapp/ripme/releases/latest). For information about running the `.jar` file, see
+[the How To Run wiki](https://github.com/ripmeapp/ripme/wiki/How-To-Run-RipMe).
+
+## Minimum Requirements
+
+Requires Java 17 or later to run. RipMe has been tested on and is confirmed working on Windows, Linux, and Mac.
+
+Java 17 is the most modern Java version that still allows us to support the widest range of platforms. See [thread](https://github.com/RipMeApp/ripme/pull/2057#issuecomment-2571472016).
+
+## Supported Sites
+
+Jump to:
+- [List of Supported Sites](https://github.com/RipMeApp/ripme?tab=readme-ov-file#list-of-supported-sites)
+- [Site Not Supported?](https://github.com/RipMeApp/ripme?tab=readme-ov-file#site-not-supported)
+
+## Recent development updates
+
+- The current active development repo for RipMe is located at [ripmeapp/ripme](https://github.com/ripmeapp/ripme/).
+- Note: For a while, the ripmeapp/ripme repo was inactive, but development continued at ripmeapp2/ripme.
+- Now, maintainers have been updated and development has been rejoined with ripmeapp/ripme where it will continue.
+- You may find a number of stale issues on ripmeapp/ripme and/or on ripmeapp2/ripme until everything is merged back together and statuses are updated.
+
+## Maintainers
+
+RipMe has been maintained with ♥️ and in our limited free time by the following
+people, with current expected activity level marked by color of the indicator:
+
+- **[@MetaPrime](https://github.com/metaprime)** 🟡 (2025–present)
+- **[@soloturn](https://github.com/soloturn)** 🟡 (2021–present)
+- **[@cyian-1756](https://github.com/cyian-1756)** 🟥 (2017–2020)
+- **[@kevin51jiang](https://github.com/kevin51jiang)** 🟥 (2017–2018)
+- **[@MetaPrime](https://github.com/metaprime)** 🟡 (2016–2017)
+- and its original creator, **[@4pr0n](https://github.com/4pr0n)** 🟥 (2014–2016)
+
+If you'd like to become a maintainer, reach out on https://gitter.im/RipMeApp/Lobby and ask an active maintainer to be added to the team.
+
+## Contact
+
+Chat with the team and community on [gitter](https://gitter.im/RipMeApp/Lobby) and [reddit.com/r/ripme](https://www.reddit.com/r/ripme/)
+
+## Version Numbers
+
+The version number like `ripme-1.7.94-17-2167aa34-feature_auto_release.jar` contains a release number (`1.7.94`), assigned by
+a person, and the number of commits since that release (`17`). The commit SHA (`2167aa34`) uniquely references the
+source code ripme was built from. If it is not built from the main branch, the branch name (`feature/auto-release`) is
+appended.
+
+Note that this follows the Semantic Versioning spec (see https://semver.org/),
+and uses the format's support for extra data after the `-` to
+provide helpful context, so that every commit on every branch has a distinct
+semver version associated with it.
+
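
As an illustration of that layout, a hedged bash sketch that splits such a jar name into its parts (the filename is the example from above; the field order assumes the hyphen-separated format just described):

```bash
# Split the example jar name into release / commit-count / sha / branch.
name="ripme-1.7.94-17-2167aa34-feature_auto_release.jar"
IFS=- read -r app release commits sha branch <<< "${name%.jar}"
echo "release=$release commits=$commits sha=$sha branch=${branch:-main}"
# -> release=1.7.94 commits=17 sha=2167aa34 branch=feature_auto_release
```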
+## Installation
+
+On macOS, there is a [cask](https://github.com/Homebrew/homebrew-cask/blob/master/Casks/ripme.rb).
+
+```
+brew install --cask ripme && xattr -d com.apple.quarantine /Applications/ripme.jar
+```
+
+## Changelog
+
+[Changelog](/ripme.json) **(ripme.json)**
+
+# Features
+
+- Quickly downloads all images in an online album. [See supported sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)
+- Easily re-rip albums to fetch new content
+- Built in updater
+- Skips already downloaded images by default
+- Can auto skip e-hentai and nhentai albums containing certain tags. [See here for how to enable](https://github.com/RipMeApp/ripme/wiki/Config-options#nhentaiblacklisttags)
+- Download a range of urls. [See here for how](https://github.com/RipMeApp/ripme/wiki/How-To-Run-RipMe#downloading-a-url-range)
+
+## List of Supported Sites
+
+See the full list of [Supported Sites](https://github.com/ripmeapp/ripme/wiki/Supported-Sites) in the wiki.
+
+Note: Websites change over time and therefore rippers, which fundamentally depend on website layouts, can break at any time.
+Feel free to open an issue if you notice something not working, but please search the list of issues to see if it's already been reported.
+
+The list of supported rippers includes:
+
+- imgur
+- twitter (currently broken, needs to be updated for X)
+- tumblr
+- instagram
+- flickr
+- photobucket
+- reddit
+- redgifs
+- motherless
+- imagefap
+- seenive
+- 8muses
+- deviantart (currently broken by major changes to the site)
+- xhamster
+- xvideos
+- ... and [more](https://github.com/ripmeapp/ripme/wiki/Supported-Sites)!
+
+## Site Not Supported?
+
+Request support for more sites by adding a comment to [this Github issue](https://github.com/RipMeApp/ripme/issues/2068).
+
+If you're a developer, you can add your own Ripper by following the wiki guide:
+[How To Create A Ripper for HTML Websites](https://github.com/ripmeapp/ripme/wiki/How-To-Create-A-Ripper-for-HTML-websites).
+
+# Maintainers
+
+## Compiling & Building
+
+The project uses [Gradle](https://gradle.org).
+
+In particular, we use the `gradlew` script (`gradlew.bat` on Windows) 
+to build the app and run tests.
+
+To build the .jar file and run the appropriate tests, navigate to the
+root project directory and run `gradlew` with the appropriate commands
+(see below). Ensure that you manually test that your change works,
+add or update the corresponding test, and run at least the test(s)
+that are relevant to your change.
+
+For example, if you modify the Xhamster ripper, you might run this or
+a similar command:
+
+```
+./gradlew clean build testAll --tests XhamsterRipperTest.testXhamster2Album
+```
+
+Here's a breakdown of the functionality of the `gradlew` scripts:
+
+The `build` verb will build the `.jar` and the tests, and will also *run*
+the tests, except for disabled and flaky tests.
+
+Thus `build` and `build test` are actually synonymous.
+
+```bash
+./gradlew build
+./gradlew build test
+```
+
+You can get more specific with your test commands:
+
+```bash
+./gradlew clean build testAll --tests XhamsterRipperTest.testXhamster2Album
+./gradlew clean build -x test --warning-mode all
+```
+
+A clean rebuild is only necessary if you want to see warnings for unchanged
+files, or if you believe the incremental build is not picking up your
+changes. The latter shouldn't be an issue with gradle, so check for easier
+explanations first, like whether you saved your changes. :)
+
+```bash
+./gradlew clean
+```
+
+To build ("assemble") the .jar without running the tests, which is useful for 
+manual verification of functionality of a work in progress, run the following:
+
+```bash
+./gradlew assemble
+```
+
+The generated JAR (java archive) in build/libs will include all
+dependencies.
+
+## Running Tests
+
+Tests can be tagged as being slow or flaky. The gradle build reacts to
+the following combinations of tags:
+
+- the default is to run all tests that carry no tag.
+- testAll runs all tests.
+- testFlaky runs tests with tag "flaky".
+- testSlow runs tests with tag "slow".
+- tests can be run by test class, or by single test. Use "testAll" so it does
+  not matter whether a test is tagged or not.
+- tests can print the full stack of an assertion, exception, or error if you pass `--info` to the command.
+
+```bash
+./gradlew test
+./gradlew testAll
+./gradlew testFlaky
+./gradlew testSlow
+./gradlew testAll --tests XhamsterRipperTest
+./gradlew testAll --tests XhamsterRipperTest.testXhamster2Album
+./gradlew testAll --tests ChanRipperTest --info
+```
+
+Please note that some tests may fail as sites change and our rippers
+become out of date. Start by building and testing a released version
+of RipMe and then ensure that any changes you make do not cause more
+tests to break.
+
+## Maintainers: How to Publish a New Release
+
+See wiki: [How to Make a New Ripme Release](https://github.com/RipMeApp/ripme/wiki/How-to-make-a-new-ripme-release)

+ 21 - 0
SECURITY.md

@@ -0,0 +1,21 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of RipMe are currently supported with security
+updates:
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 2.1.x   | :white_check_mark: |
+| 1.7.x   | :x:                |
+| < 1.7   | :x:                |
+
+## Reporting a Vulnerability
+
+A vulnerability may be reported either on [github](https://github.com/RipMeApp/ripme/issues) 
+or by contacting a maintainer on reddit.
+
+Maintainers with reddit accounts:
+* [ineedmorealts](https://www.reddit.com/user/ineedmorealts/)
+* [metaprime](https://www.reddit.com/user/metaprime/)

+ 1 - 0
build.bat

@@ -0,0 +1 @@
+./gradlew clean build -x test

+ 150 - 0
build.gradle.kts

@@ -0,0 +1,150 @@
+// The build derives a version with the jgitver plugin out of a tag in the git history. When there is no
+// git repo, the jgitver default would be 0.0.0. One can override this version with a parameter. It is also
+// possible to set the javac release parameter when starting the build; no parameter means build for java-17:
+// gradle clean build -PjavacRelease=17
+// gradle clean build -PjavacRelease=21
+// gradle clean build -PcustomVersion=1.0.0-10-asdf
+val customVersion = (project.findProperty("customVersion") ?: "") as String
+val javacRelease = (project.findProperty("javacRelease") ?: "17") as String
+
+plugins {
+  id("fr.brouillard.oss.gradle.jgitver") version "0.9.1"
+  id("jacoco")
+  id("java")
+  id("maven-publish")
+}
+
+repositories {
+  mavenLocal()
+  mavenCentral()
+}
+
+dependencies {
+  implementation("com.lmax:disruptor:3.4.4")
+  implementation("org.java-websocket:Java-WebSocket:1.5.3")
+  implementation("org.jsoup:jsoup:1.16.1")
+  implementation("org.json:json:20211205")
+  implementation("com.j2html:j2html:1.6.0")
+  implementation("commons-configuration:commons-configuration:1.10")
+  implementation("commons-cli:commons-cli:1.5.0")
+  implementation("commons-io:commons-io:2.13.0")
+  implementation("org.apache.httpcomponents:httpclient:4.5.14")
+  implementation("org.apache.httpcomponents:httpmime:4.5.14")
+  implementation("org.apache.logging.log4j:log4j-api:2.20.0")
+  implementation("org.apache.logging.log4j:log4j-core:2.20.0")
+  implementation("com.squareup.okhttp3:okhttp:4.12.0")
+  implementation("org.graalvm.js:js:22.3.2")
+  testImplementation(enforcedPlatform("org.junit:junit-bom:5.10.0"))
+  testImplementation("org.junit.jupiter:junit-jupiter")
+  testRuntimeOnly("org.junit.platform:junit-platform-launcher")
+}
+
+group = "com.rarchives.ripme"
+version = "1.7.94"
+description = "ripme"
+
+jacoco {
+  toolVersion = "0.8.12"
+}
+
+jgitver {
+  gitCommitIDLength = 8
+  nonQualifierBranches = "main,master"
+  useGitCommitID = true
+}
+
+afterEvaluate {
+  if (customVersion != "") {
+    project.version = customVersion
+  }
+}
+
+tasks.compileJava {
+  options.release.set(Integer.parseInt(javacRelease))
+}
+
+tasks.withType<Jar> {
+  duplicatesStrategy = DuplicatesStrategy.INCLUDE
+  manifest {
+    attributes["Main-Class"] = "com.rarchives.ripme.App"
+    attributes["Implementation-Version"] =  archiveVersion
+    attributes["Multi-Release"] = "true"
+  }
+
+  // Bundle all of the dependencies into the jar; otherwise running it throws a "NoClassDefFoundError"
+  from(sourceSets.main.get().output)
+
+  dependsOn(configurations.runtimeClasspath)
+  from({
+    configurations.runtimeClasspath.get().filter { it.name.endsWith("jar") }.map { zipTree(it) }
+  })
+}
+
+publishing {
+  publications {
+    create<MavenPublication>("maven") {
+      from(components["java"])
+    }
+  }
+}
+
+tasks.withType<JavaCompile> {
+  options.encoding = "UTF-8"
+  val compilerArgs = options.compilerArgs
+  compilerArgs.addAll(listOf("-Xlint:deprecation"))
+}
+
+tasks.test {
+  testLogging {
+    showStackTraces = true
+  }
+  useJUnitPlatform {
+    // gradle-6.5.1 does not yet allow passing this as a parameter, so exclude the tags here
+    excludeTags("flaky","slow")
+    includeEngines("junit-jupiter")
+    includeEngines("junit-vintage")
+  }
+  finalizedBy(tasks.jacocoTestReport) // report is always generated after tests run
+}
+
+tasks.register<Test>("testAll") {
+  useJUnitPlatform {
+    includeTags("any()", "none()")
+  }
+}
+
+tasks.register<Test>("testFlaky") {
+  useJUnitPlatform {
+    includeTags("flaky")
+  }
+}
+
+tasks.register<Test>("testSlow") {
+  useJUnitPlatform {
+    includeTags("slow")
+  }
+}
+
+tasks.register<Test>("testTagged") {
+  useJUnitPlatform {
+    includeTags("any()")
+  }
+}
+
+// make all archive tasks in the build reproducible
+tasks.withType<AbstractArchiveTask>().configureEach {
+  isPreserveFileTimestamps = false
+  isReproducibleFileOrder = true
+}
+
+println("Build directory: ${file(layout.buildDirectory)}")
+
+tasks.jacocoTestReport {
+  dependsOn(tasks.test) // tests are required to run before generating the report
+  reports {
+    xml.required.set(false)
+    csv.required.set(false)
+    html.outputLocation.set(file("${file(layout.buildDirectory)}/jacocoHtml"))
+  }
+}
+
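
Given the jacoco and test wiring above, a typical local loop might look like this (a sketch; the report path follows from the `jacocoTestReport` block, and the open command is platform-specific):

```bash
./gradlew test                         # default tests; the jacoco report runs afterwards
xdg-open build/jacocoHtml/index.html   # view coverage (Linux; use `open` on macOS)
```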

+ 2 - 0
build.sh

@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+./gradlew clean build -x test

BIN
gradle/wrapper/gradle-wrapper.jar


+ 7 - 0
gradle/wrapper/gradle-wrapper.properties

@@ -0,0 +1,7 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-bin.zip
+networkTimeout=10000
+validateDistributionUrl=true
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
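
If the pinned distribution above ever needs updating, Gradle can regenerate this file itself via the built-in `wrapper` task (a sketch; substitute whichever version you are moving to):

```bash
./gradlew wrapper --gradle-version 8.10.2   # rewrites gradle-wrapper.properties
```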

+ 252 - 0
gradlew

@@ -0,0 +1,252 @@
+#!/bin/sh
+
+#
+# Copyright © 2015-2021 the original authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+
+##############################################################################
+#
+#   Gradle start up script for POSIX generated by Gradle.
+#
+#   Important for running:
+#
+#   (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
+#       noncompliant, but you have some other compliant shell such as ksh or
+#       bash, then to run this script, type that shell name before the whole
+#       command line, like:
+#
+#           ksh Gradle
+#
+#       Busybox and similar reduced shells will NOT work, because this script
+#       requires all of these POSIX shell features:
+#         * functions;
+#         * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
+#           «${var#prefix}», «${var%suffix}», and «$( cmd )»;
+#         * compound commands having a testable exit status, especially «case»;
+#         * various built-in commands including «command», «set», and «ulimit».
+#
+#   Important for patching:
+#
+#   (2) This script targets any POSIX shell, so it avoids extensions provided
+#       by Bash, Ksh, etc; in particular arrays are avoided.
+#
+#       The "traditional" practice of packing multiple parameters into a
+#       space-separated string is a well documented source of bugs and security
+#       problems, so this is (mostly) avoided, by progressively accumulating
+#       options in "$@", and eventually passing that to Java.
+#
+#       Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
+#       and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
+#       see the in-line comments for details.
+#
+#       There are tweaks for specific operating systems such as AIX, CygWin,
+#       Darwin, MinGW, and NonStop.
+#
+#   (3) This script is generated from the Groovy template
+#       https://github.com/gradle/gradle/blob/HEAD/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
+#       within the Gradle project.
+#
+#       You can find Gradle at https://github.com/gradle/gradle/.
+#
+##############################################################################
+
+# Attempt to set APP_HOME
+
+# Resolve links: $0 may be a link
+app_path=$0
+
+# Need this for daisy-chained symlinks.
+while
+    APP_HOME=${app_path%"${app_path##*/}"}  # leaves a trailing /; empty if no leading path
+    [ -h "$app_path" ]
+do
+    ls=$( ls -ld "$app_path" )
+    link=${ls#*' -> '}
+    case $link in             #(
+      /*)   app_path=$link ;; #(
+      *)    app_path=$APP_HOME$link ;;
+    esac
+done
+
+# This is normally unused
+# shellcheck disable=SC2034
+APP_BASE_NAME=${0##*/}
+# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036)
+APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s
+' "$PWD" ) || exit
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD=maximum
+
+warn () {
+    echo "$*"
+} >&2
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+} >&2
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "$( uname )" in                #(
+  CYGWIN* )         cygwin=true  ;; #(
+  Darwin* )         darwin=true  ;; #(
+  MSYS* | MINGW* )  msys=true    ;; #(
+  NONSTOP* )        nonstop=true ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD=$JAVA_HOME/jre/sh/java
+    else
+        JAVACMD=$JAVA_HOME/bin/java
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD=java
+    if ! command -v java >/dev/null 2>&1
+    then
+        die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+fi
+
+# Increase the maximum file descriptors if we can.
+if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
+    case $MAX_FD in #(
+      max*)
+        # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
+        # shellcheck disable=SC2039,SC3045
+        MAX_FD=$( ulimit -H -n ) ||
+            warn "Could not query maximum file descriptor limit"
+    esac
+    case $MAX_FD in  #(
+      '' | soft) :;; #(
+      *)
+        # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
+        # shellcheck disable=SC2039,SC3045
+        ulimit -n "$MAX_FD" ||
+            warn "Could not set maximum file descriptor limit to $MAX_FD"
+    esac
+fi
+
+# Collect all arguments for the java command, stacking in reverse order:
+#   * args from the command line
+#   * the main class name
+#   * -classpath
+#   * -D...appname settings
+#   * --module-path (only if needed)
+#   * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if "$cygwin" || "$msys" ; then
+    APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
+    CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
+
+    JAVACMD=$( cygpath --unix "$JAVACMD" )
+
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    for arg do
+        if
+            case $arg in                                #(
+              -*)   false ;;                            # don't mess with options #(
+              /?*)  t=${arg#/} t=/${t%%/*}              # looks like a POSIX filepath
+                    [ -e "$t" ] ;;                      #(
+              *)    false ;;
+            esac
+        then
+            arg=$( cygpath --path --ignore --mixed "$arg" )
+        fi
+        # Roll the args list around exactly as many times as the number of
+        # args, so each arg winds up back in the position where it started, but
+        # possibly modified.
+        #
+        # NB: a `for` loop captures its iteration list before it begins, so
+        # changing the positional parameters here affects neither the number of
+        # iterations, nor the values presented in `arg`.
+        shift                   # remove old arg
+        set -- "$@" "$arg"      # push replacement arg
+    done
+fi
+
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Collect all arguments for the java command:
+#   * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments,
+#     and any embedded shellness will be escaped.
+#   * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
+#     treated as '${Hostname}' itself on the command line.
+
+set -- \
+        "-Dorg.gradle.appname=$APP_BASE_NAME" \
+        -classpath "$CLASSPATH" \
+        org.gradle.wrapper.GradleWrapperMain \
+        "$@"
+
+# Stop when "xargs" is not available.
+if ! command -v xargs >/dev/null 2>&1
+then
+    die "xargs is not available"
+fi
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+#   set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor command substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
+
+eval "set -- $(
+        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+        xargs -n1 |
+        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+        tr '\n' ' '
+    )" '"$@"'
+
+exec "$JAVACMD" "$@"

+ 94 - 0
gradlew.bat

@@ -0,0 +1,94 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+@rem SPDX-License-Identifier: Apache-2.0
+@rem
+
+@if "%DEBUG%"=="" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if %ERRORLEVEL% equ 0 goto execute
+
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if %ERRORLEVEL% equ 0 goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+set EXIT_CODE=%ERRORLEVEL%
+if %EXIT_CODE% equ 0 set EXIT_CODE=1
+if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
+exit /b %EXIT_CODE%
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega

+ 31 - 0
remote-branch.sh

@@ -0,0 +1,31 @@
+#!/usr/bin/bash
+
+# if $1 is user:branch, split it into $USER and $BRANCH at the :
+if [[ $1 == *:* ]]; then
+	echo "Splitting $1 into user and branch"
+	USER=$(echo $1 | cut -d: -f1)
+	BRANCH=$(echo $1 | cut -d: -f2)
+else
+	# if $1 and $2 are not both specified, fail
+	if [ -z "$1" ] || [ -z "$2" ]; then
+		echo "Usage: $0 <username> <branch>"
+		exit 1
+	fi
+
+	# provide defaults for $1 and $2 for safety anyway
+	USER=${1:-ripmeapp}
+	BRANCH=${2:-main}
+fi
+
+# Check that USER and BRANCH are not empty
+if [ -z "$USER" ] || [ -z "$BRANCH" ]; then
+	echo "Usage: $0 <username> <branch>"
+	exit 1
+fi
+
+LOCAL_BRANCH=$USER-$BRANCH
+REMOTE_BRANCH=$USER/$BRANCH
+
+git remote add $USER https://github.com/$USER/ripme.git
+git fetch $USER $BRANCH
+git checkout -B $LOCAL_BRANCH $REMOTE_BRANCH
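
A usage sketch for the script above (user and branch names are hypothetical):

```bash
# Both forms fetch the contributor's fork and check out their branch
# locally as someuser-fix-imgur:
./remote-branch.sh someuser:fix-imgur
./remote-branch.sh someuser fix-imgur
```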

+ 31 - 0
remote-merge.sh

@@ -0,0 +1,31 @@
+#!/usr/bin/bash
+
+# if $1 is user:branch, split it into $USER and $BRANCH at the :
+if [[ $1 == *:* ]]; then
+	USER=$(echo $1 | cut -d: -f1)
+	BRANCH=$(echo $1 | cut -d: -f2)
+else
+	# if $1 and $2 are not both specified, fail
+	if [ -z "$1" ] || [ -z "$2" ]; then
+		echo "Usage: $0 <username> <branch>"
+		exit 1
+	fi
+
+	# provide defaults for $1 and $2 for safety anyway
+	USER=${1:-ripmeapp}
+	BRANCH=${2:-main}
+fi
+
+# Check that USER and BRANCH are not empty
+if [ -z "$USER" ] || [ -z "$BRANCH" ]; then
+	echo "Usage: $0 <username> <branch>"
+	exit 1
+fi
+
+LOCAL_BRANCH=$USER-$BRANCH
+REMOTE_BRANCH=$USER/$BRANCH
+
+git remote add $USER https://github.com/$USER/ripme.git
+git fetch $USER $BRANCH
+git checkout -B $LOCAL_BRANCH origin/main
+git merge $REMOTE_BRANCH
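
Its companion differs in the last step: the local branch is created from `origin/main` and the contributor's branch is merged into it, which is useful for testing a merge result before performing the real one (names again hypothetical):

```bash
# Creates someuser-fix-imgur from origin/main, then merges
# someuser/fix-imgur into it.
./remote-merge.sh someuser:fix-imgur
```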

+ 296 - 0
ripme.json

@@ -0,0 +1,296 @@
+{
+    "latestVersion": "2.1.20-14-5a3070e4",
+    "currentHash": "5a49f48ac63b0085a90726787141fb826ee70a63f26450bf0edf6ef6cbcb4d13",
+    "changeList": [
+        "2.1.20-14-5a3070e4: Fix MrCongRipper (misskon.com); Fixes for Erome, Hypnohub, Dribbble; update user-agent to Chrome 140",
+        "2.1.19-16-78ff73fb: Fixed CfakeRipper, HentaiimageRipper; fix some tests; remove lots more rippers for permanently offline sites; update user-agent to Chrome 134; Supported sites are now a scrollable list",
+        "2.1.18-16-1342b621: Implement FapwizRipper; fix ChanRipper video/gif download rate limit issue; new boolean config twitter.exclude_replies; added audio post downloading support for tumblr",
+        "2.1.17-32-8f58eb41: Remove lots of rippers for permanently offline sites; add Nude-Gals video support; fixed BatoRipper & FitnakedgirlsRipper; clipboard auto-rip does not break flickr links",
+        "2.1.16-4-02372e85: Republishing the build with an updated version number",
+        "2.1.14-33-c275bbf6: Java-17 is now minimum version requirement, down from java-21; User Agent updated; fix Xvideos, add FapDungeon, add NsfwAlbum; added rate limiting to Coomer, E621, Redgifs, and RedditRipper's handling of Redgifs; rate limit sleep()s now include jitter; code cleanup; removed several rippers for sites that are permanently offline",
+        "2.1.13-7-fac3f8ea: luscious, xvideos fixed",
+        "2.1.12-7-d0b97acd: ripme now instead of ripme2 on github, ignore SSL verification option added",
+        "2.1.11-20-ca96ce88: Coomer.party next page, Imgur, E-hentai fixed, set recent language.",
+        "2.1.10-21-c94a9543: Imagebam, Unify colons in UI, Motherless, right click menu, rgif fixed",
+        "2.1.9-7-22e915df, HistoryMenuMouseListener right click menu, Imagefap retry logic for getFullSizedImage(), EightmusesRipper fixed",
+        "2.1.8-1-f5153de8: jpg3 add, java-21 adjustments.",
+        "2.1.7-29-b080faae: luciousripper fix, java-21 adjustments.",
+        "2.1.6-1-68189f27: erome fix.",
+        "2.1.5-8-ba51d7b: ripme running with java-17.",
+        "2.1.4-38-836a7494: fixed imagefap ripper.",
+        "2.1.3-15-1b83dc68: relative path now from working dir to subfolder, allowing images to be put in subfolder with same filename, sanitize reddit titles saved as files, additional logging in AbstractHTMLRipper.",
+        "2.1.2-23-e5438e85: caching of first page, retry sleep time, nhentai fixed",
+        "2.1.2-3-ea90b172: better sanitize filenames for windows, save config on update value. reddit, print exceptions in loops and continue.",
+        "2.1.1-3-536339dd: java-11+ necessary to run, work around non existing working directory.",
+        "2.0.4-13-03e32cb7: fix vsco, add danbooru.",
+        "2.0.3: Check new version against ripme2app.",
+        "2.0.2: Add greek translation, fixed reddit, redgif.",
+        "2.0.1: Fixed reddit, tujigu, xhamster, imagebam, erome; marked some tests as flaky.",
+        "2.0.0: Fixed Zizki, WordpressComics, Imagebam; marked some tests as flaky ",
+        "1.7.95: Added porncomixinfo.net; Fixed ripper for HentaiNexus; move l option to before r and R; marked some tests as flaky ",
+        "1.7.94: Added reddit gallery support; Fixed AllporncomicRipper; Fix imagefap ripper; instagramRipper, replaced Nashorn with GraalVM.js",
+        "1.7.93: Fixed Motherless ripper; Fixed e621 ripper; Updated pt_PT translation; Implemented redgifs Ripper; added missing translation to Korean/KR; Fixed elecx ripper; Added ripper for HentaiNexus",
+        "1.7.92: Added read-comic.com ripper; Fix Pawoo ripper; Add ChineseSimplified language file; Fixed artstation ripper",
+        "1.7.91: Fixed luscious ripper. Fixed VK ripper; Added Kingcomix ripper",
+        "1.7.90: Added FitnakedgirlsRipper; Fixed VK Album Ripper; Fixed Myreadingmanga Ripper; Fixed windows max file name; Fixed Pornhub Video Ripper; Fixed Motherless Ripper; Fixed Instagram Ripper",
+        "1.7.89:  Improved twitter ripper; Fixed xhamster image ripper; Fixed allporncomic ripper; Added Ripper for folio.ink",
+        "1.7.88:  Added ripper for Myreadingmanga.info; Added Mastodon rippers; Fix queue count update when queue is 0; Added ripper for listal; Now downloads best video when ripping twitter",
+        "1.7.87: Added ripper for allporncomic.com; Fixed Xhamster ripper; Added support xhamster2.com and xhamster.desi; Fixes for gfycat thumbs urls",
+        "1.7.86: Added Meituri Ripper; fixed -u flag; Fixed pornhub ripper; Xhamster ripper can now queue users videos",
+        "1.7.85: Fixed instagram ripper; Flickr ripper now downloads largest image",
+        "1.7.84: Fixed instagram ripper; xhamster ripper now accepts urls with page numbers; Fixed Deviantart Ripper",
+        "1.7.83: Added a ripper for hentaifox.com; Added ripper for Erofus.com; Fixed fsktr not ripping some images; Added support for Gfycat profiles; Added opt to disable prefix for HentaifoundryRipper ",
+        "1.7.82: Hentai foundry now rips oldest first by default; 8muses ripper no longer makes unneeded requests; Added support for i.thechive.com",
+        "1.7.81: Added support for artstn.co; Added new boolean config twitter.rip_retweet; Fixed MulemaxRipper; Fix minor bug that could cause a freeze at pending 1",
+        "1.7.80: Fixed porncomix.one ripper; Fixed instagram ripper; Fixed Fuskator ripper; Fixed handling of urls with spaces in them",
+        "1.7.79: Fixed artstation ripper; Fixed imagefap ripper folder naming; Can now filter reddit posts by votes; Added Ripper for Xlecx; Linux/Mac updater is now pure java",
+        "1.7.78: Fixed gfycat ripper; Fixed E621 ripper; Added support for new xhamster url format; Now supports furaffinty scraps",
+        "1.7.77: Reduced log spam; HQporner now supports actress/category/studio/top links; Improved luscious ripper; Fixed Pornhub video ripper; Tumblr ripper now always downloads highest quality available",
+        "1.7.76: Fixed remember url history",
+        "1.7.75: Fix e-hentai ripper; added comixfap ripper; fixed writing urls to files on windows; Fixed update screen issues; Added support for hentaidude; Fixed erome ripper",
+        "1.7.74: Added support for hentai foundry stories; Fixed tubex6 ripper; Fixed instagram ripper",
+        "1.7.73: Tumblr tags are now downloaded in right order; Added xcartx ripper; Can now run command when rips complete",
+        "1.7.72: tumblr ripper now respects download.save_order; Tumblr ripper now can rip from liked pages",
+        "1.7.71: Fixed Erome ripper; Remove HexBinaryAdapter import remnant; fized photobucket watermark issue; add derpi ripper; added support for 4channel.org",
+        "1.7.70: Added arabic translation; Updater now works on java 10; Fixed mangadex ripper",
+        "1.7.69: Fixes TheChive bug so that it can now rip gifs; e621 ripper now rips all media types; Upgraded org.apache.httpcomponents to 4.3.6; Added ripper for Mangadex.org; Added ripper for various duckmovie frontends; reddit ripper no longer freezes when ripping certain links",
+        "1.7.68: Added support for 55chan.org; Now limits file name length to 255 chars; fixed Tsumino ripper",
+        "1.7.67: Added yuki.la ripper; Fixed xhamster ripper; Fixed instagram ripper; Added porncomix.one ripper; Fixed bug which caused large files to be download when running tests",
+        "1.7.66: Added pl_PL translation; Ported various video rippers to AbstractSingleFileRipper; Erome now downloads both HD and SD video; Can now download embedded images from tumblr; Fixed 8muses folder naming; Added ripper for Tubex6.com",
+        "1.7.65: removed RajceRipper; Fixed xhamster ripper; Added xhamster QuickQ support; added StickyXXX.com Ripper; added Mulemax.com Ripper; added SpankBang.com Ripper; fixed LusciousRipper",
+        "1.7.64: Removed loveroms ripper; Update ru_RU lang file; Removed tumblr raw image downloading; getFilesFromURL now can handle imgur videos; 8muses ripper now gets full sized images; Added -a flag for appending a string to folder name",
+        "1.7.63: Removed line wrapping; Added ability to rip url ranges; Fixed gfycatRipper",
+        "1.7.62: Added hentai-comic and hentai-image rippers; Fixed HitomiRipper; Added hqporner ripper; Added Gelbooru.com ripper; Added line wrapping in log window",
+        "1.7.61: Re-add 8Chan support; Added kenzato.uk to CheveretoRippers; Added russian translation; Reddit ripper now uses sub folders; fixed JabArchivesRipper regex",
+        "1.7.60: Fixed EightmusesRipper; added Jab Archives ripper; loveroms ripper now properly names files; Fixed ArtStationRipper",
+        "1.7.59: Added Loverom ripper; added Imagearn ripper; Added support for Desuarchive.org; Fixed erome ripper",
+        "1.7.58: Fixed Deviantart ripper; fixed HitomiRipper; Fixed ManganeloRipper; Fixed update box formatting",
+        "1.7.57: Got DeviantartRipper working again; Imagefap ripper now downloads full sized images; Twitter ripper can now rip extended tweets; Added nl_NL translation",
+        "1.7.56: Fixed DeviantartRipper ripper; Added support for resuming file downloads; Fixed erome ripper; Fixed ModelmayhemRipper NSFW image downloading",
+        "1.7.55: Fixed instagram ripper; Reddit ripper now respects history.end_rip_after_already_seen; Improvements to patch.py and release.py",
+        "1.7.54: Fixed twitter ripper video downloading; fixed instagram ripper",
+        "1.7.53: Added Picstatio ripper; Fixed instagram ripper; Reddit ripper now gets videos from v.redd.it; Fixed ZikiRipper getAlbumTitle; fixed twitter ripper",
+        "1.7.52: Added warning about using furaffinity shared account; Refactoring in Utils class; XhamsterRipper now accepts all countries' subdomains; E621 ripper now accepts urls with order:Score at the end; release.py improvements; DeviantartRipper now logs in using cookies; patch.py improvements",
+        "1.7.51: Fixed instagram ripper; Added the ability to rip from vsco profiles; Fixed TheyiffgalleryRipper; Can now update ripme using the -j flag; added script to automate releases; Code style fixes",
+        "1.7.50: Ripme now checks file hash before running update; fixed update bug which caused ripme to report every update as new",
+        "1.7.49: Fixed -n flag; Added ability to change locale at runtime and from gui; Update kr_KR translation; Removed support for tnbtu.com; No longer writes url to url_history file if save urls only is checked",
+        "1.7.48: Fixed instagram ripper; Added Korean translation; Added quickQueue support to nhentairipper; Rewrote nhentairipper to be faster; myhentaicomics ripper now requests proper url when downloading images; Can now include comments in url files; Added the ability to blacklist tags on e-hentai.org",
+        "1.7.47: Added quick queue support for hentai2read ripper; Fixed instagram ripper; SankakuComplexRipper can now download from different subdomains; Added ripper for bato.to; Added quick queue support for 8muses.download",
+        "1.7.46: Fixed hentai2read ripper; Rewrote the myhentaicomics ripper to use the new getAlbumsToQueue func; Can now blacklist nhentai tags; SinnercomicsRipper no longer adds -page-01 to folder names; EightmusesRipper now adds file extension to filename; disabled test for twitch ripper",
+        "1.7.45: Fixed hentai2read ripper; ImageBam album fixed; Added various translations; TsuminoRipper no longer requires album name to download",
+        "1.7.44: Fixed instagram ripper regex",
+        "1.7.43: Fixed queryId regex in instagram ripper",
+        "1.7.42: Added user support to SmuttyRipper; Removed vine ripper; Fixed NudeGalsRipper; addURLToDownload improvements; Fixed Instagram ripper",
+        "1.7.41: Added support for spyingwithlana.com; Added ManganeloRipper; Added support for dynasty-scans.com",
+        "1.7.40: Added hypnohub.net ripper; Fixed rule34.xxx ripper; Tsumino Ripper now adds .png to filenames",
+        "1.7.39: Added rule34.xxx ripper; Added Gfycatporntube.com ripper; Fixed AbstractRipper subdir bug; Added AbstractRipper unit tests",
+        "1.7.38: Added http and socks proxy support; Extended some unit tests to include getGid; Added HitomiRipper; hentaifoundry ripper now can rip all images from accounts",
+        "1.7.37: Minor code cleanup; Added socks proxy support; Added support for 8muses.download; Hentaifoundry no longer errors when there are no more pages; Fix bug that causes tumblr to replace https with httpss when downloading resized images",
+        "1.7.36: Fixed Instagram ripper; Fixed hentai2read ripper test; Fixed tnbtu.com ripper",
+        "1.7.35: Fixed instagram ripper; hentai2read ripper now properly names folders",
+        "1.7.34: Added Blackbrickroadofoz Ripper; Fixed webtoons regex",
+        "1.7.33: Instagram ripper no longer errors out when downloading from more than 1 page",
+        "1.7.32: Instagram ripper updated to use new endpoints",
+        "1.7.31: InstaGram ripper no longer errors out when getting next page",
+        "1.7.30: Fixed usage of command-line on non-headless systems",
+        "1.7.29: Can now download single images from imgur; Improved handling of headless mode & OS-specific config; Added modelx ripper; Fixed eroshae ripper",
+        "1.7.28: IG ripper now uses display_url when downloading images; Reddit ripper now gets erome links; Hentaifoundry Ripper no longer errors out when there is no next page",
+        "1.7.27: IG ripper can now rip from tags; fixed json parsing issues",
+        "1.7.26: fixed instagram ripper",
+        "1.7.25: Fixed instagram ripper; Added an option to use short names for 8muses; Added tsuminoRipper; Added support for incase.buttsmithy.com",
+        "1.7.24: Added sta.sh ripper; Added sinfest.com ripper; Added femjoyhunter.com ripper; Disabled flaky unit tests",
+        "1.7.23: Fixed xvideos ripper; InstagramRipper now works with the last seen feature",
+        "1.7.22: Added func to normalize urls before reading from/writing to url history file; last seen feature now works with instagram",
+        "1.7.21: Fixed last seen feature",
+        "1.7.20: Fixed 8muses ripper; Added last seen feature; disabled 500px test",
+        "1.7.19: imgurRipper no longer tries to add ?1 to file names",
+        "1.7.18: AlbumRipper now removes bad chars from file names",
+        "1.7.17: Fixed hentai.cafe autorip from clipboard",
+        "1.7.16: Eightmuses now supports pages containing both images and subpages",
+        "1.7.15: Eightmuses ripper now uses ASAP ripping; Remove ripper and tests for gwarchives.com and hushpix.com; Remove ripper and tests for imagearn.com; Fixed pornhub video downloader",
+        "1.7.14: Tumblr API Key Choosing Fix; Make webtoons ripper download maximum quality images; Added twitch ripper; Added VSCO ripper; Fixed pornhub video ripper",
+        "1.7.13: disabled FuskatorRipperTest; Fixes xhamster.com video ripper; Add yuvutu.com ripper",
+        "1.7.12: Instagram ripper no longer 403s on certain images",
+        "1.7.11: Added gwarchives support to the cheveretoRipper; Gfycat Tests & Fix for bad reddit submissions; instagram ripper can now be made to skip videos",
+        "1.7.10: Added basic pornpics.com ripper; Fixed hentai.cafe regex",
+        "1.7.9: FuraffinityRipper can now rip non-public albums; Added 2 new api keys, ripper can now download raw images from tumblr; Erome ripper now matches links without the www; Tumblr ripper now tells the user if it hits the rate limit",
+        "1.7.8: Forced https for tumblr image links; Fixed imgur album filenames; SankakuComplexRipper now downloads full sized images; Added dribbble.com ripper; Added confirm button for clearing history",
+        "1.7.7: Fixed E621 Ripper; Added unit test for zizki.com; Added unit test for Xbooru.com; Updated reddit useragent",
+        "1.7.6: Added OglafRipper",
+        "1.7.5: Improve WordpressComicRipper; update to a modern User Agent",
+        "1.7.4: Fix WordpressComicRipper konradokonski.com/wiory; Fix CheveretoRipper hushpix.com by adding consent cookie",
+        "1.7.3: Improved Aerisdies and Imagearn folders; fixed tapas.io; XhamsterRipper now uses mobile site; InstagramRipper: slideshows under user profiles",
+        "1.7.2: InstagramRipper: Added support for ripping individual posts",
+        "1.7.1: Fix WordpressComicRipper's ripper for freeadultcomix.com; FuraffinityRipper can now rip public albums",
+        "1.7.0: Improved Webtoons folders; Added code coverage with Coveralls.io and improved unit tests; removed rippers for dead sites",
+        "1.6.13: Added Instagram tags; improved Instagram and Pichunter regexes",
+        "1.6.12: Fix InstagramRipper with timestamps; Pichunter galleries support; logging improvements",
+        "1.6.11: Added pichunter.com ripper; Improved Instagram filenames; added tehyiffgallery ripper; Fixed xchan ripper; Fixed chanRipper folders",
+        "1.6.10: Added viewcomic ripper; Fixed webtoons malformed url error message; Fixed chan ripper thread title; Fixed Modelmayhem ripper",
+        "1.6.9: Added support for imgur /t/ albums; Added portable mode; Unit tests no longer fail if run twice; Formatting fixes",
+        "1.6.8: code clean up; ripme can now remember and skip already downloaded images",
+        "1.6.7: Fixed instagram ripper",
+        "1.6.6: Fixed 8muses ripper",
+        "1.6.5: Imgbox ripper now downloads full sized image from galleries",
+        "1.6.4: Added webtoons ripper",
+        "1.6.3: Window is now resizable; Added Porncomix.info ripper; Fixed imgbox ripper; Added hentai2read ripper",
+        "1.6.2: Fixed shesfreaky.com ripper; Fixed imgbox ripper; Fixed Xhamster video ripping",
+        "1.6.1: Rolled E621Ripper back from 1.6.0 to the 1.5.15 version",
+        "1.6.0: Updated to java 8; Some code cleanup",
+        "1.5.15: Added Hbrowse.com ripper; 8muses ripper now can rip from all album types",
+        "1.5.14: Myhentaicomics ripper no longer tries to download ads; Added hentai.cafe ripper; Fixed sankakucomplex ripper",
+        "1.5.13: InstagramRipper: fixed minor bug",
+        "1.5.12: Make tray icon optional; work around window positioning bug on Windows.",
+        "1.5.11: Added -v, --version flag",
+        "1.5.10: Added ripper for cfake.com; Fixed nhentai album naming",
+        "1.5.9: InstagramRipper now downloads full sized images; ImagefapRipper Now adds GID to folder name",
+        "1.5.8: Fixed 8muses ripper",
+        "1.5.7: Added EromeRipper",
+        "1.5.6: Fixed ImagearnRipper; Fixed SmuttyRipper",
+        "1.5.5: Wordpress comic ripper Updates",
+        "1.5.4: Added Luscious.net ripper",
+        "1.5.3: Eroshare links redirect to Eroshae; add AerisdiesRipper",
+        "1.5.2: Fix Imgur titles; fix xhamster (new URL format); fixed Instagram ripping cropped pictures",
+        "1.5.1: Ensure update mechanism is working correctly.",
+        "1.5.0: Change 'home' repo from 4pr0n/RipMe to RipMeApp/RipMe",
+        "1.4.21: Added Chevereto ripper (hushpix.com, tag-fox.com)",
+        "1.4.20: EroshareRipper can now rip user profiles",
+        "1.4.19: WordpressComicRipper supports more rippers; improvements to Instagram and code quality",
+        "1.4.18: Fix video rippers (broken in 1.4.14)",
+        "1.4.17: MyHentaiComics improvements",
+        "1.4.16: Fix Eightmuses; Add Instagram album support",
+        "1.4.15: Fixed DeviantArt Ripper",
+        "1.4.14: Improvements to ChanRipper (rip external links), MyHentaiComics, and Twitter (video and albums)",
+        "1.4.13: Fixed furaffinity ripper.",
+        "1.4.12: Fixed Crash on Win10 CU; Fixed SSL error on xHamster.",
+        "1.4.11: Instagram: fixed cropped images issue.",
+        "1.4.10: Add WordPressComicRipper (various sites supported)",
+        "1.4.9: Fixed HentaiFoundry ripper",
+        "1.4.8: Added Jagodibuja comics ripper",
+        "1.4.7: Fixed NewsFilter, XHamster; added TheChiveRipper",
+        "1.4.6: Eroshare: get album names; Imgur: improve grabbing album name.",
+        "1.4.5: SinnerComics: Added work around for naming bug",
+        "1.4.4: Added SinnerComics, MyHentaiComics rippers; improve E621 ripper.",
+        "1.4.3: Add missing subdomain for 4chan; fix ehentai, 8muses; add zizki ripper.",
+        "1.4.2: Added nhentai ripper.",
+        "1.4.1: Fixed Imgbox: correctly downloads full-size images.",
+        "1.4.0: Fixed update mechanism. Some improvements to Imgur, etc.",
+        "1.3.0: Fix Instagram, Tumblr, xHamster, 4chan, 8muses. Some new features.",
+        "1.2.13: Hotfix for imgur album rips",
+        "1.2.12: 500px gallery/subgallery support",
+        "1.2.11: Deviant fav subfolders, and reddituploads support",
+        "1.2.10: Imgur /gallery/ images fix",
+        "1.2.9: Imgur 10-image fix, original twitter sizes",
+        "1.2.8: Option to prefer MP4 over GIF for imgur",
+        "1.2.7: Fix 500px ripper to fetch NSFW images",
+        "1.2.6: Fix 500px ripper",
+        "1.2.5: Descriptions are optional, minor imgur fixes",
+        "1.2.4: Fix instagram ripper",
+        "1.2.3: Fix xhamster videos, option to remove/clear Queue",
+        "1.2.2: Fix imagefap ripper",
+        "1.2.1: Gfycat Fix, lots of changes pushed",
+        "1.2.0: Fix imagebam, 8muses. Remember queue items",
+        "1.1.9: Hotfix for new imgur album layout",
+        "1.1.8: Fix for failed reddit rips",
+        "1.1.7: Imagefap fix, corrupt history crash fix, deviantart 403 fix",
+        "1.1.6: History error handling and drawchan support",
+        "1.1.5: Fix imagefap and 8muses rippers",
+        "1.1.4: Fix DeviantArt 403 errors",
+        "1.1.3: Fix Check Selected in History",
+        "1.1.2: Check/Uncheck history by right-clicking the history",
+        "1.1.1: Gfycat/Reddit fix",
+        "1.1.0: Revamped History, Cheeby fix",
+        "1.0.93: Reddit fix, gfycat fix, video download fix",
+        "1.0.92: Anon-ib fix, cheeby fix, vid.me ripper",
+        "1.0.91: Fix for anon-ib, minus rippers",
+        "1.0.90: Hide error message when ripping valid album",
+        "1.0.89: Fix fapproved ripper",
+        "1.0.88: Fix imgbox ripper",
+        "1.0.87: Chan ripper update, Finebox update, Motherless video ripper",
+        "1.0.86: Fix for imagefap albums larger than 1k images",
+        "1.0.85: Fix Modelmayhem ripper",
+        "1.0.84: Ripper can resume after being stopped",
+        "1.0.83: Fix 2dgalleries ripper",
+        "1.0.82: Photobucket ripper fix, Clipboard Autorip toggle",
+        "1.0.81: Tumblr/seenive fixes, queue system, better history",
+        "1.0.80: Fix Butttoucher ripper",
+        "1.0.79: Fix cheeby to rip all images",
+        "1.0.78: BCFakes ripper",
+        "1.0.77: Cheeby ripper, status in title, various fixes",
+        "1.0.76: Option to only save URLs, Taptastic ripper",
+        "1.0.75: 500px ripper",
+        "1.0.74: Videarn video ripper",
+        "1.0.73: Datw.in ripper",
+        "1.0.72: Support for DeviantArt favourites",
+        "1.0.71: Fuskator ripper",
+        "1.0.70: Various improvements. Xhamster, Cliphunter video rippers",
+        "1.0.69: Gfycat video ripper, instagram username fix",
+        "1.0.68: Imagevenue and hentai-foundry rippers",
+        "1.0.67: Support for external tumblr domains",
+        "1.0.66: GirlsOfDesire ripper",
+        "1.0.65: Vidd.me video ripper",
+        "1.0.64: Imagebam ripper",
+        "1.0.63: Hopefully fixing freezing issue while re-ripping all albums",
+        "1.0.62: Imgur album directories named after album title",
+        "1.0.61: Logs are optional, defaults to not save logs",
+        "1.0.60: Fix for crazy directory creation bug",
+        "1.0.59: Show when albums can be ripped immediately",
+        "1.0.58: Logs are saved to album directory, ehentai fix",
+        "1.0.57: Nfsfw ripper",
+        "1.0.56: Fix for imgur rips",
+        "1.0.55: Ehentai ripper bypasses content warning",
+        "1.0.54: Mediacru.sh ripper, may require a Java update",
+        "1.0.53: 8Muses ripper fix, can rip subalbums",
+        "1.0.52: Imgbox ripper, popup notifications are optional",
+        "1.0.51: Deviantart rips full-size NSFW images",
+        "1.0.50: Smutty.com ripper",
+        "1.0.49: More Ehentai ripper fixes",
+        "1.0.48: Imagestash.org /tag/ ripper, ehentai fixes",
+        "1.0.47: Vidble ripper, right-click popupmenu on text",
+        "1.0.46: Auto-indexing filenames (001_, 002_, etc) is now optional",
+        "1.0.45: Imagefap /gallery/, Motherless search terms, reddit ripper fix",
+        "1.0.44: Deviantart rips full-size images",
+        "1.0.43: Added Modelmayhem ripper",
+        "1.0.42: Added Drawcrowd ripper, bug fix for large albums",
+        "1.0.41: Fix for multi-page Deviantart galleries, secure Flickr URLs",
+        "1.0.40: Flickr bug fix and groups support",
+        "1.0.39: Various fixes for Ehentai and Motherless",
+        "1.0.38: Ehentai ripper, 4chan .webm support, optional audio confirmations",
+        "1.0.37: Added Vine.co and Supertangas rippers",
+        "1.0.36: Added semi-working Gifyo ripper",
+        "1.0.35: Fixed i.rarchives ripper, delete empty directories",
+        "1.0.34: Added fapproved and anonib rippers",
+        "1.0.33: Imgur ripper fixes",
+        "1.0.32: Fix for directories with special characters",
+        "1.0.31: Fix for large imgur albums",
+        "1.0.30: Added Minus ripper",
+        "1.0.29: Various fixes for tumblr, flickr, 4chan",
+        "1.0.28: Added vk.com video ripper(s)",
+        "1.0.27: Added flickr ripper",
+        "1.0.26: Ability to rerip history from command-line",
+        "1.0.25: Added photobucket ripper",
+        "1.0.24: Fixed possible deadlock issue while re-ripping albums",
+        "1.0.23: Added teenplanet, irarchives, and butttoucher support",
+        "1.0.22: Fixed huge bug where ripper did not work at all for any sites",
+        "1.0.21: Ability to rip user account images on imgur",
+        "1.0.20: Video ripper support: pornhub, youporn, beeg, xvideos",
+        "1.0.19: Fix imgur account ripper",
+        "1.0.18: Button icons, kinkyshare.com ripper",
+        "1.0.17: *chan ripper, imgur titles in filenames",
+        "1.0.16: Fix bug with instagram usernames containing _ or -",
+        "1.0.15: Auto-updater should be compatible with Windows",
+        "1.0.14: Fix twitter account names with _ or -",
+        "1.0.13: Auto-updater is more verbose, hopefully works",
+        "1.0.12: Fixed clipboard autorip bug",
+        "1.0.11: 404 images are marked as errored",
+        "1.0.10: Taskbar notifications when rips start",
+        "1.0.9: More-verbose completion, UI tweaks",
+        "1.0.8: Auto-update functionality",
+        "1.0.7: Clipboard Autorip and tray icons",
+        "1.0.6: Support imgur.com/r/subreddit albums",
+        "1.0.5: Persistent configuration, small bug fixes",
+        "1.0.4: Fixed spaces-in-directory bug",
+        "1.0.3: Added VK.com ripper",
+        "1.0.1: Added auto-update functionality"
+    ]
+}

+ 9 - 0
settings.gradle.kts

@@ -0,0 +1,9 @@
+pluginManagement {
+    repositories {
+        mavenLocal()
+        gradlePluginPortal()
+        // TODO: remove after new build of compose-jb is published
+        maven("https://maven.pkg.jetbrains.space/public/p/compose/dev")
+    }
+}
+rootProject.name = "ripme"

+ 406 - 0
src/main/java/com/rarchives/ripme/App.java

@@ -0,0 +1,406 @@
+package com.rarchives.ripme;
+
+import com.rarchives.ripme.ripper.AbstractRipper;
+import com.rarchives.ripme.ui.History;
+import com.rarchives.ripme.ui.HistoryEntry;
+import com.rarchives.ripme.ui.MainWindow;
+import com.rarchives.ripme.ui.UpdateUtils;
+import com.rarchives.ripme.utils.Proxy;
+import com.rarchives.ripme.utils.RipUtils;
+import com.rarchives.ripme.utils.Utils;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.DefaultParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.lang.SystemUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.swing.*;
+import java.awt.*;
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.stream.Stream;
+
+/**
+ * Entry point to application.
+ * This is where all the fun happens, with the main method.
+ * Decides whether to display the UI or to run silently via the command line.
+ *
+ * As the "controller" to all other classes, it parses command line parameters and loads the history.
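+ *
+ * A hedged usage sketch (the flags are those defined in getOptions(); the URL and paths
+ * are illustrative only):
+ * <pre>
+ *   java -jar ripme.jar -u https://example.com/album -t 4
+ *   java -jar ripme.jar -f urls.txt -l /path/to/rips -w
+ * </pre>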
+ */
+public class App {
+
+    private static final Logger logger = LogManager.getLogger(App.class);
+    public static String stringToAppendToFoldername = null;
+    private static final History HISTORY = new History();
+
+    /**
+     * Where everything starts. Parses any command-line arguments that are present;
+     * if none are given and a display is available, it launches the GUI.
+     *
+     * @param args Array of command line arguments.
+     */
+    public static void main(String[] args) throws IOException {
+        CommandLine cl = getArgs(args);
+
+        if (args.length > 0 && cl.hasOption('v')){
+            System.out.println(UpdateUtils.getThisJarVersion());
+            System.exit(0);
+        }
+
+        if (Utils.getConfigString("proxy.http", null) != null) {
+            Proxy.setHTTPProxy(Utils.getConfigString("proxy.http", null));
+        } else if (Utils.getConfigString("proxy.socks", null) != null) {
+            Proxy.setSocks(Utils.getConfigString("proxy.socks", null));
+        }
+
+        // This has to be here instead of handleArguments because handleArguments isn't called until after an item is ripped
+        if (cl.hasOption("a")) {
+            logger.info(cl.getOptionValue("a"));
+            stringToAppendToFoldername = cl.getOptionValue("a");
+        }
+
+        if (GraphicsEnvironment.isHeadless() || args.length > 0) {
+            handleArguments(args);
+        } else {
+            if (SystemUtils.IS_OS_MAC_OSX) {
+                System.setProperty("apple.laf.useScreenMenuBar", "true");
+                System.setProperty("com.apple.mrj.application.apple.menu.about.name", "RipMe");
+            }
+
+            Utils.configureLogger();
+
+            logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
+            MainWindow mw = new MainWindow();
+            SwingUtilities.invokeLater(mw);
+        }
+    }
+
+    /**
+     * Creates an abstract ripper and instructs it to rip.
+     * @param url URL to be ripped
+     * @throws Exception Nothing too specific here, just a catch-all.
+     *
+     */
+    private static void rip(URL url) throws Exception {
+        AbstractRipper ripper = AbstractRipper.getRipper(url);
+        ripper.setup();
+        ripper.rip();
+
+        String u = ripper.getURL().toExternalForm();
+        Date date = new Date();
+        if (HISTORY.containsURL(u)) {
+            HistoryEntry entry = HISTORY.getEntryByURL(u);
+            entry.modifiedDate = date;
+        } else {
+            HistoryEntry entry = new HistoryEntry();
+            entry.url = u;
+            entry.dir = ripper.getWorkingDir().getAbsolutePath();
+            try {
+                entry.title = ripper.getAlbumTitle(ripper.getURL());
+            } catch (MalformedURLException ignored) { }
+            HISTORY.add(entry);
+        }
+    }
+
+    /**
+     * For dealing with command-line arguments.
+     * @param args Array of Command-line arguments
+     */
+    private static void handleArguments(String[] args) throws IOException {
+        CommandLine cl = getArgs(args);
+
+        //Help (list commands)
+        if (cl.hasOption('h') || args.length == 0) {
+            HelpFormatter hf = new HelpFormatter();
+            hf.printHelp("java -jar ripme.jar [OPTIONS]", getOptions());
+            System.exit(0);
+        }
+
+        Utils.configureLogger();
+        logger.info("Initialized ripme v" + UpdateUtils.getThisJarVersion());
+
+        //Set history file
+        if (cl.hasOption('H')) {
+            String historyLocation = cl.getOptionValue('H');
+            Utils.setConfigString("history.location", historyLocation);
+            logger.info("Set history file to " + historyLocation);
+        }
+
+        //Allow file overwriting
+        if (cl.hasOption('w')) {
+            Utils.setConfigBoolean("file.overwrite", true);
+        }
+
+        //SOCKS proxy server
+        if (cl.hasOption('s')) {
+            String sservfull = cl.getOptionValue('s').trim();
+            Proxy.setSocks(sservfull);
+        }
+
+        //HTTP proxy server
+        if (cl.hasOption('p')) {
+            String proxyserverfull = cl.getOptionValue('p').trim();
+            Proxy.setHTTPProxy(proxyserverfull);
+        }
+
+        //Number of threads
+        if (cl.hasOption('t')) {
+            Utils.setConfigInteger("threads.size", Integer.parseInt(cl.getOptionValue('t')));
+        }
+
+        //Ignore 404
+        if (cl.hasOption('4')) {
+            Utils.setConfigBoolean("errors.skip404", true);
+        }
+
+        //Destination directory
+        if (cl.hasOption('l')) {
+            // change the default rips directory
+            Utils.setConfigString("rips.directory", cl.getOptionValue('l'));
+        }
+
+        //Re-rip <i>all</i> previous albums
+        if (cl.hasOption('r')) {
+            // Re-rip all via command-line
+            loadHistory();
+            if (HISTORY.toList().isEmpty()) {
+                logger.error("There are no history entries to re-rip. Rip some albums first");
+                System.exit(-1);
+            }
+            for (HistoryEntry entry : HISTORY.toList()) {
+                try {
+                    URL url = new URI(entry.url).toURL();
+                    rip(url);
+                } catch (Exception e) {
+                    logger.error("[!] Failed to rip URL " + entry.url, e);
+                    continue;
+                }
+                try {
+                    Thread.sleep(500);
+                } catch (InterruptedException e) {
+                    logger.warn("[!] Interrupted while re-ripping history");
+                    System.exit(-1);
+                }
+            }
+            // Exit
+            System.exit(0);
+        }
+
+        //Re-rip all <i>selected</i> albums
+        if (cl.hasOption('R')) {
+            loadHistory();
+            if (HISTORY.toList().isEmpty()) {
+                logger.error("There are no history entries to re-rip. Rip some albums first");
+                System.exit(-1);
+            }
+            int added = 0;
+            for (HistoryEntry entry : HISTORY.toList()) {
+                if (entry.selected) {
+                    added++;
+                    try {
+                        URL url = new URI(entry.url).toURL();
+                        rip(url);
+                    } catch (Exception e) {
+                        logger.error("[!] Failed to rip URL " + entry.url, e);
+                        continue;
+                    }
+                    try {
+                        Thread.sleep(500);
+                    } catch (InterruptedException e) {
+                        logger.warn("[!] Interrupted while re-ripping history");
+                        System.exit(-1);
+                    }
+                }
+            }
+            if (added == 0) {
+                logger.error("No history entries have been 'Checked'\n" +
+                    "Check an entry by clicking the checkbox to the right of the URL or Right-click a URL to check/uncheck all items");
+                System.exit(-1);
+            }
+        }
+
+        //Cannot specify both; exit early since they conflict.
+        if (cl.hasOption('d') && cl.hasOption('D')) {
+            logger.error("\nCannot specify '-d' and '-D' simultaneously");
+            System.exit(-1);
+        }
+
+        //Save the order of images in album
+        if (cl.hasOption('d')) {
+            Utils.setConfigBoolean("download.save_order", true);
+        }
+
+        //Don't save the order of images in album
+        if (cl.hasOption('D')) {
+            Utils.setConfigBoolean("download.save_order", false);
+        }
+
+        //Read URLs from File
+        if (cl.hasOption('f')) {
+            Path urlfile = Paths.get(cl.getOptionValue('f'));
+
+            try (BufferedReader br = Files.newBufferedReader(urlfile)) {
+                String url;
+                while ((url = br.readLine()) != null) {
+                    if (url.startsWith("//") || url.startsWith("#")) {
+                        logger.debug("Skipping over line \"" + url + "\" because it is a comment");
+                    } else {
+                        // loop through each url in the file and process each url individually.
+                        ripURL(url.trim(), !cl.hasOption("n"));
+                    }
+                }
+            } catch (FileNotFoundException fne) {
+                logger.error("[!] File containing list of URLs not found. Cannot continue.");
+            } catch (IOException ioe) {
+                logger.error("[!] Failed reading file containing list of URLs. Cannot continue.");
+            }
+        }
+
+        //The URL to rip.
+        if (cl.hasOption('u')) {
+            loadHistory();
+            String url = cl.getOptionValue('u').trim();
+            ripURL(url, !cl.hasOption("n"));
+        }
+
+        if (cl.hasOption('j')) {
+            UpdateUtils.updateProgramCLI();
+        }
+
+    }
+
+    /**
+     * Attempt to rip targetURL.
+     * @param targetURL URL to rip
+     * @param saveConfig Whether to save configuration afterwards (currently unused; history is saved on success regardless)
+     */
+    private static void ripURL(String targetURL, boolean saveConfig) {
+        try {
+            URL url = new URI(targetURL).toURL();
+            rip(url);
+            saveHistory();
+        } catch (MalformedURLException e) {
+            logger.error("[!] Given URL is not valid. Expected URL format is http://domain.com/...");
+            // System.exit(-1);
+        } catch (Exception e) {
+            logger.error("[!] Error while ripping URL " + targetURL, e);
+            // System.exit(-1);
+        }
+    }
+
+    /**
+     * Creates an Options object, returns it.
+     * @return Returns all acceptable command-line options.
+     */
+    private static Options getOptions() {
+        Options opts = new Options();
+        opts.addOption("h", "help", false, "Print the help");
+        opts.addOption("u", "url", true, "URL of album to rip");
+        opts.addOption("t", "threads", true, "Number of download threads per rip");
+        opts.addOption("w", "overwrite", false, "Overwrite existing files");
+        opts.addOption("r", "rerip", false, "Re-rip all ripped albums");
+        opts.addOption("R", "rerip-selected", false, "Re-rip all selected albums");
+        opts.addOption("d", "saveorder", false, "Save the order of images in album");
+        opts.addOption("D", "nosaveorder", false, "Don't save order of images");
+        opts.addOption("4", "skip404", false, "Don't retry after a 404 (not found) error");
+        opts.addOption("l", "ripsdirectory", true, "Rips Directory (Default: ./rips)");
+        opts.addOption("n", "no-prop-file", false, "Do not create properties file.");
+        opts.addOption("f", "urls-file", true, "Rip URLs from a file.");
+        opts.addOption("v", "version", false, "Show current version");
+        opts.addOption("s", "socks-server", true, "Use socks server ([user:password]@host[:port])");
+        opts.addOption("p", "proxy-server", true, "Use HTTP Proxy server ([user:password]@host[:port])");
+        opts.addOption("j", "update", false, "Update ripme");
+        opts.addOption("a","append-to-folder", true, "Append a string to the output folder name");
+        opts.addOption("H", "history", true, "Set history file location.");
+        return opts;
+    }
+
+    /**
+     * Tries to parse commandline arguments.
+     * @param args Array of commandline arguments.
+     * @return CommandLine object containing arguments.
+     */
+    private static CommandLine getArgs(String[] args) {
+        var parser = new DefaultParser();
+        try {
+            return parser.parse(getOptions(), args, false);
+        } catch (ParseException e) {
+            logger.error("[!] Error while parsing command-line arguments: " + Arrays.toString(args), e);
+            System.exit(-1);
+            return null;
+        }
+    }
+
+    /**
+     * Loads history from history file into memory.
+     */
+    private static void loadHistory() throws IOException {
+        Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json");
+        HISTORY.clear();
+        if (Files.exists(historyFile)) {
+            try {
+                logger.info("Loading history from " + historyFile);
+                HISTORY.fromFile(historyFile.toString());
+            } catch (IOException e) {
+                logger.error("Failed to load history from file " + historyFile, e);
+                logger.warn(
+                        "RipMe failed to load the history file at " + historyFile + "\n\n" +
+                        "Error: " + e.getMessage() + "\n\n" +
+                        "Closing RipMe will automatically overwrite the contents of this file,\n" +
+                        "so you may want to back the file up before closing RipMe!");
+            }
+        } else {
+            logger.info("Loading history from configuration");
+            HISTORY.fromList(Utils.getConfigList("download.history"));
+            if (HISTORY.toList().isEmpty()) {
+                // Loaded from config, still no entries.
+                // Guess rip history based on rip folder
+                Stream<Path> stream = Files.list(Utils.getWorkingDirectory())
+                        .filter(Files::isDirectory);
+
+                stream.forEach(dir -> {
+                    String url = RipUtils.urlFromDirectoryName(dir.toString());
+                    if (url != null) {
+                        // We found one, add it to history
+                        HistoryEntry entry = new HistoryEntry();
+                        entry.url = url;
+                        HISTORY.add(entry);
+                    }
+                });
+            }
+        }
+    }
+
+    /*
+     * @see MainWindow.saveHistory
+     */
+    private static void saveHistory() {
+        Path historyFile = Paths.get(Utils.getConfigDir() + "/history.json");
+        try {
+            if (!Files.exists(historyFile)) {
+                Files.createDirectories(historyFile.getParent());
+                Files.createFile(historyFile);
+            }
+
+            HISTORY.toFile(historyFile.toString());
+            Utils.setConfigList("download.history", Collections.emptyList());
+        } catch (IOException e) {
+            logger.error("Failed to save history to file " + historyFile, e);
+        }
+    }
+}

+ 536 - 0
src/main/java/com/rarchives/ripme/ripper/AbstractHTMLRipper.java

@@ -0,0 +1,536 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+
+import com.rarchives.ripme.ui.MainWindow;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ * Simplified ripper, designed for ripping from sites by parsing HTML.
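+ *
+ * A minimal subclass sketch, assuming a hypothetical site (ExampleRipper, example.com and
+ * the selector are illustrative only, not a real ripper):
+ * <pre>
+ * public class ExampleRipper extends AbstractHTMLRipper {
+ *     public ExampleRipper(URL url) throws IOException { super(url); }
+ *     protected String getDomain() { return "example.com"; }
+ *     public String getHost() { return "example"; }
+ *     protected List&lt;String&gt; getURLsFromPage(Document page) {
+ *         return page.select("img").eachAttr("abs:src"); // jsoup: absolute image URLs
+ *     }
+ *     protected void downloadURL(URL url, int index) {
+ *         addURLToDownload(url, getPrefix(index), ""); // empty subdirectory
+ *     }
+ * }
+ * </pre>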
+ */
+public abstract class AbstractHTMLRipper extends AbstractRipper {
+
+    private static final Logger logger = LogManager.getLogger(AbstractHTMLRipper.class);
+
+    private final Map<URL, File> itemsPending = Collections.synchronizedMap(new HashMap<>());
+    private final Map<URL, Path> itemsCompleted = Collections.synchronizedMap(new HashMap<>());
+    private final Map<URL, String> itemsErrored = Collections.synchronizedMap(new HashMap<>());
+    Document cachedFirstPage;
+
+    protected AbstractHTMLRipper(URL url) throws IOException {
+        super(url);
+        if (Utils.getConfigBoolean("ssl.verify.off", false)) {
+            Http.SSLVerifyOff();
+        } else {
+            Http.undoSSLVerifyOff();
+        }
+    }
+
+    protected abstract String getDomain();
+    public abstract String getHost();
+
+    protected Document getFirstPage() throws IOException, URISyntaxException {
+        return Http.url(url).get();
+    }
+
+    protected Document getCachedFirstPage() throws IOException, URISyntaxException {
+        if (cachedFirstPage == null) {
+            cachedFirstPage = getFirstPage();
+        }
+        return cachedFirstPage;
+    }
+
+    public Document getNextPage(Document doc) throws IOException, URISyntaxException {
+        return null;
+    }
+
+    protected abstract List<String> getURLsFromPage(Document page) throws UnsupportedEncodingException, URISyntaxException;
+
+    protected List<String> getDescriptionsFromPage(Document doc) throws IOException {
+        throw new IOException("getDescriptionsFromPage not implemented"); // Do I do this or make an abstract function?
+    }
+
+    protected abstract void downloadURL(URL url, int index);
+
+    protected DownloadThreadPool getThreadPool() {
+        return null;
+    }
+
+    protected boolean keepSortOrder() {
+        return true;
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return url.getHost().endsWith(getDomain());
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        return url;
+    }
+    protected boolean hasDescriptionSupport() {
+        return false;
+    }
+
+    protected String[] getDescription(String url, Document page) throws IOException {
+        throw new IOException("getDescription not implemented"); // Do I do this or make an abstract function?
+    }
+    protected int descSleepTime() {
+        return 100;
+    }
+
+    protected List<String> getAlbumsToQueue(Document doc) {
+        return null;
+    }
+
+    // If a page has Queue support then it has no images we want to download, just a list of urls we want to add to
+    // the queue
+    protected boolean hasQueueSupport() {
+        return false;
+    }
+
+    // Takes a url and checks if it is for a page of albums
+    protected boolean pageContainsAlbums(URL url) {
+        return false;
+    }
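+
+    // A hedged sketch of how a subclass opts into queue support (the path check and
+    // selector below are illustrative assumptions, not taken from a real ripper):
+    //   protected boolean hasQueueSupport() { return true; }
+    //   protected boolean pageContainsAlbums(URL url) { return url.getPath().startsWith("/albums"); }
+    //   protected List<String> getAlbumsToQueue(Document doc) {
+    //       return doc.select("a.album").eachAttr("abs:href");
+    //   }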
+
+    @Override
+    public void rip() throws IOException, URISyntaxException {
+        int index = 0;
+        int textindex = 0;
+        logger.info("Retrieving " + this.url);
+        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
+        var doc = getCachedFirstPage();
+
+        if (hasQueueSupport() && pageContainsAlbums(this.url)) {
+            List<String> urls = getAlbumsToQueue(doc);
+            for (String url : urls) {
+                MainWindow.addUrlToQueue(url);
+            }
+
+            // We set doc to null here so the while loop below this doesn't fire
+            doc = null;
+            logger.debug("Adding items from " + this.url + " to queue");
+        }
+
+        List<String> doclocation = new ArrayList<>();
+
+        if (doc != null) {
+            logger.info("Got doc location " + doc.location());
+        }
+
+        while (doc != null) {
+
+            logger.info("Processing a doc...");
+
+            // catch if we saw a doc location already, save the ones seen in a list
+            if (doclocation.contains(doc.location())) {
+                logger.info("Already processed location " + doc.location() + " breaking");
+                break;
+            }
+            doclocation.add(doc.location());
+
+            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
+                sendUpdate(STATUS.DOWNLOAD_COMPLETE_HISTORY, "Already seen the last " + alreadyDownloadedUrls + " images; ending rip");
+                break;
+            }
+
+            logger.info("retrieving urls from doc");
+
+            List<String> imageURLs = getURLsFromPage(doc);
+            // If hasASAPRipping() returns true then the ripper will handle downloading the files
+            // if not it's done in the following block of code
+            if (!hasASAPRipping()) {
+                // Remove all but 1 image
+                if (isThisATest()) {
+                    while (imageURLs.size() > 1) {
+                        imageURLs.remove(1);
+                    }
+                }
+
+                if (imageURLs.isEmpty()) {
+                    throw new IOException("No images found at " + doc.location());
+                }
+
+                for (String imageURL : imageURLs) {
+                    index += 1;
+                    logger.debug("Found image url #" + index + ": '" + imageURL + "'");
+                    downloadURL(new URI(imageURL).toURL(), index);
+                    if (isStopped() || isThisATest()) {
+                        break;
+                    }
+                }
+            }
+            if (hasDescriptionSupport() && Utils.getConfigBoolean("descriptions.save", false)) {
+                logger.debug("Fetching description(s) from " + doc.location());
+                List<String> textURLs = getDescriptionsFromPage(doc);
+                if (!textURLs.isEmpty()) {
+                    logger.debug("Found description link(s) from " + doc.location());
+                    for (String textURL : textURLs) {
+                        if (isStopped() || isThisATest()) {
+                            break;
+                        }
+
+                        textindex += 1;
+                        logger.debug("Getting description from " + textURL);
+                        String[] tempDesc = getDescription(textURL,doc);
+
+                        if (tempDesc != null) {
+                            URL url = new URI(textURL).toURL();
+                            String filename = fileNameFromURL(url);
+
+                            boolean fileExists = new File(
+                                workingDir.getCanonicalPath()
+                                        + File.separator
+                                        + getPrefix(index)
+                                        + (tempDesc.length > 1 ? tempDesc[1] : filename)
+                                        + ".txt").exists();
+
+                            if (Utils.getConfigBoolean("file.overwrite", false) || !fileExists) {
+                                logger.debug("Got description from " + textURL);
+                                saveText(url, "", tempDesc[0], textindex, (tempDesc.length > 1 ? tempDesc[1] : filename));
+                                sleep(descSleepTime());
+                            } else {
+                                logger.debug("Description from " + textURL + " already exists.");
+                            }
+                        }
+
+                    }
+                }
+            }
+
+            if (isStopped() || isThisATest()) {
+                break;
+            }
+
+            try {
+                sendUpdate(STATUS.LOADING_RESOURCE, "next page");
+                doc = getNextPage(doc);
+            } catch (IOException e) {
+                logger.info("Can't get next page: " + e.getMessage());
+                break;
+            }
+        }
+
+        // If they're using a thread pool, wait for it.
+        if (getThreadPool() != null) {
+            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            getThreadPool().waitForThreads();
+        }
+        waitForThreads();
+    }
+
+    /**
+     * Gets the file name from the URL
+     * @param url
+     *      URL that you want to get the filename from
+     * @return
+     *      Filename of the URL
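+     *      (e.g. a hypothetical {@code https://example.com/a/b.jpg?x=1#frag} yields {@code b.jpg})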
+     */
+    private String fileNameFromURL(URL url) {
+        String saveAs = url.toExternalForm();
+        if (saveAs.endsWith("/")) { saveAs = saveAs.substring(0, saveAs.length() - 1); }
+        saveAs = saveAs.substring(saveAs.lastIndexOf('/')+1);
+        if (saveAs.indexOf('?') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('?')); }
+        if (saveAs.indexOf('#') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('#')); }
+        if (saveAs.indexOf('&') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf('&')); }
+        if (saveAs.indexOf(':') >= 0) { saveAs = saveAs.substring(0, saveAs.indexOf(':')); }
+        return saveAs;
+    }
+    /**
+     *
+     * @param url
+     *      Target URL
+     * @param subdirectory
+     *      Path to subdirectory where you want to save it
+     * @param text
+     *      Text you want to save
+     * @param index
+     *      Index in something like an album
+     * @return
+     *      True if ripped successfully
+     *      False if failed
+     */
+    public boolean saveText(URL url, String subdirectory, String text, int index) {
+        String saveAs = fileNameFromURL(url);
+        return saveText(url,subdirectory,text,index,saveAs);
+    }
+    private boolean saveText(URL url, String subdirectory, String text, int index, String fileName) {
+        // Not the best for some cases, like FurAffinity. Overridden there.
+        try {
+            stopCheck();
+        } catch (IOException e) {
+            return false;
+        }
+        File saveFileAs;
+        try {
+            if (!subdirectory.equals("")) { // Not sure about this part
+                subdirectory = File.separator + subdirectory;
+            }
+            saveFileAs = new File(
+                    workingDir.getCanonicalPath()
+                    + subdirectory
+                    + File.separator
+                    + getPrefix(index)
+                    + fileName
+                    + ".txt");
+            // Create the parent directory before writing so the stream can be opened
+            if (!saveFileAs.getParentFile().exists()) {
+                logger.info("[+] Creating directory: " + saveFileAs.getParent());
+                saveFileAs.getParentFile().mkdirs();
+            }
+            logger.debug("Saving " + url + "'s description to " + saveFileAs);
+            // Write the file; try-with-resources closes the stream even on failure
+            try (FileOutputStream out = new FileOutputStream(saveFileAs)) {
+                out.write(text.getBytes());
+            }
+        } catch (IOException e) {
+            logger.error("[!] Error creating save file path for description '" + url + "':", e);
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * Builds the filename prefix for an item based on its position in the album.
+     * @param index
+     *      The item's position in the album
+     * @return
+     *      A zero-padded prefix such as "001_" when sort order is kept, otherwise an empty string.
+     */
+    protected String getPrefix(int index) {
+        String prefix = "";
+        if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
+            prefix = String.format("%03d_", index);
+        }
+        return prefix;
+    }
+
+    /*
+     * ------ Methods copied from AlbumRipper. ------
+     * This removes AlbumRipper's usage from this class.
+     */
+
+    protected boolean allowDuplicates() {
+        return false;
+    }
+
+    @Override
+    /*
+      Returns total amount of files attempted.
+     */
+    public int getCount() {
+        return itemsCompleted.size() + itemsErrored.size();
+    }
+
+    @Override
+    /*
+      Queues multiple URLs of single images to download from a single Album URL
+     */
+    public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
+        // Only download one file if this is a test.
+        if (isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
+            stop();
+            itemsPending.clear();
+            return false;
+        }
+        if (!allowDuplicates()
+                && ( itemsPending.containsKey(url)
+                  || itemsCompleted.containsKey(url)
+                  || itemsErrored.containsKey(url) )) {
+            // Item is already downloaded/downloading, skip it.
+            logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+            return false;
+        }
+        if (shouldIgnoreURL(url)) {
+            sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension");
+            return false;
+        }
+        if (Utils.getConfigBoolean("urls_only.save", false)) {
+            // Output URL to file
+            Path urlFile = Paths.get(this.workingDir + "/urls.txt");
+            String text = url.toExternalForm() + System.lineSeparator();
+            try {
+                Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND);
+                itemsCompleted.put(url, urlFile);
+            } catch (IOException e) {
+                logger.error("Error while writing to " + urlFile, e);
+            }
+        }
+        else {
+            itemsPending.put(url, saveAs.toFile());
+            DownloadFileThread dft = new DownloadFileThread(url,  saveAs.toFile(),  this, getFileExtFromMIME);
+            if (referrer != null) {
+                dft.setReferrer(referrer);
+            }
+            if (cookies != null) {
+                dft.setCookies(cookies);
+            }
+            threadPool.addThread(dft);
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean addURLToDownload(URL url, Path saveAs) {
+        return addURLToDownload(url, saveAs, null, null, false);
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     * Uses filename from URL to decide filename.
+     * @param url
+     *      URL to download
+     * @return
+     *      True on success
+     */
+    protected boolean addURLToDownload(URL url) {
+        // Use empty prefix and empty subdirectory
+        return addURLToDownload(url, "", "");
+    }
+
+    @Override
+    /*
+      Cleans up & tells user about successful download
+     */
+    public void downloadCompleted(URL url, Path saveAs) {
+        if (observer == null) {
+            return;
+        }
+        try {
+            String path = Utils.removeCWD(saveAs);
+            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);
+            itemsPending.remove(url);
+            itemsCompleted.put(url, saveAs);
+            observer.update(this, msg);
+
+            checkIfComplete();
+        } catch (Exception e) {
+            logger.error("Exception while updating observer: ", e);
+        }
+    }
+
+    @Override
+    /*
+     * Cleans up & tells user about failed download.
+     */
+    public void downloadErrored(URL url, String reason) {
+        if (observer == null) {
+            return;
+        }
+        itemsPending.remove(url);
+        itemsErrored.put(url, reason);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
+
+        checkIfComplete();
+    }
+
+    @Override
+    /*
+      Tells user that a single file in the album they wish to download has
+      already been downloaded in the past.
+     */
+    public void downloadExists(URL url, Path file) {
+        if (observer == null) {
+            return;
+        }
+
+        itemsPending.remove(url);
+        itemsCompleted.put(url, file);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
+
+        checkIfComplete();
+    }
+
+    /**
+     * Notifies observers and updates state if all files have been ripped.
+     */
+    @Override
+    protected void checkIfComplete() {
+        if (observer == null) {
+            return;
+        }
+        if (itemsPending.isEmpty()) {
+            super.checkIfComplete();
+        }
+    }
+
+    /**
+     * Sets directory to save all ripped files to.
+     * @param url
+     *      URL to define how the working directory should be saved.
+     */
+    @Override
+    public void setWorkingDir(URL url) throws IOException, URISyntaxException {
+        Path wd = Utils.getWorkingDirectory();
+        // TODO - change to nio
+        String path = wd.toAbsolutePath().toString();
+        if (!path.endsWith(File.separator)) {
+            path += File.separator;
+        }
+        String title = getAlbumTitle(this.url);
+        logger.debug("Using album title '" + title + "'");
+
+        title = Utils.filesystemSafe(title);
+        path += title;
+        path = Utils.getOriginalDirectory(path) + File.separator;   // check for case sensitive (unix only)
+
+        this.workingDir = new File(path);
+        if (!this.workingDir.exists()) {
+            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir.toPath()));
+            if (!this.workingDir.mkdirs()) {
+                throw new IOException("Failed creating dir: \"" + this.workingDir + "\"");
+            }
+        }
+        logger.debug("Set working directory to: " + this.workingDir);
+    }
+
+    /**
+     * @return
+     *      Integer between 0 and 100 defining the progress of the album rip.
+     */
+    @Override
+    public int getCompletionPercentage() {
+        double total = itemsPending.size()  + itemsErrored.size() + itemsCompleted.size();
+        return (int) (100 * ( (total - itemsPending.size()) / total));
+    }
+
+    /**
+     * @return
+     *      Human-readable information on the status of the current rip.
+     */
+    @Override
+    public String getStatusText() {
+        return getCompletionPercentage() +
+                "% " +
+                "- Pending: " + itemsPending.size() +
+                ", Completed: " + itemsCompleted.size() +
+                ", Errored: " + itemsErrored.size();
+    }
+
+
+}

+ 338 - 0
src/main/java/com/rarchives/ripme/ripper/AbstractJSONRipper.java

@@ -0,0 +1,338 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONObject;
+
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ * Simplified ripper, designed for ripping from sites by parsing JSON.
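+ *
+ * A minimal subclass sketch, assuming a hypothetical JSON API (the names, endpoint, and the
+ * "images" key are illustrative only; getJSON() is assumed from this project's Http helper):
+ * <pre>
+ * public class ExampleJSONRipper extends AbstractJSONRipper {
+ *     public ExampleJSONRipper(URL url) throws IOException { super(url); }
+ *     protected String getDomain() { return "example.com"; }
+ *     public String getHost() { return "example"; }
+ *     protected JSONObject getFirstPage() throws IOException {
+ *         return Http.url(url).getJSON(); // fetch and parse the album's JSON
+ *     }
+ *     protected List&lt;String&gt; getURLsFromJSON(JSONObject json) {
+ *         List&lt;String&gt; urls = new ArrayList&lt;&gt;();
+ *         for (Object o : json.getJSONArray("images")) { urls.add(o.toString()); }
+ *         return urls;
+ *     }
+ *     protected void downloadURL(URL url, int index) {
+ *         addURLToDownload(url, getPrefix(index), "");
+ *     }
+ * }
+ * </pre>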
+ */
+public abstract class AbstractJSONRipper extends AbstractRipper {
+
+    private static final Logger logger = LogManager.getLogger(AbstractJSONRipper.class);
+
+    private Map<URL, File> itemsPending = Collections.synchronizedMap(new HashMap<>());
+    private Map<URL, Path> itemsCompleted = Collections.synchronizedMap(new HashMap<>());
+    private Map<URL, String> itemsErrored = Collections.synchronizedMap(new HashMap<>());
+
+    protected AbstractJSONRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    protected abstract String getDomain();
+    @Override
+    public abstract String getHost();
+
+    protected abstract JSONObject getFirstPage() throws IOException, URISyntaxException;
+    protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException {
+        throw new IOException("getNextPage not implemented");
+    }
+    protected abstract List<String> getURLsFromJSON(JSONObject json);
+    protected abstract void downloadURL(URL url, int index);
+    private DownloadThreadPool getThreadPool() {
+        return null;
+    }
+
+    protected boolean keepSortOrder() {
+        return true;
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return url.getHost().endsWith(getDomain());
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        return url;
+    }
+
+    @Override
+    public void rip() throws IOException, URISyntaxException {
+        int index = 0;
+        logger.info("Retrieving " + this.url);
+        sendUpdate(STATUS.LOADING_RESOURCE, this.url.toExternalForm());
+        JSONObject json = getFirstPage();
+
+        while (json != null) {
+            List<String> imageURLs = getURLsFromJSON(json);
+
+            if (alreadyDownloadedUrls >= Utils.getConfigInteger("history.end_rip_after_already_seen", 1000000000) && !isThisATest()) {
+                sendUpdate(STATUS.DOWNLOAD_COMPLETE, "Already seen the last " + alreadyDownloadedUrls + " images; ending rip");
+                break;
+            }
+
+            // Remove all but 1 image
+            if (isThisATest()) {
+                while (imageURLs.size() > 1) {
+                    imageURLs.remove(1);
+                }
+            }
+
+            if (imageURLs.isEmpty() && !hasASAPRipping()) {
+                throw new IOException("No images found at " + this.url);
+            }
+
+            for (String imageURL : imageURLs) {
+                if (isStopped()) {
+                    break;
+                }
+
+                index += 1;
+                logger.debug("Found image url #" + index+ ": " + imageURL);
+                downloadURL(new URI(imageURL).toURL(), index);
+            }
+
+            if (isStopped() || isThisATest()) {
+                break;
+            }
+
+            try {
+                sendUpdate(STATUS.LOADING_RESOURCE, "next page");
+                json = getNextPage(json);
+            } catch (IOException | URISyntaxException e) {
+                logger.info("Can't get next page: " + e.getMessage());
+                break;
+            }
+        }
+
+        // If they're using a thread pool, wait for it.
+        if (getThreadPool() != null) {
+            logger.debug("Waiting for threadpool " + getThreadPool().getClass().getName());
+            getThreadPool().waitForThreads();
+        }
+        waitForThreads();
+    }
+
+    protected String getPrefix(int index) {
+        String prefix = "";
+        if (keepSortOrder() && Utils.getConfigBoolean("download.save_order", true)) {
+            prefix = String.format("%03d_", index);
+        }
+        return prefix;
+    }
+
+    /*
+     * ------ Methods copied from AlbumRipper ------
+     */
+
+    protected boolean allowDuplicates() {
+        return false;
+    }
+
+    @Override
+    /**
+     * Returns total amount of files attempted.
+     */
+    public int getCount() {
+        return itemsCompleted.size() + itemsErrored.size();
+    }
+
+    @Override
+    /**
+     * Queues multiple URLs of single images to download from a single Album URL
+     */
+    public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
+        // Only download one file if this is a test.
+        if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
+            stop();
+            itemsPending.clear();
+            return false;
+        }
+        if (!allowDuplicates()
+                && ( itemsPending.containsKey(url)
+                  || itemsCompleted.containsKey(url)
+                  || itemsErrored.containsKey(url) )) {
+            // Item is already downloaded/downloading, skip it.
+            logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+            return false;
+        }
+        if (shouldIgnoreURL(url)) {
+            sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension");
+            return false;
+        }
+        if (Utils.getConfigBoolean("urls_only.save", false)) {
+            // Output URL to file
+            Path urlFile = Paths.get(this.workingDir + "/urls.txt");
+            String text = url.toExternalForm() + System.lineSeparator();
+            try {
+                Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND);
+                itemsCompleted.put(url, urlFile);
+            } catch (IOException e) {
+                logger.error("Error while writing to " + urlFile, e);
+            }
+        }
+        else {
+            itemsPending.put(url, saveAs.toFile());
+            DownloadFileThread dft = new DownloadFileThread(url, saveAs.toFile(), this, getFileExtFromMIME);
+            if (referrer != null) {
+                dft.setReferrer(referrer);
+            }
+            if (cookies != null) {
+                dft.setCookies(cookies);
+            }
+            threadPool.addThread(dft);
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean addURLToDownload(URL url, Path saveAs) {
+        return addURLToDownload(url, saveAs, null, null, false);
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     * Uses filename from URL to decide filename.
+     * @param url
+     *      URL to download
+     * @return
+     *      True on success
+     */
+    protected boolean addURLToDownload(URL url) {
+        // Use empty prefix and empty subdirectory
+        return addURLToDownload(url, "", "");
+    }
+
+    @Override
+    /**
+     * Cleans up & tells user about successful download
+     */
+    public void downloadCompleted(URL url, Path saveAs) {
+        if (observer == null) {
+            return;
+        }
+        try {
+            String path = Utils.removeCWD(saveAs);
+            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);
+            itemsPending.remove(url);
+            itemsCompleted.put(url, saveAs);
+            observer.update(this, msg);
+
+            checkIfComplete();
+        } catch (Exception e) {
+            logger.error("Exception while updating observer: ", e);
+        }
+    }
+
+    @Override
+    /**
+     * Cleans up & tells user about failed download.
+     */
+    public void downloadErrored(URL url, String reason) {
+        if (observer == null) {
+            return;
+        }
+        itemsPending.remove(url);
+        itemsErrored.put(url, reason);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
+
+        checkIfComplete();
+    }
+
+    @Override
+    /**
+     * Tells user that a single file in the album they wish to download has
+     * already been downloaded in the past.
+     */
+    public void downloadExists(URL url, Path file) {
+        if (observer == null) {
+            return;
+        }
+
+        itemsPending.remove(url);
+        itemsCompleted.put(url, file);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
+
+        checkIfComplete();
+    }
+
+    /**
+     * Notifies observers and updates state if all files have been ripped.
+     */
+    @Override
+    protected void checkIfComplete() {
+        if (observer == null) {
+            return;
+        }
+        if (itemsPending.isEmpty()) {
+            super.checkIfComplete();
+        }
+    }
+
+    /**
+     * Sets directory to save all ripped files to.
+     * @param url
+     *      URL to define how the working directory should be saved.
+     * @throws IOException
+     */
+    @Override
+    public void setWorkingDir(URL url) throws IOException, URISyntaxException {
+        Path wd = Utils.getWorkingDirectory();
+        String title;
+        if (Utils.getConfigBoolean("album_titles.save", true)) {
+            title = getAlbumTitle(this.url);
+        } else {
+            title = super.getAlbumTitle(this.url);
+        }
+        logger.debug("Using album title '" + title + "'");
+
+        title = Utils.filesystemSafe(title);
+        wd = wd.resolve(title);
+        if (!Files.exists(wd)) {
+            logger.info("[+] Creating directory: " + Utils.removeCWD(wd));
+            Files.createDirectory(wd);
+        }
+        this.workingDir = wd.toFile();
+        logger.info("Set working directory to: {}", this.workingDir);
+    }
+
+    /**
+     * @return
+     *      Integer between 0 and 100 defining the progress of the album rip.
+     */
+    @Override
+    public int getCompletionPercentage() {
+        double total = itemsPending.size() + itemsErrored.size() + itemsCompleted.size();
+        return (int) (100 * ((total - itemsPending.size()) / total));
+    }
+
+    /**
+     * @return
+     *      Human-readable information on the status of the current rip.
+     */
+    @Override
+    public String getStatusText() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(getCompletionPercentage())
+          .append("% ")
+          .append("- Pending: "  ).append(itemsPending.size())
+          .append(", Completed: ").append(itemsCompleted.size())
+          .append(", Errored: "  ).append(itemsErrored.size());
+        return sb.toString();
+    }
+
+
+}
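
To make the contract above concrete, here is a minimal sketch of a hypothetical subclass. The host (example.com), the JSON shape (a single page whose "images" array holds direct file URLs), and the Http helper call are illustrative assumptions, not code from this commit:

    package com.rarchives.ripme.ripper.rippers;

    import java.io.IOException;
    import java.net.MalformedURLException;
    import java.net.URL;
    import java.util.ArrayList;
    import java.util.List;

    import org.json.JSONArray;
    import org.json.JSONObject;

    import com.rarchives.ripme.ripper.AbstractJSONRipper;
    import com.rarchives.ripme.utils.Http;

    public class ExampleRipper extends AbstractJSONRipper {

        // Public constructor, so getRipper()'s reflection can instantiate it
        public ExampleRipper(URL url) throws IOException {
            super(url);
        }

        @Override
        protected String getDomain() {
            return "example.com";
        }

        @Override
        public String getHost() {
            return "example";
        }

        @Override
        public String getGID(URL url) throws MalformedURLException {
            // Album id taken from the path; crude, but unique enough for a sketch
            return url.getPath().replaceAll("/", "_");
        }

        @Override
        protected JSONObject getFirstPage() throws IOException {
            return Http.url(url).getJSON();
        }

        // getNextPage is not overridden: this hypothetical site has one page,
        // so the default "getNextPage not implemented" exception ends the loop.

        @Override
        protected List<String> getURLsFromJSON(JSONObject json) {
            List<String> result = new ArrayList<>();
            JSONArray images = json.getJSONArray("images");
            for (int i = 0; i < images.length(); i++) {
                result.add(images.getString(i));
            }
            return result;
        }

        @Override
        protected void downloadURL(URL url, int index) {
            // Inherited queueing helper; the prefix preserves sort order
            addURLToDownload(url, getPrefix(index));
        }
    }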

+ 807 - 0
src/main/java/com/rarchives/ripme/ripper/AbstractRipper.java

@@ -0,0 +1,807 @@
+package com.rarchives.ripme.ripper;
+
+import java.awt.Desktop;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Observable;
+import java.util.Random;
+import java.util.Scanner;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.HttpStatusException;
+
+import com.rarchives.ripme.App;
+import com.rarchives.ripme.ui.RipStatusComplete;
+import com.rarchives.ripme.ui.RipStatusHandler;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
+// Suppress warning for specifically Observable. Hopefully no other deprecations
+// get hidden by this suppression.
+// The reason for this is that the deprecation is due to insufficiently powerful
+// design. However, it's good enough for us and getting rid of the warning means
+// adding our own Observer pattern implementation that is essentially a copy-
+// paste of the one in the JDK that has been deprecated. No need to do that.
+@SuppressWarnings("deprecation")
+public abstract class AbstractRipper
+        extends Observable
+        implements RipperInterface, Runnable {
+
+    private static final Logger logger = LogManager.getLogger(AbstractRipper.class);
+    private final String URLHistoryFile = Utils.getURLHistoryFile();
+
+    public static final String USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36";
+
+    private static Random randomGenerator = new Random();
+
+    protected URL url;
+    protected File workingDir;
+    DownloadThreadPool threadPool;
+    RipStatusHandler observer = null;
+
+    private boolean completed = true;
+
+    public abstract void rip() throws IOException, URISyntaxException;
+
+    public abstract String getHost();
+
+    public abstract String getGID(URL url) throws MalformedURLException, URISyntaxException;
+
+    public boolean hasASAPRipping() {
+        return false;
+    }
+
+    // Every time addURLToDownload skips an already-downloaded URL, this increases by 1
+    public int alreadyDownloadedUrls = 0;
+    private final AtomicBoolean shouldStop = new AtomicBoolean(false);
+    private static boolean thisIsATest = false;
+
+    public void stop() {
+        logger.trace("stop()");
+        shouldStop.set(true);
+    }
+
+    public boolean isStopped() {
+        return shouldStop.get();
+    }
+
+    protected void stopCheck() throws IOException {
+        if (shouldStop.get()) {
+            throw new IOException("Ripping interrupted");
+        }
+    }
+
+    /**
+     * Adds a URL to the url history file
+     *
+     * @param downloadedURL URL to record in the history file
+     */
+    protected void writeDownloadedURL(String downloadedURL) throws IOException {
+        // If "save urls only" is checked don't write to the url history file
+        if (Utils.getConfigBoolean("urls_only.save", false)) {
+            return;
+        }
+        downloadedURL = normalizeUrl(downloadedURL);
+        try {
+            File file = new File(URLHistoryFile);
+            File configDir = new File(Utils.getConfigDir());
+            if (!configDir.exists()) {
+                logger.info("Config dir doesn't exist; creating it");
+                if (!configDir.mkdirs()) {
+                    logger.error("Couldn't make config dir");
+                    return;
+                }
+            }
+            // If the file doesn't exist, create it
+            if (!file.exists() && !file.createNewFile()) {
+                logger.error("Couldn't create url history file");
+                return;
+            }
+            if (!file.canWrite()) {
+                logger.error("Can't write to url history file: " + URLHistoryFile);
+                return;
+            }
+            // try-with-resources closes the writer even if the write fails
+            try (BufferedWriter bw = new BufferedWriter(new FileWriter(file.getAbsoluteFile(), true))) {
+                bw.write(downloadedURL);
+            }
+        } catch (IOException e) {
+            logger.error("Error while writing to url history file " + URLHistoryFile, e);
+        }
+    }
+
+    /**
+     * Normalize a URL before it is written to or checked against the history file.
+     *
+     * @param url URL to normalize
+     * @return The normalized URL (identity by default; rippers may override)
+     */
+    public String normalizeUrl(String url) {
+        return url;
+    }
+
+    /**
+     * Checks to see if Ripme has already downloaded a URL
+     *
+     * @param url URL to check if downloaded
+     * @return Returns true if previously downloaded.
+     *         Returns false if not yet downloaded.
+     */
+    protected boolean hasDownloadedURL(String url) {
+        File file = new File(URLHistoryFile);
+        url = normalizeUrl(url);
+
+        try (Scanner scanner = new Scanner(file)) {
+            while (scanner.hasNextLine()) {
+                final String lineFromFile = scanner.nextLine();
+                if (lineFromFile.equals(url)) {
+                    return true;
+                }
+            }
+        } catch (FileNotFoundException e) {
+            return false;
+        }
+
+        return false;
+    }
+
+    /**
+     * Ensures inheriting ripper can rip this URL, raises exception if not.
+     * Otherwise initializes working directory and thread pool.
+     *
+     * @param url URL to rip.
+     * @throws IOException If anything goes wrong.
+     */
+    public AbstractRipper(URL url) throws IOException {
+        if (!canRip(url)) {
+            throw new MalformedURLException("Unable to rip url: " + url);
+        }
+        try {
+            this.url = sanitizeURL(url);
+        } catch (URISyntaxException e) {
+            throw new MalformedURLException(e.getMessage());
+        }
+    }
+
+    /**
+     * Sets ripper's:
+     * - Working directory
+     * - Logger (for debugging)
+     * - FileAppender
+     * - Threadpool
+     *
+     * @throws IOException Always be prepared.
+     */
+    public void setup() throws IOException, URISyntaxException {
+        setWorkingDir(this.url);
+        // we do not care if the RollingFileAppender is active,
+        // just change the logfile in case.
+        // TODO this does not work - not even with
+        // .withFileName("${sys:logFilename}")
+        // in Utils.java, RollingFileAppender.
+        // System.setProperty("logFilename", this.workingDir + "/log.txt");
+        // logger.debug("Changing log file to '{}/log.txt'", this.workingDir);
+        // LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+        // ctx.reconfigure();
+        // ctx.updateLoggers();
+
+        this.threadPool = new DownloadThreadPool();
+    }
+
+    public void setObserver(RipStatusHandler obs) {
+        this.observer = obs;
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     *
+     * @param url    URL of the file
+     * @param saveAs Path of the local file to save the content to.
+     * @return True on success, false on failure.
+     */
+    public abstract boolean addURLToDownload(URL url, Path saveAs);
+
+    /**
+     * Queues image to be downloaded and saved.
+     *
+     * @param url      URL of the file
+     * @param saveAs   Path of the local file to save the content to.
+     * @param referrer The HTTP referrer to use while downloading this file.
+     * @param cookies  The cookies to send to the server while downloading this
+     *                 file.
+     * @return True if downloaded successfully
+     *         False if failed to download
+     */
+    protected abstract boolean addURLToDownload(URL url, Path saveAs, String referrer, Map<String, String> cookies,
+            Boolean getFileExtFromMIME);
+
+    /**
+     * Queues image to be downloaded and saved.
+     *
+     * @param url     URL of the file
+     * @param options A map<String,String> containing any changes to the default
+     *                options.
+     *                Options are prefix, subdirectory, referrer, fileName,
+     *                extension, and getFileExtFromMIME.
+     *                getFileExtFromMIME should be "true" or "false"
+     * @param cookies The cookies to send to the server while downloading this file.
+     * @return True if downloaded successfully
+     *         False if failed to download
+     */
+    protected boolean addURLToDownload(URL url, Map<String, String> options, Map<String, String> cookies) {
+        // Bit of a hack but this lets us pass a bool using a map<string,String>
+        boolean useMIME = options.getOrDefault("getFileExtFromMIME", "false").equalsIgnoreCase("true");
+        return addURLToDownload(url,
+                options.getOrDefault("subdirectory", ""),
+                options.getOrDefault("referrer", null),
+                cookies,
+                options.getOrDefault("prefix", ""), options.getOrDefault("fileName", null),
+                options.getOrDefault("extension", null),
+                useMIME);
+    }
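+
+    // Illustrative call (hypothetical values): queue a download into a "pages"
+    // subdirectory with a numeric prefix, taking the extension from the
+    // response MIME type:
+    //
+    //   Map<String, String> options = new HashMap<>();
+    //   options.put("subdirectory", "pages");
+    //   options.put("prefix", "001_");
+    //   options.put("getFileExtFromMIME", "true");
+    //   addURLToDownload(imageUrl, options, cookies);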
+
+    /**
+     * Queues image to be downloaded and saved.
+     *
+     * @param url     URL of the file
+     * @param options A map<String,String> containing any changes to the default
+     *                options.
+     *                Options are prefix, subdirectory, referrer, fileName,
+     *                extension, and getFileExtFromMIME.
+     *                getFileExtFromMIME should be "true" or "false"
+     * @return True if downloaded successfully
+     *         False if failed to download
+     */
+    protected boolean addURLToDownload(URL url, Map<String, String> options) {
+        return addURLToDownload(url, options, null);
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     *
+     * @param url          URL of the file
+     * @param prefix       Prefix for the downloaded file
+     * @param subdirectory Path to get to desired directory from working directory
+     * @param referrer     The HTTP referrer to use while downloading this file.
+     * @param cookies      The cookies to send to the server while downloading this
+     *                     file.
+     * @param fileName     The name that the file will be written to
+     * @param extension    File extension to save with; when null, one is derived
+     *                     from the filename
+     * @param getFileExtFromMIME Whether to derive the extension from the response
+     *                     MIME type
+     * @return True if downloaded successfully
+     *         False if failed to download
+     */
+    protected boolean addURLToDownload(URL url, String subdirectory, String referrer, Map<String, String> cookies,
+            String prefix, String fileName, String extension, Boolean getFileExtFromMIME) {
+        // A common bug is rippers adding urls that are just "http:".
+        // This rejects said urls.
+        if (url.toExternalForm().equals("http:") || url.toExternalForm().equals("https:")) {
+            logger.info(url.toExternalForm() + " is an invalid url and will be skipped");
+            return false;
+        }
+
+        // Make sure the url doesn't contain any spaces as that can cause a 400 error
+        // when requesting the file
+        if (url.toExternalForm().contains(" ")) {
+            // If for some reason the url with all spaces encoded as %20 is malformed print
+            // an error
+            try {
+                url = new URI(url.toExternalForm().replaceAll(" ", "%20")).toURL();
+            } catch (MalformedURLException | URISyntaxException e) {
+                logger.error("Unable to remove spaces from url\nURL: " + url.toExternalForm());
+                e.printStackTrace();
+            }
+        }
+
+        // Don't re-add the url if it was downloaded in a previous rip
+        if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
+            if (hasDownloadedURL(url.toExternalForm())) {
+                sendUpdate(STATUS.DOWNLOAD_WARN, "Already downloaded " + url.toExternalForm());
+                alreadyDownloadedUrls += 1;
+                return false;
+            }
+        }
+
+        try {
+            stopCheck();
+        } catch (IOException e) {
+            logger.debug("Ripper has been stopped");
+            return false;
+        }
+
+        logger.debug("url: " + url + ", subdirectory" + subdirectory + ", referrer: " + referrer + ", cookies: "
+                + cookies + ", prefix: " + prefix + ", fileName: " + fileName);
+
+        Path saveAs;
+        try {
+            saveAs = getFilePath(url, subdirectory, prefix, fileName, extension);
+            logger.debug("Downloading " + url + " to " + saveAs);
+            if (!Files.exists(saveAs.getParent())) {
+                logger.info("[+] Creating directory: " + saveAs.getParent());
+                Files.createDirectories(saveAs.getParent());
+            }
+        } catch (IOException e) {
+            logger.error("[!] Error creating save file path for URL '" + url + "':", e);
+            return false;
+        }
+
+        if (Utils.getConfigBoolean("remember.url_history", true) && !isThisATest()) {
+            logger.info("Writing " + url.toExternalForm() + " to file");
+            try {
+                writeDownloadedURL(url.toExternalForm() + "\n");
+            } catch (IOException e) {
+                logger.debug("Unable to write URL history file");
+            }
+        }
+
+        return addURLToDownload(url, saveAs, referrer, cookies, getFileExtFromMIME);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer,
+            Map<String, String> cookies, String fileName, String extension) {
+        return addURLToDownload(url, subdirectory, referrer, cookies, prefix, fileName, extension, false);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory, String referrer,
+            Map<String, String> cookies, String fileName) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, fileName, null);
+    }
+
+    /**
+     * Queues file to be downloaded and saved. With options.
+     *
+     * @param url          URL to download.
+     * @param prefix       Prefix to prepend to the saved filename.
+     * @param subdirectory Sub-directory of the working directory to save the images
+     *                     to.
+     * @return True on success, false on failure.
+     */
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory) {
+        return addURLToDownload(url, prefix, subdirectory, null, null, null);
+    }
+
+    protected boolean addURLToDownload(URL url, String prefix, String subdirectory,
+            String referrer, Map<String, String> cookies) {
+        return addURLToDownload(url, prefix, subdirectory, referrer, cookies, null);
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     * Uses filename from URL (and 'prefix') to decide filename.
+     *
+     * @param url
+     *               URL to download
+     * @param prefix
+     *               Text to prepend to the saved filename.
+     * @return True on success, false on failure.
+     */
+    protected boolean addURLToDownload(URL url, String prefix) {
+        // Use empty subdirectory
+        return addURLToDownload(url, prefix, "");
+    }
+
+    public Path getFilePath(URL url, String subdir, String prefix, String fileName, String extension)
+            throws IOException {
+        // Construct the save path: workingDir + subdir + prefix + filename + extension
+        Path filepath = Paths.get(workingDir.getCanonicalPath());
+
+        if (null != App.stringToAppendToFoldername) {
+            filepath = filepath.resolveSibling(filepath.getFileName() + App.stringToAppendToFoldername);
+        }
+
+        if (null != subdir && !subdir.trim().isEmpty()) {
+            filepath = filepath.resolve(Utils.filesystemSafe(subdir));
+        }
+
+        filepath = filepath.resolve(getFileName(url, prefix, fileName, extension));
+        return filepath;
+    }
+
+    public static String getFileName(URL url, String prefix, String fileName, String extension) {
+        // retrieve filename from URL if not passed
+        if (fileName == null || fileName.trim().isEmpty()) {
+            fileName = url.toExternalForm();
+            fileName = fileName.substring(fileName.lastIndexOf('/') + 1);
+        }
+
+        if (fileName.indexOf('?') >= 0) {
+            fileName = fileName.substring(0, fileName.indexOf('?'));
+        }
+
+        if (fileName.indexOf('#') >= 0) {
+            fileName = fileName.substring(0, fileName.indexOf('#'));
+        }
+
+        if (fileName.indexOf('&') >= 0) {
+            fileName = fileName.substring(0, fileName.indexOf('&'));
+        }
+
+        if (fileName.indexOf(':') >= 0) {
+            fileName = fileName.substring(0, fileName.indexOf(':'));
+        }
+
+        // add prefix
+        if (prefix != null && !prefix.trim().isEmpty()) {
+            fileName = prefix + fileName;
+        }
+
+        // retrieve extension from the (already query-stripped) filename if not
+        // passed; no extension if nothing is found
+        if (extension == null || extension.trim().isEmpty()) {
+            // String.split takes a regex, so the dot must be escaped
+            String[] lastBit = fileName.split("\\.");
+            if (lastBit.length > 1) {
+                extension = lastBit[lastBit.length - 1];
+            }
+        }
+
+        // if an extension was passed or found, make sure the filename ends with it
+        if (extension != null && !fileName.endsWith("." + extension)) {
+            fileName = fileName + "." + extension;
+        }
+
+        // make sure filename is not too long and has no unsupported chars
+        return Utils.sanitizeSaveAs(fileName);
+    }
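+
+    // Illustrative result (hypothetical URL): getFileName(
+    //   new URL("https://example.com/a/photo.jpg?width=640"), "003_", null, null)
+    // strips the query string, prepends the prefix, and keeps the extension
+    // already present in the name, yielding "003_photo.jpg".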
+
+    /**
+     * Waits for downloading threads to complete.
+     */
+    protected void waitForThreads() {
+        logger.debug("Waiting for threads to finish");
+        completed = false;
+        threadPool.waitForThreads();
+        checkIfComplete();
+    }
+
+    /**
+     * Notifies observers that source is being retrieved.
+     *
+     * @param url URL being retrieved
+     */
+    public void retrievingSource(String url) {
+        RipStatusMessage msg = new RipStatusMessage(STATUS.LOADING_RESOURCE, url);
+        if (observer != null) {
+            observer.update(this, msg);
+        }
+    }
+
+    /**
+     * Notifies observers that a file download has completed.
+     *
+     * @param url    URL that was completed.
+     * @param saveAs Where the downloaded file is stored.
+     */
+    public abstract void downloadCompleted(URL url, Path saveAs);
+
+    /**
+     * Notifies observers that a file could not be downloaded (includes a reason).
+     */
+    public abstract void downloadErrored(URL url, String reason);
+
+    /**
+     * Notify observers that a download could not be completed,
+     * but was not technically an "error".
+     */
+    public abstract void downloadExists(URL url, Path file);
+
+    /**
+     * @return Number of files downloaded.
+     */
+    int getCount() {
+        return 1;
+    }
+
+    /**
+     * Notifies observers and updates state if all files have been ripped.
+     */
+    void checkIfComplete() {
+        if (observer == null) {
+            logger.debug("observer is null");
+            return;
+        }
+
+        if (!completed) {
+            completed = true;
+            logger.info("   Rip completed!");
+
+            RipStatusComplete rsc = new RipStatusComplete(workingDir.toPath(), getCount());
+            RipStatusMessage msg = new RipStatusMessage(STATUS.RIP_COMPLETE, rsc);
+            observer.update(this, msg);
+
+            // we do not care if the rollingfileappender is active,
+            // just change the logfile in case
+            // TODO - does not work.
+            // System.setProperty("logFilename", "ripme.log");
+            // logger.debug("Changing log file back to 'ripme.log'");
+            // LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
+            // ctx.reconfigure();
+
+            if (Utils.getConfigBoolean("urls_only.save", false)) {
+                String urlFile = this.workingDir + File.separator + "urls.txt";
+                try {
+                    Desktop.getDesktop().open(new File(urlFile));
+                } catch (IOException e) {
+                    logger.warn("Error while opening " + urlFile, e);
+                }
+            }
+        }
+    }
+
+    /**
+     * Gets URL
+     *
+     * @return Returns URL that wants to be downloaded.
+     */
+    public URL getURL() {
+        return url;
+    }
+
+    /**
+     * @return Path to the directory in which all files
+     *         ripped via this ripper will be stored.
+     */
+    public File getWorkingDir() {
+        return workingDir;
+    }
+
+    @Override
+    public abstract void setWorkingDir(URL url) throws IOException, URISyntaxException;
+
+    /**
+     * @param url The URL you want to get the title of.
+     * @return host_URLid
+     *         e.g. (for a reddit post)
+     *         reddit_post_7mg2ur
+     * @throws MalformedURLException If any of those damned URLs gets malformed.
+     */
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            return getHost() + "_" + getGID(url);
+        } catch (URISyntaxException e) {
+            throw new MalformedURLException(e.getMessage());
+        }
+    }
+
+    /**
+     * Finds, instantiates, and returns a compatible ripper for given URL.
+     *
+     * @param url URL to rip.
+     * @return Instantiated ripper ready to rip given URL.
+     * @throws Exception If no compatible rippers can be found.
+     */
+    public static AbstractRipper getRipper(URL url) throws Exception {
+        for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers")) {
+            try {
+                // by design: can throw ClassCastException
+                AbstractRipper ripper = (AbstractRipper) constructor.newInstance(url);
+                logger.debug("Found album ripper: " + ripper.getClass().getName());
+                return ripper;
+            } catch (Exception e) {
+                // Incompatible rippers *will* throw exceptions during instantiation.
+            }
+        }
+        for (Constructor<?> constructor : getRipperConstructors("com.rarchives.ripme.ripper.rippers.video")) {
+            try {
+                // by design: can throw ClassCastException
+                VideoRipper ripper = (VideoRipper) constructor.newInstance(url);
+                logger.debug("Found video ripper: " + ripper.getClass().getName());
+                return ripper;
+            } catch (Exception e) {
+                // Incompatible rippers *will* throw exceptions during instantiation.
+            }
+        }
+        throw new Exception("No compatible ripper found");
+    }
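+
+    // Illustrative usage (hypothetical URL):
+    //   AbstractRipper ripper = AbstractRipper.getRipper(new URI("https://example.com/album/123").toURL());
+    //   ripper.setup();
+    //   new Thread(ripper).start(); // AbstractRipper implements Runnable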
+
+    /**
+     * @param pkg The package name.
+     * @return List of constructors for all eligible Rippers.
+     */
+    public static List<Constructor<?>> getRipperConstructors(String pkg) throws Exception {
+        List<Constructor<?>> constructors = new ArrayList<>();
+        for (Class<?> clazz : Utils.getClassesForPackage(pkg)) {
+            if (AbstractRipper.class.isAssignableFrom(clazz)) {
+                constructors.add(clazz.getConstructor(URL.class));
+            }
+        }
+        return constructors;
+    }
+
+    /**
+     * Sends an update message to the relevant observer(s) on this ripper.
+     *
+     * @param status  Status code for the update
+     * @param message Payload describing the update
+     */
+    public void sendUpdate(STATUS status, Object message) {
+        if (observer == null) {
+            return;
+        }
+        observer.update(this, new RipStatusMessage(status, message));
+    }
+
+    /**
+     * Get the completion percentage.
+     *
+     * @return Percentage complete
+     */
+    public abstract int getCompletionPercentage();
+
+    /**
+     * @return Text for status
+     */
+    public abstract String getStatusText();
+
+    /**
+     * Rips the album when the thread is invoked.
+     */
+    public void run() {
+        try {
+            rip();
+        } catch (HttpStatusException e) {
+            logger.error("Got exception while running ripper:", e);
+            waitForThreads();
+            sendUpdate(STATUS.RIP_ERRORED, "HTTP status code " + e.getStatusCode() + " for URL " + e.getUrl());
+        } catch (Exception e) {
+            logger.error("Got exception while running ripper:", e);
+            waitForThreads();
+            sendUpdate(STATUS.RIP_ERRORED, e.getMessage());
+        } finally {
+            cleanup();
+        }
+    }
+
+    /**
+     * Tries to delete any empty directories
+     */
+    private void cleanup() {
+        // workingDir.list() returns null if the directory doesn't exist or can't be read
+        String[] contents = this.workingDir != null ? this.workingDir.list() : null;
+        if (contents != null && contents.length == 0) {
+            // No files, delete the dir
+            logger.info("Deleting empty directory " + this.workingDir);
+            boolean deleteResult = this.workingDir.delete();
+            if (!deleteResult) {
+                logger.error("Unable to delete empty directory " + this.workingDir);
+            }
+        }
+    }
+
+    /**
+     * To look more like natural human usage, randomize the requested sleep
+     * interval on a gaussian distribution with a std-deviation of 30% of the
+     * value requested.
+     *
+     * The two-argument nextGaussian(mean, stddev) returns a sample from a
+     * normal distribution with the given mean and std-deviation, so the
+     * requested sleep time is passed as the mean and 30% of it as the
+     * std-deviation.
+     *
+     * To keep a stronger guarantee that we still sleep long enough to avoid
+     * being rate-limited, the result is clamped to a minimum of 47% (an
+     * arbitrary number near 50%) of the requested time.
+     *
+     * @param milliseconds Sleep time requested.
+     * @return A sleep time sampled from a gaussian distribution with mean
+     *         milliseconds and std-deviation of 30% of milliseconds, clamped
+     *         to at least 47% of milliseconds.
+     */
+    private int gaussianJitterSleepValue(int milliseconds) {
+        logger.debug("Requested sleep time: " + milliseconds);
+
+        String requestedMsg = " (Requested sleep time was " + milliseconds + " ms)";
+
+        int sleepTime = (int) randomGenerator.nextGaussian(milliseconds, milliseconds * 0.3);
+        logger.debug("Sleep time after gaussian jitter: " + sleepTime + requestedMsg);
+
+        int minSleepTime = (int) (milliseconds * 0.47);
+        if (sleepTime < minSleepTime) {
+            logger.debug("Sleep time after gaussian jitter was " + sleepTime
+                    + " which is less than minimum sleep time; adjusting to minimum sleep time of " + minSleepTime
+                    + requestedMsg);
+
+            sleepTime = minSleepTime;
+        }
+
+        return sleepTime;
+    }
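+
+    // Illustrative numbers: a requested sleep of 1000 ms is sampled from a
+    // gaussian with mean 1000 ms and std-deviation 300 ms, then clamped to a
+    // floor of 470 ms.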
+
+    /**
+     * Pauses thread for a set amount of time.
+     *
+     * @param milliseconds Amount of time (in milliseconds) that the thread gets
+     *                     paused for
+     * @return True if paused successfully
+     *         False if failed to pause/got interrupted.
+     */
+    protected boolean sleep(int milliseconds) {
+        milliseconds = gaussianJitterSleepValue(milliseconds);
+
+        try {
+            logger.debug("Sleeping " + milliseconds + "ms");
+            Thread.sleep(milliseconds);
+            return true;
+        } catch (InterruptedException e) {
+            logger.error("Interrupted while waiting to load next page", e);
+            return false;
+        }
+    }
+
+    public void setBytesTotal(int bytes) {
+        // Do nothing
+    }
+
+    public void setBytesCompleted(int bytes) {
+        // Do nothing
+    }
+
+    /** Methods for detecting when we're running a test. */
+    public void markAsTest() {
+        logger.debug("THIS IS A TEST RIP");
+        thisIsATest = true;
+    }
+
+    protected static boolean isThisATest() {
+        return thisIsATest;
+    }
+
+    // If true ripme uses a byte progress bar
+    protected boolean useByteProgessBar() {
+        return false;
+    }
+
+    // If true ripme will try to resume a broken download for this ripper
+    protected boolean tryResumeDownload() {
+        return false;
+    }
+
+    protected boolean shouldIgnoreURL(URL url) {
+        final String[] ignoredExtensions = Utils.getConfigStringArray("download.ignore_extensions");
+        if (ignoredExtensions == null || ignoredExtensions.length == 0)
+            return false; // nothing ignored
+        String[] pathElements = url.getPath().split("\\.");
+        if (pathElements.length < 2)
+            return false; // no extension, can't filter
+        String extension = pathElements[pathElements.length - 1];
+        for (String ignoredExtension : ignoredExtensions) {
+            if (ignoredExtension.equalsIgnoreCase(extension)) {
+                return true;
+            }
+        }
+        return false;
+    }
+}

+ 43 - 0
src/main/java/com/rarchives/ripme/ripper/AbstractSingleFileRipper.java

@@ -0,0 +1,43 @@
+package com.rarchives.ripme.ripper;
+
+import com.rarchives.ripme.utils.Utils;
+
+import java.io.IOException;
+import java.net.URL;
+
+
+/**
+ * This is just an extension of AbstractHTMLRipper that overrides a few methods
+ * to help cut down on copy-pasted code.
+ */
+public abstract class AbstractSingleFileRipper extends AbstractHTMLRipper {
+    private int bytesTotal = 1;
+    private int bytesCompleted = 1;
+
+    protected AbstractSingleFileRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getStatusText() {
+        return Utils.getByteStatusText(getCompletionPercentage(), bytesCompleted, bytesTotal);
+    }
+
+    @Override
+    public int getCompletionPercentage() {
+        return (int) (100 * (bytesCompleted / (float) bytesTotal));
+    }
+
+    @Override
+    public void setBytesTotal(int bytes) {
+        this.bytesTotal = bytes;
+    }
+
+    @Override
+    public void setBytesCompleted(int bytes) {
+        this.bytesCompleted = bytes;
+    }
+
+    @Override
+    public boolean useByteProgessBar() { return true; }
+}
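
A quick illustration of the byte-based progress above (hypothetical values; "ripper" stands in for any concrete subclass):

    ripper.setBytesTotal(2048);
    ripper.setBytesCompleted(512);
    int pct = ripper.getCompletionPercentage();  // (int) (100 * (512 / (float) 2048)) == 25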

+ 253 - 0
src/main/java/com/rarchives/ripme/ripper/AlbumRipper.java

@@ -0,0 +1,253 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
+// Should this file even exist? It does the same thing as AbstractHTMLRipper.
+
+/**
+ * For ripping delicious albums off the interwebz.
+ * @deprecated Use AbstractHTMLRipper instead.
+ */
+@Deprecated
+public abstract class AlbumRipper extends AbstractRipper {
+
+    private static final Logger logger = LogManager.getLogger(AlbumRipper.class);
+
+    private Map<URL, File> itemsPending = Collections.synchronizedMap(new HashMap<URL, File>());
+    private Map<URL, Path> itemsCompleted = Collections.synchronizedMap(new HashMap<URL, Path>());
+    private Map<URL, String> itemsErrored = Collections.synchronizedMap(new HashMap<URL, String>());
+
+    protected AlbumRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    public abstract boolean canRip(URL url);
+    public abstract URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException;
+    public abstract void rip() throws IOException;
+    public abstract String getHost();
+    public abstract String getGID(URL url) throws MalformedURLException, URISyntaxException;
+
+    protected boolean allowDuplicates() {
+        return false;
+    }
+
+    @Override
+    /**
+     * Returns total amount of files attempted.
+     */
+    public int getCount() {
+        return itemsCompleted.size() + itemsErrored.size();
+    }
+
+    @Override
+    /**
+     * Queues multiple URLs of single images to download from a single Album URL
+     */
+    public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map<String,String> cookies, Boolean getFileExtFromMIME) {
+        // Only download one file if this is a test.
+        if (super.isThisATest() && (itemsCompleted.size() > 0 || itemsErrored.size() > 0)) {
+            stop();
+            itemsPending.clear();
+            return false;
+        }
+        if (!allowDuplicates()
+                && ( itemsPending.containsKey(url)
+                  || itemsCompleted.containsKey(url)
+                  || itemsErrored.containsKey(url) )) {
+            // Item is already downloaded/downloading, skip it.
+            logger.info("[!] Skipping " + url + " -- already attempted: " + Utils.removeCWD(saveAs));
+            return false;
+        }
+        if (shouldIgnoreURL(url)) {
+            sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension");
+            return false;
+        }
+        if (Utils.getConfigBoolean("urls_only.save", false)) {
+            // Output URL to file
+            Path urlFile = Paths.get(this.workingDir + "/urls.txt");
+            String text = url.toExternalForm() + System.lineSeparator();
+            try {
+                Files.write(urlFile, text.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND);
+                itemsCompleted.put(url, urlFile);
+            } catch (IOException e) {
+                logger.error("Error while writing to " + urlFile, e);
+            }
+        }
+        else {
+            itemsPending.put(url, saveAs.toFile());
+            DownloadFileThread dft = new DownloadFileThread(url, saveAs.toFile(), this, getFileExtFromMIME);
+            if (referrer != null) {
+                dft.setReferrer(referrer);
+            }
+            if (cookies != null) {
+                dft.setCookies(cookies);
+            }
+            threadPool.addThread(dft);
+        }
+
+        return true;
+    }
+
+    @Override
+    public boolean addURLToDownload(URL url, Path saveAs) {
+        return addURLToDownload(url, saveAs, null, null, false);
+    }
+
+    /**
+     * Queues image to be downloaded and saved.
+     * Uses filename from URL to decide filename.
+     * @param url
+     *      URL to download
+     * @return
+     *      True on success
+     */
+    protected boolean addURLToDownload(URL url) {
+        // Use empty prefix and empty subdirectory
+        return addURLToDownload(url, "", "");
+    }
+
+    @Override
+    /**
+     * Cleans up & tells user about successful download
+     */
+    public void downloadCompleted(URL url, Path saveAs) {
+        if (observer == null) {
+            return;
+        }
+        try {
+            String path = Utils.removeCWD(saveAs);
+            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);
+            itemsPending.remove(url);
+            itemsCompleted.put(url, saveAs);
+            observer.update(this, msg);
+
+            checkIfComplete();
+        } catch (Exception e) {
+            logger.error("Exception while updating observer: ", e);
+        }
+    }
+
+    @Override
+    /**
+     * Cleans up & tells user about failed download.
+     */
+    public void downloadErrored(URL url, String reason) {
+        if (observer == null) {
+            return;
+        }
+        itemsPending.remove(url);
+        itemsErrored.put(url, reason);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
+
+        checkIfComplete();
+    }
+
+    @Override
+    /**
+     * Tells user that a single file in the album they wish to download has
+     * already been downloaded in the past.
+     */
+    public void downloadExists(URL url, Path file) {
+        if (observer == null) {
+            return;
+        }
+
+        itemsPending.remove(url);
+        itemsCompleted.put(url, file);
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
+
+        checkIfComplete();
+    }
+
+    /**
+     * Notifies observers and updates state if all files have been ripped.
+     */
+    @Override
+    protected void checkIfComplete() {
+        if (observer == null) {
+            return;
+        }
+        if (itemsPending.isEmpty()) {
+            super.checkIfComplete();
+        }
+    }
+
+    /**
+     * Sets directory to save all ripped files to.
+     * @param url
+     *      URL to define how the working directory should be saved.
+     * @throws IOException
+     */
+    @Override
+    public void setWorkingDir(URL url) throws IOException, URISyntaxException {
+        Path wd = Utils.getWorkingDirectory();
+        // TODO - change to nio
+        String path = wd.toAbsolutePath().toString();
+        if (!path.endsWith(File.separator)) {
+            path += File.separator;
+        }
+        String title;
+        if (Utils.getConfigBoolean("album_titles.save", true)) {
+            title = getAlbumTitle(this.url);
+        } else {
+            title = super.getAlbumTitle(this.url);
+        }
+        logger.debug("Using album title '" + title + "'");
+
+        title = Utils.filesystemSafe(title);
+        path += title;
+        path = Utils.getOriginalDirectory(path) + File.separator;   // check for case sensitive (unix only)
+
+        this.workingDir = new File(path);
+        if (!this.workingDir.exists()) {
+            logger.info("[+] Creating directory: " + Utils.removeCWD(this.workingDir.toPath()));
+            this.workingDir.mkdirs();
+        }
+        logger.debug("Set working directory to: " + this.workingDir);
+    }
+
+    /**
+     * @return
+     *      Integer between 0 and 100 defining the progress of the album rip.
+     */
+    @Override
+    public int getCompletionPercentage() {
+        double total = itemsPending.size() + itemsErrored.size() + itemsCompleted.size();
+        return (int) (100 * ((total - itemsPending.size()) / total));
+    }
+
+    /**
+     * @return
+     *      Human-readable information on the status of the current rip.
+     */
+    @Override
+    public String getStatusText() {
+        StringBuilder sb = new StringBuilder();
+        sb.append(getCompletionPercentage())
+          .append("% ")
+          .append("- Pending: "  ).append(itemsPending.size())
+          .append(", Completed: ").append(itemsCompleted.size())
+          .append(", Errored: "  ).append(itemsErrored.size());
+        return sb.toString();
+    }
+}

+ 312 - 0
src/main/java/com/rarchives/ripme/ripper/DownloadFileThread.java

@@ -0,0 +1,312 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.*;
+import java.net.*;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.net.ssl.HttpsURLConnection;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.HttpStatusException;
+
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ * Thread for downloading files. Includes retry logic, observer notifications,
+ * and other goodies.
+ */
+class DownloadFileThread implements Runnable {
+    private static final Logger logger = LogManager.getLogger(DownloadFileThread.class);
+
+    private String referrer = "";
+    private Map<String, String> cookies = new HashMap<>();
+
+    private final URL url;
+    private File saveAs;
+    private final String prettySaveAs;
+    private final AbstractRipper observer;
+    private final int retries;
+    private final Boolean getFileExtFromMIME;
+
+    private final int TIMEOUT;
+
+    private final int retrySleep;
+
+    public DownloadFileThread(URL url, File saveAs, AbstractRipper observer, Boolean getFileExtFromMIME) {
+        super();
+        this.url = url;
+        this.saveAs = saveAs;
+        this.prettySaveAs = Utils.removeCWD(saveAs.toPath());
+        this.observer = observer;
+        this.retries = Utils.getConfigInteger("download.retries", 1);
+        this.TIMEOUT = Utils.getConfigInteger("download.timeout", 60000);
+        this.retrySleep = Utils.getConfigInteger("download.retry.sleep", 0);
+        this.getFileExtFromMIME = getFileExtFromMIME;
+    }
+
+    public void setReferrer(String referrer) {
+        this.referrer = referrer;
+    }
+
+    public void setCookies(Map<String, String> cookies) {
+        this.cookies = cookies;
+    }
+
+    /**
+     * Attempts to download the file. Retries as needed. Notifies observers upon
+     * completion/error/warn.
+     */
+    @Override
+    public void run() {
+        // First, make sure the file name doesn't contain any illegal chars
+        saveAs = new File(
+                saveAs.getParentFile().getAbsolutePath() + File.separator + Utils.sanitizeSaveAs(saveAs.getName()));
+        long fileSize = 0;
+        int bytesTotal;
+        int bytesDownloaded = 0;
+        if (saveAs.exists() && observer.tryResumeDownload()) {
+            fileSize = saveAs.length();
+        }
+        try {
+            observer.stopCheck();
+        } catch (IOException e) {
+            observer.downloadErrored(url, Utils.getLocalizedString("download.interrupted"));
+            return;
+        }
+        if ((saveAs.exists() && !observer.tryResumeDownload() && !getFileExtFromMIME)
+                || (Utils.fuzzyExists(Paths.get(saveAs.getParent()), saveAs.getName()) && getFileExtFromMIME
+                        && !observer.tryResumeDownload())) {
+            if (Utils.getConfigBoolean("file.overwrite", false)) {
+                logger.info("[!] " + Utils.getLocalizedString("deleting.existing.file") + prettySaveAs);
+                if (!saveAs.delete()) logger.error("could not delete existing file: " + saveAs.getAbsolutePath());
+            } else {
+                logger.info("[!] " + Utils.getLocalizedString("skipping") + " " + url + " -- "
+                        + Utils.getLocalizedString("file.already.exists") + ": " + prettySaveAs);
+                observer.downloadExists(url, saveAs.toPath());
+                return;
+            }
+        }
+        URL urlToDownload = this.url;
+        boolean redirected = false;
+        int tries = 0; // Number of attempts to download
+        do {
+            tries += 1;
+            try {
+                logger.info("    Downloading file: " + urlToDownload + (tries > 0 ? " Retry #" + tries : ""));
+                observer.sendUpdate(STATUS.DOWNLOAD_STARTED, url.toExternalForm());
+
+                // Setup HTTP request
+                // HttpsURLConnection extends HttpURLConnection, and urlToDownload
+                // (not this.url) determines the scheme after redirects, so a
+                // single cast safely covers both http and https.
+                HttpURLConnection huc = (HttpURLConnection) urlToDownload.openConnection();
+                huc.setInstanceFollowRedirects(true);
+                // It is important to set both ConnectTimeout and ReadTimeout. If you don't then
+                // ripme will wait forever
+                // for the server to send data after connecting.
+                huc.setConnectTimeout(TIMEOUT);
+                huc.setReadTimeout(TIMEOUT);
+                huc.setRequestProperty("accept", "*/*");
+                if (!referrer.equals("")) {
+                    huc.setRequestProperty("Referer", referrer); // Sic
+                }
+                huc.setRequestProperty("User-agent", AbstractRipper.USER_AGENT);
+                StringBuilder cookie = new StringBuilder();
+                for (String key : cookies.keySet()) {
+                    if (!cookie.toString().equals("")) {
+                        cookie.append("; ");
+                    }
+                    cookie.append(key).append("=").append(cookies.get(key));
+                }
+                huc.setRequestProperty("Cookie", cookie.toString());
+                if (observer.tryResumeDownload()) {
+                    if (fileSize != 0) {
+                        huc.setRequestProperty("Range", "bytes=" + fileSize + "-");
+                    }
+                }
+                logger.debug(Utils.getLocalizedString("request.properties") + ": " + huc.getRequestProperties());
+                huc.connect();
+
+                int statusCode = huc.getResponseCode();
+                logger.debug("Status code: " + statusCode);
+                // If the server doesn't allow resuming downloads error out
+                if (statusCode != 206 && observer.tryResumeDownload() && saveAs.exists()) {
+                    // TODO find a better way to handle servers that don't support resuming
+                    // downloads than just erroring out
+                    throw new IOException(Utils.getLocalizedString("server.doesnt.support.resuming.downloads"));
+                }
+                if (statusCode / 100 == 3) { // 3xx Redirect
+                    if (!redirected) {
+                        // Don't increment retries on the first redirect
+                        tries--;
+                        redirected = true;
+                    }
+                    String location = huc.getHeaderField("Location");
+                    urlToDownload = new URI(location).toURL();
+                    // Throw exception so download can be retried
+                    throw new IOException("Redirect status code " + statusCode + " - redirect to " + location);
+                }
+                if (statusCode / 100 == 4) { // 4xx errors
+                    logger.error("[!] " + Utils.getLocalizedString("nonretriable.status.code") + " " + statusCode
+                            + " while downloading from " + url);
+                    observer.downloadErrored(url, Utils.getLocalizedString("nonretriable.status.code") + " "
+                            + statusCode + " while downloading " + url.toExternalForm());
+                    return; // Not retriable, drop out.
+                }
+                if (statusCode / 100 == 5) { // 5xx errors
+                    observer.downloadErrored(url, Utils.getLocalizedString("retriable.status.code") + " " + statusCode
+                            + " while downloading " + url.toExternalForm());
+                    // Throw exception so download can be retried
+                    throw new IOException(Utils.getLocalizedString("retriable.status.code") + " " + statusCode);
+                }
+                if (huc.getContentLength() == 503 && urlToDownload.getHost().endsWith("imgur.com")) {
+                    // Imgur image with 503 bytes is "404"
+                    logger.error("[!] Imgur image is 404 (503 bytes long): " + url);
+                    observer.downloadErrored(url, "Imgur image is 404: " + url.toExternalForm());
+                    return;
+                }
+
+                // If the ripper is using the bytes progress bar set bytesTotal to
+                // huc.getContentLength()
+                if (observer.useByteProgessBar()) {
+                    bytesTotal = huc.getContentLength();
+                    observer.setBytesTotal(bytesTotal);
+                    observer.sendUpdate(STATUS.TOTAL_BYTES, bytesTotal);
+                    logger.debug("Size of file at " + this.url + " = " + bytesTotal + "b");
+                }
+
+                // Save file
+                InputStream bis = new BufferedInputStream(huc.getInputStream());
+
+                // Check if we should get the file ext from the MIME type
+                if (getFileExtFromMIME) {
+                    String fileExt = URLConnection.guessContentTypeFromStream(bis);
+                    if (fileExt != null) {
+                        // guessContentTypeFromStream returns a MIME type such as "image/jpeg";
+                        // strip the "image/" prefix to get a usable file extension
+                        fileExt = fileExt.replaceAll("image/", "");
+                        saveAs = new File(saveAs.toString() + "." + fileExt);
+                    } else {
+                        logger.error("Was unable to get content type from stream");
+                        // Try to get the file type from the magic number
+                        byte[] magicBytes = new byte[8];
+                        bis.mark(magicBytes.length); // Mark explicitly so the reset() below is guaranteed to work
+                        bis.read(magicBytes, 0, 5);
+                        bis.reset();
+                        fileExt = Utils.getEXTFromMagic(magicBytes);
+                        if (fileExt != null) {
+                            saveAs = new File(saveAs.toString() + "." + fileExt);
+                        } else {
+                            logger.error(Utils.getLocalizedString("was.unable.to.get.content.type.using.magic.number"));
+                            logger.error(
+                                    Utils.getLocalizedString("magic.number.was") + ": " + Arrays.toString(magicBytes));
+                        }
+                    }
+                }
+                // If we're resuming a download we append data to the existing file
+                OutputStream fos = null;
+                if (statusCode == 206) {
+                    fos = new FileOutputStream(saveAs, true);
+                } else {
+                    try {
+                        fos = new FileOutputStream(saveAs);
+                    } catch (FileNotFoundException e) {
+                        // We do this because some filesystems have a max name length
+                        if (e.getMessage().contains("File name too long")) {
+                            logger.error("The filename " + saveAs.getName()
+                                    + " is to long to be saved on this file system.");
+                            logger.info("Shortening filename");
+                            String[] saveAsSplit = saveAs.getName().split("\\.");
+                            // Get the file extension so when we shorten the file name we don't cut off the
+                            // file extension
+                            String fileExt = saveAsSplit[saveAsSplit.length - 1];
+                            // The max limit for filenames on Linux with Ext3/4 is 255 bytes
+                            logger.info(saveAs.getName().substring(0, 254 - fileExt.length()) + fileExt);
+                            String filename = saveAs.getName().substring(0, 254 - fileExt.length()) + "." + fileExt;
+                            // We can't just use the new file name as the saveAs because the file name
+                            // doesn't include the
+                            // users save path, so we get the user save path from the old saveAs
+                            saveAs = new File(saveAs.getParentFile().getAbsolutePath() + File.separator + filename);
+                            fos = new FileOutputStream(saveAs);
+                        } else if (saveAs.getAbsolutePath().length() > 259 && Utils.isWindows()) {
+                            // Handle paths longer than the 260-character limit that Windows imposes
+                            fos = Files.newOutputStream(
+                                    Utils.shortenSaveAsWindows(saveAs.getParentFile().getPath(), saveAs.getName()));
+                            assert fos != null: "After shortenSaveAsWindows: " + saveAs.getAbsolutePath();
+                        }
+                        assert fos != null : "Could not open an output stream for " + saveAs + ": " + e.getMessage();
+                    }
+                }
+                byte[] data = new byte[1024 * 256];
+                int bytesRead;
+                boolean shouldSkipFileDownload = huc.getContentLength() / 1000000 >= 10 && AbstractRipper.isThisATest();
+                // If this is a test rip we skip large downloads
+                if (shouldSkipFileDownload) {
+                    logger.debug("Not downloading whole file because it is over 10mb and this is a test");
+                } else {
+                    while ((bytesRead = bis.read(data)) != -1) {
+                        try {
+                            observer.stopCheck();
+                        } catch (IOException e) {
+                            observer.downloadErrored(url, Utils.getLocalizedString("download.interrupted"));
+                            return;
+                        }
+                        fos.write(data, 0, bytesRead);
+                        if (observer.useByteProgessBar()) {
+                            bytesDownloaded += bytesRead;
+                            observer.setBytesCompleted(bytesDownloaded);
+                            observer.sendUpdate(STATUS.COMPLETED_BYTES, bytesDownloaded);
+                        }
+                    }
+                }
+                bis.close();
+                fos.close();
+                break; // Download successful: break out of infinite loop
+            } catch (SocketTimeoutException timeoutEx) {
+                // Handle the timeout
+                logger.error("[!] " + url.toExternalForm() + " timedout!");
+                // Download failed, break out of loop
+                break;
+            } catch (HttpStatusException hse) {
+                logger.debug(Utils.getLocalizedString("http.status.exception"), hse);
+                logger.error("[!] HTTP status " + hse.getStatusCode() + " while downloading from " + urlToDownload);
+                if (hse.getStatusCode() == 404 && Utils.getConfigBoolean("errors.skip404", false)) {
+                    observer.downloadErrored(url,
+                            "HTTP status code " + hse.getStatusCode() + " while downloading " + url.toExternalForm());
+                    return;
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.debug("IOException", e);
+                logger.error("[!] " + Utils.getLocalizedString("exception.while.downloading.file") + ": " + url + " - "
+                        + e.getMessage());
+            } catch (NullPointerException npe) {
+                logger.error("[!] " + Utils.getLocalizedString("failed.to.download") + " for URL " + url);
+                observer.downloadErrored(url,
+                        Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm());
+                return;
+            }
+            if (tries > this.retries) {
+                logger.error("[!] " + Utils.getLocalizedString("exceeded.maximum.retries") + " (" + this.retries
+                        + ") for URL " + url);
+                observer.downloadErrored(url,
+                        Utils.getLocalizedString("failed.to.download") + " " + url.toExternalForm());
+                return;
+            } else {
+                if (retrySleep > 0) {
+                    Utils.sleep(retrySleep);
+                }
+            }
+        } while (true);
+        observer.downloadCompleted(url, saveAs.toPath());
+        logger.info("[+] Saved " + url + " as " + this.prettySaveAs);
+    }
+
+}
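
A note on the resume path above: the thread asks the server for the tail of the file with a Range header and only appends when the server answers 206 Partial Content. A minimal standalone sketch of that handshake (the URL and file name are hypothetical, not part of the ripper):

    import java.io.*;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class ResumeSketch {
        public static void main(String[] args) throws IOException {
            File partial = new File("download.part"); // hypothetical partially-downloaded file
            long have = partial.exists() ? partial.length() : 0;
            HttpURLConnection conn = (HttpURLConnection) new URL("https://example.com/big.bin").openConnection();
            if (have > 0) {
                conn.setRequestProperty("Range", "bytes=" + have + "-"); // ask for the remaining bytes only
            }
            conn.connect();
            // 206 means the server honored the Range header, so append; anything else, start over
            boolean append = conn.getResponseCode() == HttpURLConnection.HTTP_PARTIAL;
            try (InputStream in = new BufferedInputStream(conn.getInputStream());
                 OutputStream out = new FileOutputStream(partial, append)) {
                in.transferTo(out);
            }
        }
    }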

+ 56 - 0
src/main/java/com/rarchives/ripme/ripper/DownloadThreadPool.java

@@ -0,0 +1,56 @@
+package com.rarchives.ripme.ripper;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import com.rarchives.ripme.utils.Utils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Simple wrapper around a FixedThreadPool.
+ */
+public class DownloadThreadPool {
+
+    private static final Logger logger = LogManager.getLogger(DownloadThreadPool.class);
+    private ThreadPoolExecutor threadPool = null;
+
+    public DownloadThreadPool() {
+        initialize("Main");
+    }
+
+    public DownloadThreadPool(String threadPoolName) {
+        initialize(threadPoolName);
+    }
+    
+    /**
+     * Initializes the threadpool.
+     * @param threadPoolName Name of the threadpool.
+     */
+    private void initialize(String threadPoolName) {
+        int threads = Utils.getConfigInteger("threads.size", 10);
+        logger.debug("Initializing " + threadPoolName + " thread pool with " + threads + " threads");
+        threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(threads);
+    }
+
+    /**
+     * Adds a task to the execution pool.
+     * @param t
+     *      Runnable to be executed.
+     */
+    public void addThread(Runnable t) {
+        threadPool.execute(t);
+    }
+
+    /**
+     * Shuts down the thread pool and waits up to an hour for queued downloads to finish.
+     */
+    public void waitForThreads() {
+        threadPool.shutdown();
+        try {
+            threadPool.awaitTermination(3600, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+            logger.error("[!] Interrupted while waiting for threads to finish: ", e);
+        }
+    }
+}
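
The pool is fire-and-forget: construct it, submit Runnables, then block until the queue drains. A quick hedged usage sketch (the Runnable here is a stand-in, not one of the ripper's download threads):

    DownloadThreadPool pool = new DownloadThreadPool("Example");
    for (int i = 0; i < 3; i++) {
        final int n = i;
        pool.addThread(() -> System.out.println("task " + n)); // any Runnable is accepted
    }
    pool.waitForThreads(); // shuts down the pool and waits up to an hour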

+ 160 - 0
src/main/java/com/rarchives/ripme/ripper/DownloadVideoThread.java

@@ -0,0 +1,160 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.BufferedInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import javax.net.ssl.HttpsURLConnection;
+
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+/**
+ * Thread for downloading video files.
+ * Includes retry logic, observer notifications, and other goodies.
+ */
+class DownloadVideoThread implements Runnable {
+
+    private static final Logger logger = LogManager.getLogger(DownloadVideoThread.class);
+
+    private final URL url;
+    private final Path saveAs;
+    private final String prettySaveAs;
+    private final AbstractRipper observer;
+    private final int retries;
+
+    public DownloadVideoThread(URL url, Path saveAs, AbstractRipper observer) {
+        super();
+        this.url = url;
+        this.saveAs = saveAs;
+        this.prettySaveAs = Utils.removeCWD(saveAs);
+        this.observer = observer;
+        this.retries = Utils.getConfigInteger("download.retries", 1);
+    }
+
+    /**
+     * Attempts to download the file. Retries as needed.
+     * Notifies observers upon completion/error/warn.
+     */
+    @Override
+    public void run() {
+        try {
+            observer.stopCheck();
+        } catch (IOException e) {
+            observer.downloadErrored(url, "Download interrupted");
+            return;
+        }
+        if (Files.exists(saveAs)) {
+            if (Utils.getConfigBoolean("file.overwrite", false)) {
+                logger.info("[!] Deleting existing file" + prettySaveAs);
+                try {
+                    Files.delete(saveAs);
+                } catch (IOException e) {
+                    e.printStackTrace();
+                }
+            } else {
+                logger.info("[!] Skipping " + url + " -- file already exists: " + prettySaveAs);
+                observer.downloadExists(url, saveAs);
+                return;
+            }
+        }
+
+        int bytesTotal, bytesDownloaded = 0;
+        try {
+            bytesTotal = getTotalBytes(this.url);
+        } catch (IOException e) {
+            logger.error("Failed to get file size at " + this.url, e);
+            observer.downloadErrored(this.url, "Failed to get file size of " + this.url);
+            return;
+        }
+        observer.setBytesTotal(bytesTotal);
+        observer.sendUpdate(STATUS.TOTAL_BYTES, bytesTotal);
+        logger.debug("Size of file at " + this.url + " = " + bytesTotal + "b");
+
+        int tries = 0; // Number of attempts to download
+        do {
+            InputStream bis = null;
+            OutputStream fos = null;
+            byte[] data = new byte[1024 * 256];
+            int bytesRead;
+            try {
+                logger.info("    Downloading file: " + url + (tries > 0 ? " Retry #" + tries : ""));
+                observer.sendUpdate(STATUS.DOWNLOAD_STARTED, url.toExternalForm());
+
+                // Setup HTTP request
+                HttpURLConnection huc;
+                if (this.url.toString().startsWith("https")) {
+                    huc = (HttpsURLConnection) this.url.openConnection();
+                }
+                else {
+                    huc = (HttpURLConnection) this.url.openConnection();
+                }
+                huc.setInstanceFollowRedirects(true);
+                huc.setConnectTimeout(0); // 0 = no connect timeout; block until the server responds
+                huc.setRequestProperty("accept", "*/*");
+                huc.setRequestProperty("Referer", this.url.toExternalForm()); // Sic
+                huc.setRequestProperty("User-agent", AbstractRipper.USER_AGENT);
+                tries += 1;
+                logger.debug("Request properties: " + huc.getRequestProperties().toString());
+                huc.connect();
+                // Check status code
+                bis = new BufferedInputStream(huc.getInputStream());
+                fos = Files.newOutputStream(saveAs);
+                while ( (bytesRead = bis.read(data)) != -1) {
+                    try {
+                        observer.stopCheck();
+                    } catch (IOException e) {
+                        observer.downloadErrored(url, "Download interrupted");
+                        return;
+                    }
+                    fos.write(data, 0, bytesRead);
+                    bytesDownloaded += bytesRead;
+                    observer.setBytesCompleted(bytesDownloaded);
+                    observer.sendUpdate(STATUS.COMPLETED_BYTES, bytesDownloaded);
+                }
+                bis.close();
+                fos.close();
+                break; // Download successful: break out of infinite loop
+            } catch (IOException e) {
+                logger.error("[!] Exception while downloading file: " + url + " - " + e.getMessage(), e);
+            } finally {
+                // Close any open streams
+                try {
+                    if (bis != null) { bis.close(); }
+                } catch (IOException ignored) { }
+                try {
+                    if (fos != null) { fos.close(); }
+                } catch (IOException ignored) { }
+            }
+            if (tries > this.retries) {
+                logger.error("[!] Exceeded maximum retries (" + this.retries + ") for URL " + url);
+                observer.downloadErrored(url, "Failed to download " + url.toExternalForm());
+                return;
+            }
+        } while (true);
+        observer.downloadCompleted(url, saveAs);
+        logger.info("[+] Saved " + url + " as " + this.prettySaveAs);
+    }
+
+    /**
+     * Issues a HEAD request to find the size of the file to download.
+     * @param url
+     *      Target URL
+     * @return
+     *      Content length reported by the server (-1 if unknown)
+     */
+    private int getTotalBytes(URL url) throws IOException {
+        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+        conn.setRequestMethod("HEAD");
+        conn.setRequestProperty("accept",  "*/*");
+        conn.setRequestProperty("Referer", this.url.toExternalForm()); // Sic
+        conn.setRequestProperty("User-agent", AbstractRipper.USER_AGENT);
+        return conn.getContentLength();
+    }
+
+}
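
One caveat worth flagging in getTotalBytes(): HttpURLConnection.getContentLength() returns -1 when the server omits Content-Length (e.g. chunked transfer), which would feed a negative total into the progress math above. A defensive variant, shown as a sketch rather than as the shipped behavior:

    private int getTotalBytesSafe(URL url) throws IOException {
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("HEAD");
        conn.setRequestProperty("User-agent", AbstractRipper.USER_AGENT);
        int length = conn.getContentLength();
        // Fall back to 1 so the percentage math stays sane when the size is unknown
        return length > 0 ? length : 1;
    }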

+ 21 - 0
src/main/java/com/rarchives/ripme/ripper/RipperInterface.java

@@ -0,0 +1,21 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+/**
+ * I have no idea why I made this interface. Everything is captured within the AbstractRipper.
+ * Oh well, here's to encapsulation and abstraction! (raises glass)
+ * 
+ * (cheers!)
+ */
+interface RipperInterface {
+    void rip() throws IOException, URISyntaxException;
+    boolean canRip(URL url);
+    URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException;
+    void setWorkingDir(URL url) throws IOException, URISyntaxException;
+    String getHost();
+    String getGID(URL url) throws MalformedURLException, URISyntaxException;
+}
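
For orientation, a minimal (entirely hypothetical) implementer of this interface would look like the sketch below; real rippers inherit all of this from AbstractRipper instead:

    class ExampleRipper implements RipperInterface {
        public void rip() { /* fetch pages, queue downloads */ }
        public boolean canRip(URL url) { return url.getHost().endsWith("example.com"); }
        public URL sanitizeURL(URL url) { return url; }
        public void setWorkingDir(URL url) { /* e.g. create ./rips/example_<GID> */ }
        public String getHost() { return "example"; }
        public String getGID(URL url) { return url.getPath().replaceAll("/", "_"); }
    }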

+ 214 - 0
src/main/java/com/rarchives/ripme/ripper/VideoRipper.java

@@ -0,0 +1,214 @@
+package com.rarchives.ripme.ripper;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.util.Map;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Utils;
+
+public abstract class VideoRipper extends AbstractRipper {
+
+    private static final Logger logger = LogManager.getLogger(VideoRipper.class);
+
+    private int bytesTotal = 1; // Initialized to 1 (not 0) so getCompletionPercentage() never divides by zero
+    private int bytesCompleted = 1;
+
+    protected VideoRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    public abstract void rip() throws IOException, URISyntaxException;
+
+    public abstract String getHost();
+
+    public abstract String getGID(URL url) throws MalformedURLException;
+
+    @Override
+    public void setBytesTotal(int bytes) {
+        this.bytesTotal = bytes;
+    }
+
+    @Override
+    public void setBytesCompleted(int bytes) {
+        this.bytesCompleted = bytes;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) {
+        return "videos";
+    }
+
+    @Override
+    public boolean addURLToDownload(URL url, Path saveAs) {
+        if (Utils.getConfigBoolean("urls_only.save", false)) {
+            // Output URL to file
+            String urlFile = this.workingDir + "/urls.txt";
+
+            try (FileWriter fw = new FileWriter(urlFile, true)) {
+                fw.write(url.toExternalForm());
+                fw.write("\n");
+
+                RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, urlFile);
+                observer.update(this, msg);
+            } catch (IOException e) {
+                logger.error("Error while writing to " + urlFile, e);
+                return false;
+            }
+        } else {
+            if (isThisATest()) {
+                // Tests shouldn't download the whole video
+                // Just change this.url to the download URL so the test knows we found it.
+                logger.debug("Test rip, found URL: " + url);
+                this.url = url;
+                return true;
+            }
+            if (shouldIgnoreURL(url)) {
+                sendUpdate(STATUS.DOWNLOAD_SKIP, "Skipping " + url.toExternalForm() + " - ignored extension");
+                return false;
+            }
+            threadPool.addThread(new DownloadVideoThread(url, saveAs, this));
+        }
+        return true;
+    }
+
+    @Override
+    public boolean addURLToDownload(URL url, Path saveAs, String referrer, Map<String, String> cookies, Boolean getFileExtFromMIME) {
+        return addURLToDownload(url, saveAs);
+    }
+
+    /**
+     * Creates & sets working directory based on URL.
+     *
+     * @param url Target URL
+     */
+    @Override
+    public void setWorkingDir(URL url) throws IOException {
+        Path wd = Utils.getWorkingDirectory();
+        // TODO - change to nio
+        String path = wd.toAbsolutePath().toString();
+
+        if (!path.endsWith(File.separator)) {
+            path += File.separator;
+        }
+
+        path += "videos" + File.separator;
+        workingDir = new File(path);
+
+        if (!workingDir.exists()) {
+            logger.info("[+] Creating directory: " + Utils.removeCWD(workingDir.toPath()));
+            workingDir.mkdirs();
+        }
+
+        logger.debug("Set working directory to: " + workingDir);
+    }
+
+    /**
+     * @return Returns % of video done downloading.
+     */
+    @Override
+    public int getCompletionPercentage() {
+        return (int) (100 * (bytesCompleted / (float) bytesTotal));
+    }
+
+    /**
+     * Runs if download successfully completed.
+     *
+     * @param url    Target URL
+     * @param saveAs Path to file, including filename.
+     */
+    @Override
+    public void downloadCompleted(URL url, Path saveAs) {
+        if (observer == null) {
+            return;
+        }
+
+        try {
+            String path = Utils.removeCWD(saveAs);
+            RipStatusMessage msg = new RipStatusMessage(STATUS.DOWNLOAD_COMPLETE, path);
+            observer.update(this, msg);
+
+            checkIfComplete();
+        } catch (Exception e) {
+            logger.error("Exception while updating observer: ", e);
+        }
+    }
+
+    /**
+     * Runs if the download errored somewhere.
+     *
+     * @param url    Target URL
+     * @param reason Reason why the download failed.
+     */
+    @Override
+    public void downloadErrored(URL url, String reason) {
+        if (observer == null) {
+            return;
+        }
+
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_ERRORED, url + " : " + reason));
+        checkIfComplete();
+    }
+
+    /**
+     * Runs if user tries to redownload an already existing file.
+     *
+     * @param url  Target URL
+     * @param file Existing file
+     */
+    @Override
+    public void downloadExists(URL url, Path file) {
+        if (observer == null) {
+            return;
+        }
+
+        observer.update(this, new RipStatusMessage(STATUS.DOWNLOAD_WARN, url + " already saved as " + file));
+        checkIfComplete();
+    }
+
+    /**
+     * Gets the status and changes it to a human-readable form.
+     *
+     * @return Status of current download.
+     */
+    @Override
+    public String getStatusText() {
+        return String.valueOf(getCompletionPercentage()) +
+                "%  - " +
+                Utils.bytesToHumanReadable(bytesCompleted) +
+                " / " +
+                Utils.bytesToHumanReadable(bytesTotal);
+    }
+
+    /**
+     * Sanitizes URL.
+     * Usually just returns itself.
+     */
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException {
+        return url;
+    }
+
+    /**
+     * Notifies observers and updates state if all files have been ripped.
+     */
+    @Override
+    protected void checkIfComplete() {
+        if (observer == null) {
+            return;
+        }
+
+        if (bytesCompleted >= bytesTotal) {
+            super.checkIfComplete();
+        }
+    }
+
+}
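
A concrete subclass only has to locate the direct video URL and hand it to addURLToDownload(). A compressed, hypothetical example (the host, regex, and page selector are invented; it leans on helpers like Http and waitForThreads that appear elsewhere in this commit):

    public class ExampleVideoRipper extends VideoRipper {
        public ExampleVideoRipper(URL url) throws IOException { super(url); }
        public String getHost() { return "example"; }
        public boolean canRip(URL url) { return url.getHost().endsWith("example.com"); }
        public String getGID(URL url) throws MalformedURLException {
            Matcher m = Pattern.compile("https?://example\\.com/video/(\\d+)").matcher(url.toExternalForm());
            if (m.matches()) { return m.group(1); }
            throw new MalformedURLException("Expected example.com/video/ID - got " + url + " instead");
        }
        public void rip() throws IOException, URISyntaxException {
            Document doc = Http.url(this.url).get();
            String vidUrl = doc.select("source[src]").attr("abs:src"); // hypothetical page layout
            addURLToDownload(new URI(vidUrl).toURL(), Paths.get(workingDir.toString(), getGID(url) + ".mp4"));
            waitForThreads();
        }
    }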

+ 82 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/AllporncomicRipper.java

@@ -0,0 +1,82 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class AllporncomicRipper extends AbstractHTMLRipper {
+
+    public AllporncomicRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "allporncomic";
+    }
+
+    @Override
+    public String getDomain() {
+        return "allporncomic.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://allporncomic.com/porncomic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+        p = Pattern.compile("^https?://allporncomic.com/porncomic/([a-zA-Z0-9_\\-]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected allporncomic URL format: " +
+                "allporncomic.com/TITLE/CHAPTER - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select(".wp-manga-chapter-img")) {
+            result.add(el.attr("data-src"));
+        }
+        return result;
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("^https?://allporncomic.com/porncomic/([a-zA-Z0-9_\\-]+)/?$");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        return ma.matches();
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select(".wp-manga-chapter > a")) {
+            urlsToAddToQueue.add(elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
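
To make the GID mapping concrete: the first pattern in getGID() turns a chapter URL into TITLE_CHAPTER, while the second leaves a bare series URL as TITLE. A tiny check (the album name is made up):

    Pattern p = Pattern.compile("https?://allporncomic.com/porncomic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$");
    Matcher m = p.matcher("https://allporncomic.com/porncomic/some-title/chapter-2/");
    if (m.matches()) {
        System.out.println(m.group(1) + "_" + m.group(2)); // prints "some-title_chapter-2"
    }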

+ 336 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ArtStationRipper.java

@@ -0,0 +1,336 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONObject;
+import org.jsoup.Connection;
+import org.jsoup.Connection.Method;
+import org.jsoup.Connection.Response;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class ArtStationRipper extends AbstractJSONRipper {
+
+    private static final Logger logger = LogManager.getLogger(ArtStationRipper.class);
+
+    enum URL_TYPE {
+        SINGLE_PROJECT, USER_PORTFOLIO, UNKNOWN
+    }
+
+    private ParsedURL albumURL;
+    private String projectName;
+    private Integer projectIndex;
+    private Integer projectPageNumber;
+
+    public ArtStationRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    protected String getDomain() {
+        return "artstation.com";
+    }
+
+    @Override
+    public String getHost() {
+        return "ArtStation";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        JSONObject groupData;
+
+        // Parse URL and store for later use
+        albumURL = parseURL(url);
+
+        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
+            // URL points to single project, use project title as GID
+            try {
+                groupData = getJson(albumURL.getLocation());
+            } catch (IOException | URISyntaxException e) {
+                throw new MalformedURLException("Couldn't load JSON from " + albumURL.getLocation());
+            }
+            return groupData.getString("title");
+        }
+
+        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
+            // URL points to user portfolio, use user's full name as GID
+            String userInfoURL = "https://www.artstation.com/users/" + albumURL.getID() + "/quick.json";
+            try {
+                groupData = getJson(userInfoURL);
+            } catch (IOException | URISyntaxException e) {
+                throw new MalformedURLException("Couldn't load JSON from " + userInfoURL);
+            }
+            return groupData.getString("full_name");
+        }
+
+        // No JSON found in the URL entered, can't rip
+        throw new MalformedURLException(
+                "Expected an ArtStation project URL or user profile URL - got " + url + " instead");
+    }
+
+    @Override
+    protected JSONObject getFirstPage() throws IOException, URISyntaxException {
+        if (albumURL.getType() == URL_TYPE.SINGLE_PROJECT) {
+            // URL points to JSON of a single project, just return it
+            return getJson(albumURL.getLocation());
+        }
+
+        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
+            // URL points to JSON of a list of projects, load it to parse individual
+            // projects
+            JSONObject albumContent = getJson(albumURL.getLocation());
+
+            if (albumContent.getInt("total_count") > 0) {
+                // Get JSON of the first project and return it
+                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(0);
+                ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL());
+                return getJson(projectURL.getLocation());
+            }
+        }
+
+        throw new IOException("URL specified points to an user with empty portfolio");
+    }
+
+    @Override
+    protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException {
+        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
+            // Initialize the page number if it hasn't been initialized already
+            if (projectPageNumber == null) {
+                projectPageNumber = 1;
+            }
+
+            // Each page holds a maximum of 50 projects. Initialize the index if it hasn't
+            // been initialized already or increment page number and reset the index if all
+            // projects of the current page were already processed
+            if (projectIndex == null) {
+                projectIndex = 0;
+            } else if (projectIndex > 49) {
+                projectPageNumber++;
+                projectIndex = 0;
+            }
+
+            int currentProject = ((projectPageNumber - 1) * 50) + (projectIndex + 1);
+            JSONObject albumContent = getJson(albumURL.getLocation() + "?page=" + projectPageNumber);
+
+            if (albumContent.getInt("total_count") > currentProject) {
+                // Get JSON of the next project and return it
+                JSONObject projectInfo = albumContent.getJSONArray("data").getJSONObject(projectIndex);
+                ParsedURL projectURL = parseURL(new URI(projectInfo.getString("permalink")).toURL());
+                projectIndex++;
+                return getJson(projectURL.getLocation());
+            }
+
+            throw new IOException("No more projects");
+        }
+
+        throw new IOException("Downloading a single project");
+    }
+
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) {
+        List<String> assetURLs = new ArrayList<>();
+        JSONObject currentObject;
+
+        // Update project name variable from JSON data. Used by downloadURL() to create
+        // subfolders when input URL is URL_TYPE.USER_PORTFOLIO
+        projectName = json.getString("title");
+
+        for (int i = 0; i < json.getJSONArray("assets").length(); i++) {
+            currentObject = json.getJSONArray("assets").getJSONObject(i);
+
+            if (!currentObject.getString("image_url").isEmpty()) {
+                // TODO: Find a way to rip external content.
+                // ArtStation hosts only image content, everything else (videos, 3D Models, etc)
+                // is hosted in other websites and displayed through embedded HTML5 players
+                assetURLs.add(currentObject.getString("image_url"));
+            }
+        }
+
+        return assetURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        if (albumURL.getType() == URL_TYPE.USER_PORTFOLIO) {
+            // Replace not allowed characters with underlines
+            String folderName = projectName.replaceAll("[\\\\/:*?\"<>|]", "_");
+
+            // Folder name also can't end with dots or spaces, strip them
+            folderName = folderName.replaceAll("\\s+$", "");
+            folderName = folderName.replaceAll("\\.+$", "");
+
+            // Downloading multiple projects, separate each one in subfolders
+            addURLToDownload(url, "", folderName);
+        } else {
+            addURLToDownload(url);
+        }
+    }
+
+    @Override
+    public String normalizeUrl(String url) {
+        // Strip URL parameters
+        return url.replaceAll("\\?\\w+$", "");
+    }
+
+    private static class ParsedURL {
+        URL_TYPE urlType;
+        String jsonURL, urlID;
+
+        /**
+         * Construct a new ParsedURL object.
+         *
+         * @param urlType URL_TYPE enum containing the URL type
+         * @param jsonURL String containing the JSON URL location
+         * @param urlID   String containing the ID of this URL
+         *
+         */
+        ParsedURL(URL_TYPE urlType, String jsonURL, String urlID) {
+            this.urlType = urlType;
+            this.jsonURL = jsonURL;
+            this.urlID = urlID;
+        }
+
+        /**
+         * Get URL Type of this ParsedURL object.
+         *
+         * @return URL_TYPE enum containing this object type
+         *
+         */
+        URL_TYPE getType() {
+            return this.urlType;
+        }
+
+        /**
+         * Get JSON location of this ParsedURL object.
+         *
+         * @return String containing the JSON URL
+         *
+         */
+        String getLocation() {
+            return this.jsonURL;
+        }
+
+        /**
+         * Get ID of this ParsedURL object.
+         *
+         * @return For URL_TYPE.SINGLE_PROJECT, returns the project hash. For
+         *         URL_TYPE.USER_PORTFOLIO, returns the account name
+         */
+        String getID() {
+            return this.urlID;
+        }
+    }
+
+    /**
+     * Parses an ArtStation URL.
+     *
+     * @param url URL to an ArtStation user profile
+     *            (https://www.artstation.com/username) or single project
+     *            (https://www.artstation.com/artwork/projectid)
+     * @return ParsedURL object containing URL type, JSON location and ID (stores
+     *         account name or project hash, depending on the URL type identified)
+     *
+     */
+    private ParsedURL parseURL(URL url) {
+        String htmlSource;
+        ParsedURL parsedURL;
+
+        // Load HTML Source of the specified URL
+        try {
+            // htmlSource = Http.url(url).get().html();
+            Connection con = Http.url(url).method(Method.GET).connection();
+            con.ignoreHttpErrors(true);
+            con.userAgent("Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0");
+            con.header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8");
+            con.header("Accept-Language", "en-US,en;q=0.5");
+//            con.header("Accept-Encoding", "gzip, deflate, br");
+            con.header("Upgrade-Insecure-Requests", "1");
+            Response res = con.execute();
+            int status = res.statusCode();
+
+            if (status / 100 == 2) {
+                htmlSource = res.parse().html();
+            } else if (status == 403 && url.toString().contains("artwork/")) {
+                // Catches the Cloudflare page (error 403),
+                // usually returned for artwork URLs (artstation.com/artwork/someProjectId)
+                String urlId = url.toString().substring(url.toString().lastIndexOf("/") + 1);
+                String jsonURL = "https://www.artstation.com/projects/" + urlId + ".json";
+                parsedURL = new ParsedURL(URL_TYPE.SINGLE_PROJECT, jsonURL, urlId);
+                return parsedURL;
+            } else {
+                logger.error("Couldnt fetch URL: " + url);
+                throw new IOException("Error fetching URL: " + url + " Status Code: " + status);
+            }
+        } catch (IOException e) {
+            htmlSource = "";
+        }
+
+        // Check if HTML Source of the specified URL references a project
+        Pattern p = Pattern.compile("'/projects/(\\w+)\\.json'");
+        Matcher m = p.matcher(htmlSource);
+        if (m.find()) {
+            parsedURL = new ParsedURL(URL_TYPE.SINGLE_PROJECT,
+                    "https://www.artstation.com/projects/" + m.group(1) + ".json", m.group(1));
+            return parsedURL;
+        }
+
+        // Check if HTML Source of the specified URL references a user profile
+        p = Pattern.compile("'/users/([\\w-]+)/quick\\.json'");
+        m = p.matcher(htmlSource);
+        if (m.find()) {
+            parsedURL = new ParsedURL(URL_TYPE.USER_PORTFOLIO,
+                    "https://www.artstation.com/users/" + m.group(1) + "/projects.json", m.group(1));
+            return parsedURL;
+        }
+
+        // HTML Source of the specified URL doesn't reference a user profile or project
+        parsedURL = new ParsedURL(URL_TYPE.UNKNOWN, null, null);
+        return parsedURL;
+    }
+
+    // Use this method instead of direct call to Http.url(url).getJson() to avoid
+    // cloudflare 403 page.
+    private JSONObject getJson(URL url) throws IOException {
+        Connection con = Http.url(url).method(Method.GET).connection();
+        con.ignoreHttpErrors(true);
+        con.ignoreContentType(true);
+        con.userAgent(
+                "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11");
+        con.header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8");
+        con.header("Accept-Language", "en-US,en;q=0.5");
+//        con.header("Accept-Encoding", "gzip, deflate, br");
+        con.header("Upgrade-Insecure-Requests", "1");
+        Response res = con.execute();
+        int status = res.statusCode();
+        if (status / 100 == 2) {
+            String jsonString = res.body();
+            return new JSONObject(jsonString);
+        }
+        throw new IOException("Error fetching json. Status code:" + status);
+    }
+
+    private JSONObject getJson(String url) throws IOException, URISyntaxException {
+        return getJson(new URI(url).toURL());
+    }
+
+}
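
The portfolio pagination in getNextPage() tracks a page number and an index within the page; the absolute project number falls out as (page - 1) * 50 + (index + 1). Spelled out with illustrative values:

    int page = 2, index = 3; // 4th project on the 2nd page
    int currentProject = ((page - 1) * 50) + (index + 1);
    System.out.println(currentProject); // 54: each page holds at most 50 projects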

+ 69 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ArtstnRipper.java

@@ -0,0 +1,69 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Response;
+
+import com.rarchives.ripme.utils.Http;
+
+// TODO: Does this ripper need to exist, or can it be merged with ArtStationRipper
+// by adding another URL to the list of URLs it can handle?
+// Though, it's curious that this is so much shorter of an implementation than ArtStationRipper.
+
+/*
+ * Ripper for ArtStation's short URL domain.
+ * Example URL: https://artstn.co/p/JlE15Z
+ */
+
+public class ArtstnRipper extends ArtStationRipper {
+
+    private static final Logger logger = LogManager.getLogger(ArtstnRipper.class);
+
+    public URL artStationUrl = null;
+
+    public ArtstnRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return url.getHost().endsWith("artstn.co");
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        if (artStationUrl == null) {
+            // Resolve the short URL only once.
+            try {
+                artStationUrl = getFinalUrl(url);
+                if (artStationUrl == null) {
+                    throw new IOException("Null url received.");
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.error("Couldn't resolve URL.", e);
+                throw new MalformedURLException("Couldn't resolve short URL " + url);
+            }
+        }
+        return super.getGID(artStationUrl);
+    }
+
+    public URL getFinalUrl(URL url) throws IOException, URISyntaxException {
+        if (url.getHost().endsWith("artstation.com")) {
+            return url;
+        }
+
+        logger.info("Checking url: " + url);
+        Response response = Http.url(url).connection().followRedirects(false).execute();
+        if (response.statusCode() / 100 == 3 && response.hasHeader("location")) {
+            return getFinalUrl(new URI(response.header("location")).toURL());
+        } else {
+            return null;
+        }
+    }
+}
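
Note that getFinalUrl() recurses until it stops seeing 3xx responses, so a redirect loop would never terminate. If that ever becomes a problem, a bounded variant is a small change (a sketch, not the current behavior):

    public URL getFinalUrl(URL url, int hopsLeft) throws IOException, URISyntaxException {
        if (url.getHost().endsWith("artstation.com")) {
            return url;
        }
        if (hopsLeft <= 0) {
            return null; // give up rather than follow redirects forever
        }
        Response response = Http.url(url).connection().followRedirects(false).execute();
        if (response.statusCode() / 100 == 3 && response.hasHeader("location")) {
            return getFinalUrl(new URI(response.header("location")).toURL(), hopsLeft - 1);
        }
        return null;
    }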

+ 20 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/BaraagRipper.java

@@ -0,0 +1,20 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+public class BaraagRipper extends MastodonRipper {
+    public BaraagRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "baraag";
+    }
+
+    @Override
+    public String getDomain() {
+        return "baraag.net";
+    }
+}

+ 139 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/BatoRipper.java

@@ -0,0 +1,139 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class BatoRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(BatoRipper.class);
+
+    public BatoRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "bato";
+    }
+
+    @Override
+    public String getDomain() {
+        return "bato.to";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        // As this is just for queue support, it doesn't matter what this if returns
+        p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "";
+        }
+        throw new MalformedURLException("Expected bato.to URL format: " +
+                "bato.to/chapter/ID - got " + url + " instead");
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        return m.matches();
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select("div.main > div > a")) {
+            urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            return getHost() + "_" + getGID(url) + "_" + getCachedFirstPage().select("title").first().text().replaceAll(" ", "_");
+        } catch (IOException e) {
+            // Fall back to default album naming convention
+            logger.info("Unable to find title at " + url);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        Pattern p = Pattern.compile("https?://bato.to/series/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return true;
+        }
+
+        p = Pattern.compile("https?://bato.to/chapter/([\\d]+)/?");
+        m = p.matcher(url.toExternalForm());
+        return m.matches();
+    }
+
+    public String scanForImageList(Pattern p, String scriptData) {
+        for (String line : scriptData.split("\n")) {
+            Matcher m = p.matcher(line.strip());
+            if (m.matches()) {
+                return m.group(1);
+            }
+        }
+        return "[]";
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element script : doc.select("script")) {
+            if (script.data().contains("imgHttps")) {
+                String s = script.data();
+                logger.info("Script data: " + s);
+
+                Pattern p = Pattern.compile(".*imgHttps = (\\[\"[^\\];]*\"\\]);.*");
+                Matcher m = p.matcher(s);
+                String json = scanForImageList(p, s);
+
+                logger.info("JSON: " + json);
+
+                JSONArray images = new JSONArray(json);
+                for (int i = 0; i < images.length(); i++) {
+                    result.add(images.getString(i));
+                }
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        sleep(500);
+        addURLToDownload(url, getPrefix(index));
+    }
+}
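
scanForImageList() is a line-by-line regex scan of the inline script that defines imgHttps. Against a representative script line (invented here), it extracts the JSON array that getURLsFromPage() then feeds to JSONArray:

    Pattern p = Pattern.compile(".*imgHttps = (\\[\"[^\\];]*\"\\]);.*");
    String script = "const imgHttps = [\"https://example.net/1.jpg\",\"https://example.net/2.jpg\"];";
    Matcher m = p.matcher(script);
    System.out.println(m.matches() ? m.group(1) : "[]"); // prints the bracketed array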

+ 111 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/BooruRipper.java

@@ -0,0 +1,111 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+public class BooruRipper extends AbstractHTMLRipper {
+    private static final Logger logger = LogManager.getLogger(BooruRipper.class);
+
+    private static Pattern gidPattern = null;
+
+    public BooruRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return url.toExternalForm().contains("xbooru") || url.toExternalForm().contains("gelbooru");
+    }
+
+    @Override
+    public String getHost() {
+        logger.info(url.toExternalForm().split("/")[2]);
+        return url.toExternalForm().split("/")[2].split("\\.")[0];
+    }
+
+    @Override
+    public String getDomain() {
+        return url.toExternalForm().split("/")[2];
+    }
+
+    private String getPage(int num) throws MalformedURLException {
+        return "http://" + getHost() + ".com/index.php?page=dapi&s=post&q=index&pid=" + num + "&tags=" + getTerm(url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        return Http.url(getPage(0)).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        int offset = Integer.parseInt(doc.getElementsByTag("posts").first().attr("offset"));
+        int num = Integer.parseInt(doc.getElementsByTag("posts").first().attr("count"));
+
+        if (offset + 100 > num) {
+            return null;
+        }
+
+        return Http.url(getPage(offset / 100 + 1)).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> res = new ArrayList<>(100);
+        for (Element e : page.getElementsByTag("post")) {
+            res.add(e.absUrl("file_url") + "#" + e.attr("id"));
+        }
+        return res;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, Utils.getConfigBoolean("download.save_order", true) ? url.getRef() + "-" : "");
+    }
+
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?(x|gel)booru\\.com/(index.php)?.*([?&]tags=([a-zA-Z0-9$_.+!*'(),%-]+))(&|(#.*)?$)");
+        }
+
+        Matcher m = gidPattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(4);
+        }
+
+        throw new MalformedURLException("Expected xbooru.com URL format: " + getHost() + ".com/index.php?tags=searchterm - got " + url + " instead");
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        try {
+            // Get the search term and make it filesystem safe
+            String term = getTerm(url).replaceAll("&tags=", "");
+            return Utils.filesystemSafe(term);
+        } catch (Exception ex) {
+            logger.error("Error getting GID from URL: " + url, ex);
+        }
+
+        throw new MalformedURLException("Expected xbooru.com URL format: " + getHost() + ".com/index.php?tags=searchterm - got " + url + " instead");
+    }
+}
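
The Gelbooru-style dapi endpoint pages in blocks of 100; getNextPage() reads the offset and count attributes from the <posts> element to decide whether another page exists. The arithmetic, with illustrative numbers:

    int offset = 100, count = 250; // from <posts offset=".." count="..">
    boolean morePages = offset + 100 <= count; // true: posts remain after this page
    int nextPid = offset / 100 + 1; // pid parameter of the next API page, here 2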

+ 98 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/CfakeRipper.java

@@ -0,0 +1,98 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class CfakeRipper extends AbstractHTMLRipper {
+    public CfakeRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "cfake";
+    }
+
+    @Override
+    public String getDomain() {
+        return "cfake.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://cfake\\.com/images/celebrity/([a-zA-Z1-9_-]*)/\\d+/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected cfake URL format: " +
+                "cfake.com/images/celebrity/MODEL/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("div#wrapper_path div#content_path div#num_page").last();
+        if (elem == null) {
+            throw new IOException("No more pages (cannot find nav)");
+        }
+
+        Element nextAnchor = elem.select("a").first();
+        if (nextAnchor == null) {
+            throw new IOException("No more pages (cannot find anchor)");
+        }
+
+        Elements nextSpans = nextAnchor.select("span");
+        if (nextSpans.isEmpty()) {
+            // This is the expected case that we're done iterating.
+            throw new IOException("No more pages (last page)");
+        }
+
+        // Use the nextAnchor (parent of the span) for the URL
+        String nextPage = nextAnchor.attr("href");
+
+        // Sometimes this returns an empty string; this stops that
+        if (nextPage.equals("")) {
+            return null;
+        } else {
+            return Http.url("https://cfake.com" + nextPage).get();
+        }
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("div#media_content .responsive .gallery > a img")) {
+            // Convert found src value e.g. /medias/thumbs/2025/17358722979850276d_cfake.jpg
+            // to photo src value e.g.
+            // https://cfake.com/medias/photos/2025/17358722979850276d_cfake.jpg
+            String imageSource = el.attr("src");
+            imageSource = imageSource.replace("thumbs", "photos");
+            result.add("https://cfake.com" + imageSource);
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}
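
The thumbnail-to-full-size conversion in getURLsFromPage() is a plain string substitution, using the sample path from the comment above:

    String src = "/medias/thumbs/2025/17358722979850276d_cfake.jpg";
    String full = "https://cfake.com" + src.replace("thumbs", "photos");
    // full -> https://cfake.com/medias/photos/2025/17358722979850276d_cfake.jpg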

+ 294 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ChanRipper.java

@@ -0,0 +1,294 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.rippers.ripperhelpers.ChanSite;
+import com.rarchives.ripme.utils.RipUtils;
+import com.rarchives.ripme.utils.Utils;
+
+public class ChanRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ChanRipper.class);
+
+    private static List<ChanSite> bakedin_explicit_domains = Arrays.asList(
+            new ChanSite("boards.4chan.org",   Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org", "is3.4chan.org")),
+            new ChanSite("boards.4channel.org",   Arrays.asList("4cdn.org", "is.4chan.org", "is2.4chan.org", "is3.4chan.org")),
+            new ChanSite("4archive.org",  "imgur.com"),
+            new ChanSite("archive.4plebs.org", "img.4plebs.org"),
+            new ChanSite("yuki.la", "ii.yuki.la"),
+            new ChanSite("55chan.org"),
+            new ChanSite("desuchan.net"),
+            new ChanSite("boards.420chan.org"),
+            new ChanSite("7chan.org"),
+            new ChanSite("desuarchive.org", "desu-usergeneratedcontent.xyz"),
+            new ChanSite("8ch.net", "media.8ch.net"),
+            new ChanSite("thebarchive.com"),
+            new ChanSite("archiveofsins.com"),
+            new ChanSite("archive.nyafuu.org"),
+            new ChanSite("rbt.asia")
+        );
+    private static List<ChanSite> user_give_explicit_domains = getChansFromConfig(Utils.getConfigString("chans.chan_sites", null));
+    private static List<ChanSite> explicit_domains = new ArrayList<>();
+
+    /**
+     * Reads a string in the format site1[cdn|cdn2|cdn3], site2[cdn] into a list of ChanSites.
+     */
+    public static List<ChanSite> getChansFromConfig(String rawChanString) {
+        List<ChanSite> userChans = new ArrayList<>();
+        if (rawChanString != null) {
+            String[] listOfChans = rawChanString.split(",");
+            for (String chanInfo : listOfChans) {
+                // If this is true we're parsing a chan with cdns
+                if (chanInfo.contains("[")) {
+                    String siteUrl = chanInfo.split("\\[")[0];
+                    // Quote the site url so regex metacharacters (e.g. ".") are matched literally
+                    String[] cdns = chanInfo.replaceAll(Pattern.quote(siteUrl + "["), "").replaceAll("]", "").split("\\|");
+                    logger.debug("site url: " + siteUrl);
+                    logger.debug("cdn: " + Arrays.toString(cdns));
+                    userChans.add(new ChanSite(siteUrl, Arrays.asList(cdns)));
+                } else {
+                    // We're parsing a site without cdns
+                    logger.debug("site: " + chanInfo);
+                    userChans.add(new ChanSite(chanInfo));
+                }
+            }
+            return userChans;
+        }
+        return null;
+    }
+
+    private static List<String> url_piece_blacklist = Arrays.asList(
+        "=http",
+        "http://imgops.com/",
+        "iqdb.org",
+        "saucenao.com"
+        );
+
+    private ChanSite chanSite;
+    private boolean generalChanSite = true;
+
+    public ChanRipper(URL url) throws IOException {
+        super(url);
+        for (ChanSite _chanSite : explicit_domains) {
+            logger.info(_chanSite.domains);
+            if (_chanSite.domains.contains(url.getHost())) {
+                chanSite = _chanSite;
+                generalChanSite = false;
+            }
+        }
+        if (chanSite == null) {
+            chanSite = new ChanSite(Arrays.asList(url.getHost()));
+        }
+    }
+
+    @Override
+    public String getHost() {
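+        // e.g. https://boards.4chan.org/g/thread/1234567 -> "4chan_g" (illustrative thread id)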
+        String host = this.url.getHost();
+        host = host.substring(0, host.lastIndexOf('.'));
+        if (host.contains(".")) {
+            // Host has subdomain (www)
+            host = host.substring(host.lastIndexOf('.') + 1);
+        }
+        String board = this.url.toExternalForm().split("/")[3];
+        return host + "_" + board;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException {
+        try {
+            // Attempt to use album title as GID
+            Document doc = getCachedFirstPage();
+            try {
+                String subject = doc.select(".post.op > .postinfo > .subject").first().text();
+                return getHost() + "_" + getGID(url) + "_" + subject;
+            } catch (NullPointerException e) {
+                logger.warn("Failed to get thread title from " + url);
+            }
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        // Fall back on the GID
+        return getHost() + "_" + getGID(url);
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        // Only build the list once; canRip may be called repeatedly and we
+        // don't want duplicates accumulating in the static list
+        if (explicit_domains.isEmpty()) {
+            explicit_domains.addAll(bakedin_explicit_domains);
+            if (user_give_explicit_domains != null) {
+                explicit_domains.addAll(user_give_explicit_domains);
+            }
+        }
+        for (ChanSite _chanSite : explicit_domains) {
+            if (_chanSite.domains.contains(url.getHost())) {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Extracts the thread id from the URL. The URL layout depends on the
+     * software the specific chan runs; the known archives (check 4chan-x) use
+     * FoolFuuka, which uses the same (url) layout as 4chan.
+     *
+     * @param url
+     * @return
+     *      The thread id in string form
+     * @throws java.net.MalformedURLException */
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        String u = url.toExternalForm();
+        if (u.contains("/thread/") || u.contains("/res/") || u.contains("yuki.la") || u.contains("55chan.org")) {
+            p = Pattern.compile("^.*\\.[a-z]{1,4}/[a-zA-Z0-9]+/(thread|res)/([0-9]+)(\\.html|\\.php)?.*$");
+            m = p.matcher(u);
+            if (m.matches()) {
+                return m.group(2);
+            }
+
+            // Drawchan is weird, has drawchan.net/dc/dw/res/####.html
+            p = Pattern.compile("^.*\\.[a-z]{1,3}/[a-zA-Z0-9]+/[a-zA-Z0-9]+/res/([0-9]+)(\\.html|\\.php)?.*$");
+            m = p.matcher(u);
+            if (m.matches()) {
+                return m.group(1);
+            }
+            // xchan
+            p = Pattern.compile("^.*\\.[a-z]{1,3}/board/[a-zA-Z0-9]+/thread/([0-9]+)/?.*$");
+            m = p.matcher(u);
+            if (m.matches()) {
+                return m.group(1);
+            }
+
+            // yuki.la
+            p = Pattern.compile("https?://yuki.la/[a-zA-Z0-9]+/([0-9]+)");
+            m = p.matcher(u);
+            if (m.matches()) {
+                return m.group(1);
+            }
+
+            //55chan.org
+            p = Pattern.compile("https?://55chan.org/[a-z0-9]+/(res|thread)/[0-9]+.html");
+            m = p.matcher(u);
+            if (m.matches()) {
+                return m.group(1);
+            }
+        }
+
+        throw new MalformedURLException(
+                "Expected *chan URL formats: "
+                        + ".*/@/(res|thread)/####.html"
+                        + " Got: " + u);
+    }
+
+    @Override
+    public String getDomain() {
+        return this.url.getHost();
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException, URISyntaxException {
+        return super.getFirstPage();
+    }
+
+    private boolean isURLBlacklisted(String url) {
+        for (String blacklist_item : url_piece_blacklist) {
+            if (url.contains(blacklist_item)) {
+                logger.debug("Skipping link that contains '"+blacklist_item+"': " + url);
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) throws URISyntaxException {
+        List<String> imageURLs = new ArrayList<>();
+        Pattern p; Matcher m;
+        for (Element link : page.select("a")) {
+            if (!link.hasAttr("href")) {
+                continue;
+            }
+            String href = link.attr("href").trim();
+
+            if (isURLBlacklisted(href)) {
+                continue;
+            }
+            // Check whether the link is hosted on one of the chan's own CDN domains
+            boolean self_hosted = false;
+            if (!generalChanSite) {
+                for (String cdnDomain : chanSite.cdnDomains) {
+                    if (href.contains(cdnDomain)) {
+                        self_hosted = true;
+                    }
+                }
+            }
+
+            if (self_hosted || generalChanSite) {
+                p = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff|webm|mp4)$", Pattern.CASE_INSENSITIVE);
+                m = p.matcher(href);
+                if (m.matches()) {
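+                    // Normalize relative links, e.g.
+                    // "//is2.4chan.org/g/123.webm" -> "http://is2.4chan.org/g/123.webm"
+                    // "/b/src/123.jpg" -> "http://<host>/b/src/123.jpg" (illustrative paths)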
+                    if (href.startsWith("//")) {
+                        href = "http:" + href;
+                    }
+                    if (href.startsWith("/")) {
+                        href = "http://" + this.url.getHost() + href;
+                    }
+                    // Don't download the same URL twice
+                    if (imageURLs.contains(href)) {
+                        logger.debug("Already attempted: " + href);
+                        continue;
+                    }
+                    imageURLs.add(href);
+                    if (isThisATest()) {
+                        break;
+                    }
+                }
+            } else {
+                // Code adapted from RedditRipper; getFilesFromURL should eventually also handle things like flickr albums
+                URL originalURL;
+                try {
+                    originalURL = new URI(href).toURL();
+                } catch (MalformedURLException | URISyntaxException | IllegalArgumentException e) {
+                    continue;
+                }
+
+                List<URL> urls = RipUtils.getFilesFromURL(originalURL);
+                for (URL imageurl : urls) {
+                    imageURLs.add(imageurl.toString());
+                }
+            }
+
+            if (isStopped()) {
+                break;
+            }
+        }
+        return imageURLs;
+    }
+
+    private boolean isVideo(URL url) {
+        String urlString = url.toExternalForm();
+        return urlString.endsWith(".webm") || urlString.endsWith(".mp4");
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        if (isVideo(url)) {
+            sleep(5000);
+        }
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 125 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/CheveretoRipper.java

@@ -0,0 +1,125 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class CheveretoRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(CheveretoRipper.class);
+
+    private static final Map<String, String> CONSENT_COOKIE;
+    static {
+        CONSENT_COOKIE = new TreeMap<String, String>();
+        CONSENT_COOKIE.put("AGREE_CONSENT", "1");
+    }
+
+    public CheveretoRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    private static List<String> explicit_domains = Arrays.asList("kenzato.uk");
+
+    @Override
+    public String getHost() {
+        return url.toExternalForm().split("/")[2];
+    }
+
+    @Override
+    public String getDomain() {
+        return url.toExternalForm().split("/")[2];
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        String url_name = url.toExternalForm();
+        return explicit_domains.contains(url_name.split("/")[2]);
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first();
+            String title = titleElement.attr("content");
+            title = title.substring(title.lastIndexOf('/') + 1);
+            return getHost() + "_" + title.trim();
+        } catch (IOException e) {
+            // Fall back to default album naming convention
+            logger.info("Unable to find title at " + url);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("(?:https?://)?(?:www\\.)?[a-z1-9-]*\\.[a-z1-9]*(?:[a-zA-Z1-9]*)/album/([a-zA-Z1-9]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected chevereto URL format: " +
+                        "site.domain/album/albumName or site.domain/username/albums- got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // "url" is an instance field of the superclass
+        return Http.url(url).cookies(CONSENT_COOKIE).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find the link to the next page, if any
+        Element elem = doc.select("li.pagination-next > a").first();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        }
+        String nextPage = elem.attr("href");
+        // Sometimes the href is an empty string; stop paginating instead of
+        // requesting a blank URL
+        if (nextPage.isEmpty()) {
+            return null;
+        }
+        return Http.url(nextPage).cookies(CONSENT_COOKIE).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("a.image-container > img")) {
+            String imageSource = el.attr("src");
+            // We remove the .md from images so we download the full size image,
+            // not the medium ones
+            imageSource = imageSource.replace(".md", "");
+            result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 183 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/CoomerPartyRipper.java

@@ -0,0 +1,183 @@
+package com.rarchives.ripme.ripper.rippers;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ * <a href="https://coomer.su/api/schema">See this link for the API schema</a>.
+ */
+public class CoomerPartyRipper extends AbstractJSONRipper {
+
+    private static final Logger logger = LogManager.getLogger(CoomerPartyRipper.class);
+
+    private static final String IMG_URL_BASE = "https://c3.coomer.su/data";
+    private static final String VID_URL_BASE = "https://c1.coomer.su/data";
+    private static final Pattern IMG_PATTERN = Pattern.compile("^.*\\.(jpg|jpeg|png|gif|apng|webp|tif|tiff)$", Pattern.CASE_INSENSITIVE);
+    private static final Pattern VID_PATTERN = Pattern.compile("^.*\\.(webm|mp4|m4v)$", Pattern.CASE_INSENSITIVE);
+
+    // just so we can return a JSONObject from getFirstPage
+    private static final String KEY_WRAPPER_JSON_ARRAY = "array";
+
+    private static final String KEY_FILE = "file";
+    private static final String KEY_PATH = "path";
+    private static final String KEY_ATTACHMENTS = "attachments";
+
+    // Posts Request Endpoint
+    private static final String POSTS_ENDPOINT = "https://coomer.su/api/v1/%s/user/%s?o=%d";
+
+    // Pagination is strictly 50 posts per page, per API schema.
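+    // The o parameter is an offset, not a page number: o=0 returns posts 1-50,
+    // o=50 returns posts 51-100, and so on.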
+    private Integer pageCount = 0;
+    private static final Integer postCount = 50;
+
+    // "Service" of the page to be ripped: Onlyfans, Fansly, Candfans
+    private final String service;
+
+    // Username of the page to be ripped
+    private final String user;
+
+    public CoomerPartyRipper(URL url) throws IOException {
+        super(url);
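+        // e.g. https://coomer.su/onlyfans/user/example has path elements
+        // ["onlyfans", "user", "example"] (hypothetical user)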
+        List<String> pathElements = Arrays.stream(url.getPath().split("/"))
+                .filter(element -> !element.isBlank())
+                .collect(Collectors.toList());
+
+        // Expect at least /<service>/user/<user>; fail cleanly on malformed
+        // URLs instead of throwing IndexOutOfBoundsException below
+        if (pathElements.size() < 3) {
+            logger.warn("pathElements=" + pathElements);
+            throw new MalformedURLException("Invalid coomer.party URL: " + url);
+        }
+
+        service = pathElements.get(0);
+        user = pathElements.get(2);
+        logger.debug("Parsed service=" + service + " and user=" + user + " from " + url);
+    }
+
+    @Override
+    protected String getDomain() {
+        return "coomer.party";
+    }
+
+    @Override
+    public String getHost() {
+        return "coomer.party";
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        String host = url.getHost();
+        return host.endsWith("coomer.party") || host.endsWith("coomer.su");
+    }
+
+    @Override
+    public String getGID(URL url) {
+        return Utils.filesystemSafe(String.format("%s_%s", service, user));
+    }
+
+    private JSONObject getJsonPostsForOffset(Integer offset) throws IOException {
+        String apiUrl = String.format(POSTS_ENDPOINT, service, user, offset);
+
+        String jsonArrayString = Http.url(apiUrl)
+                .ignoreContentType()
+                .response()
+                .body();
+        JSONArray jsonArray = new JSONArray(jsonArrayString);
+
+        // Ideally we'd just return the JSONArray from here, but we have to wrap it in a JSONObject
+        JSONObject wrapperObject = new JSONObject();
+        wrapperObject.put(KEY_WRAPPER_JSON_ARRAY, jsonArray);
+        return wrapperObject;
+    }
+
+    @Override
+    protected JSONObject getFirstPage() throws IOException {
+        return getJsonPostsForOffset(0);
+    }
+
+    @Override
+    protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException {
+        pageCount++;
+        Integer offset = postCount * pageCount;
+        return getJsonPostsForOffset(offset);
+    }
+
+
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) {
+        // extract the array from our wrapper JSONObject
+        JSONArray posts = json.getJSONArray(KEY_WRAPPER_JSON_ARRAY);
+        ArrayList<String> urls = new ArrayList<>();
+        for (int i = 0; i < posts.length(); i++) {
+            JSONObject post = posts.getJSONObject(i);
+            pullFileUrl(post, urls);
+            pullAttachmentUrls(post, urls);
+        }
+        logger.debug("Pulled " + urls.size() + " URLs from " + posts.length() + " posts");
+        return urls;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        sleep(5000);
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    private void pullFileUrl(JSONObject post, ArrayList<String> results) {
+        try {
+            JSONObject file = post.getJSONObject(KEY_FILE);
+            String path = file.getString(KEY_PATH);
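+            // e.g. path "/ab/cd/file.jpg" becomes https://c3.coomer.su/data/ab/cd/file.jpg (illustrative path)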
+            if (isImage(path)) {
+                String url = IMG_URL_BASE + path;
+                results.add(url);
+            } else if (isVideo(path)) {
+                String url = VID_URL_BASE + path;
+                results.add(url);
+            } else {
+                logger.error("Unknown extension for coomer.su path: " + path);
+            }
+        } catch (JSONException e) {
+            // Post has no "file" object (or it lacks a path); log and move on
+            logger.error("Unable to parse file URL: " + e.getMessage());
+        }
+    }
+
+    private void pullAttachmentUrls(JSONObject post, ArrayList<String> results) {
+        try {
+            JSONArray attachments = post.getJSONArray(KEY_ATTACHMENTS);
+            for (int i = 0; i < attachments.length(); i++) {
+                JSONObject attachment = attachments.getJSONObject(i);
+                pullFileUrl(attachment, results);
+            }
+        } catch (JSONException e) {
+            // Post has no attachments array; log and move on
+            logger.error("Unable to parse attachment URLs: " + e.getMessage());
+        }
+    }
+
+    private boolean isImage(String path) {
+        Matcher matcher = IMG_PATTERN.matcher(path);
+        return matcher.matches();
+    }
+
+    private boolean isVideo(String path) {
+        Matcher matcher = VID_PATTERN.matcher(path);
+        return matcher.matches();
+    }
+}

+ 145 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/DanbooruRipper.java

@@ -0,0 +1,145 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jetbrains.annotations.Nullable;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Utils;
+
+import okhttp3.OkHttpClient;
+import okhttp3.Request;
+import okhttp3.Response;
+
+public class DanbooruRipper extends AbstractJSONRipper {
+
+    private static final Logger logger = LogManager.getLogger(DanbooruRipper.class);
+
+    private static final String DOMAIN = "danbooru.donmai.us",
+            HOST = "danbooru";
+    private final OkHttpClient client;
+
+    private Pattern gidPattern = null;
+
+    private int currentPageNum = 1;
+
+    public DanbooruRipper(URL url) throws IOException {
+        super(url);
+        this.client = new OkHttpClient.Builder()
+                .readTimeout(60, TimeUnit.SECONDS)
+                .writeTimeout(60, TimeUnit.SECONDS)
+                .build();
+    }
+
+    @Override
+    protected String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    private String getPage(int num) throws MalformedURLException {
+        return "https://" + getDomain() + "/posts.json?page=" + num + "&tags=" + getTag(url);
+    }
+
+    @Override
+    protected JSONObject getFirstPage() throws MalformedURLException {
+        return getCurrentPage();
+    }
+
+    @Override
+    protected JSONObject getNextPage(JSONObject doc) throws IOException {
+        return getCurrentPage();
+    }
+
+    @Nullable
+    private JSONObject getCurrentPage() throws MalformedURLException {
+        Request request = new Request.Builder()
+                .url(getPage(currentPageNum))
+                .header("User-Agent", "Mozilla/5.0 (iPhone; CPU iPhone OS 15_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Mobile/15E148 Safari/604.1")
+                .header("Accept", "application/json,text/javascript,*/*;q=0.01")
+                .header("Accept-Language", "en-US,en;q=0.9")
+                .header("Sec-Fetch-Dest", "empty")
+                .header("Sec-Fetch-Mode", "cors")
+                .header("Sec-Fetch-Site", "same-origin")
+                .header("Referer", "https://danbooru.donmai.us/")
+                .header("X-Requested-With", "XMLHttpRequest")
+                .header("Connection", "keep-alive")
+                .build();
+        Response response = null;
+        currentPageNum++;
+        try {
+            response = client.newCall(request).execute();
+            if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
+
+            String responseData = response.body().string();
+            JSONArray jsonArray = new JSONArray(responseData);
+            if(!jsonArray.isEmpty()){
+                String newCompatibleJSON = "{ \"resources\":" + jsonArray + " }";
+                return new JSONObject(newCompatibleJSON);
+            }
+        } catch (IOException e) {
+            logger.error("Failed to fetch Danbooru posts page", e);
+        } finally {
+            if (response != null) {
+                response.body().close();
+            }
+        }
+        return null;
+    }
+
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) {
+        List<String> res = new ArrayList<>(100);
+        JSONArray jsonArray = json.getJSONArray("resources");
+        for (int i = 0; i < jsonArray.length(); i++) {
+            if (jsonArray.getJSONObject(i).has("file_url")) {
+                res.add(jsonArray.getJSONObject(i).getString("file_url"));
+            }
+        }
+        return res;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        try {
+            return Utils.filesystemSafe(new URI(getTag(url).replaceAll("([?&])tags=", "")).getPath());
+        } catch (URISyntaxException ex) {
+            logger.error(ex);
+        }
+
+        throw new MalformedURLException("Expected booru URL format: " + getDomain() + "/posts?tags=searchterm - got " + url + " instead");
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    private String getTag(URL url) throws MalformedURLException {
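+        // e.g. https://danbooru.donmai.us/posts?tags=original -> "original"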
+        gidPattern = Pattern.compile("https?://danbooru.donmai.us/(posts)?.*([?&]tags=([^&]*)(?:&z=([0-9]+))?$)");
+        Matcher m = gidPattern.matcher(url.toExternalForm());
+
+        if (m.matches()) {
+            return m.group(3);
+        }
+
+        throw new MalformedURLException("Expected danbooru URL format: " + getDomain() + "/posts?tags=searchterm - got " + url + " instead");
+    }
+}

+ 149 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/DerpiRipper.java

@@ -0,0 +1,149 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+import org.json.JSONObject;
+import org.json.JSONArray;
+
+public class DerpiRipper extends AbstractJSONRipper {
+
+    private URL currUrl;
+    private Integer currPage;
+
+    public DerpiRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "DerpiBooru";
+    }
+
+    @Override
+    public String getDomain() {
+        return "derpibooru.org";
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
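+        // e.g. https://derpibooru.org/search?q=cute -> https://derpibooru.org/search.json?q=cute
+        // (plus &key=... when a derpi.key is configured; query is illustrative)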
+        String u = url.toExternalForm();
+        String[] uu = u.split("\\?", 2);
+        String newU = uu[0];
+        if (newU.endsWith("/")) {
+            newU = newU.substring(0, newU.length() - 1);
+        }
+        newU += ".json?";
+        if (uu.length > 1) {
+            newU += uu[1];
+        }
+
+        // Append the user's API key, if one is configured
+        String key = Utils.getConfigString("derpi.key", "");
+        if (!key.isEmpty()) {
+            newU += "&key=" + key;
+        }
+
+        return new URI(newU).toURL();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        currUrl = url;
+        currPage = 1;
+
+        // search
+        Pattern p = Pattern.compile("^https?://derpibooru\\.org/search\\.json\\?q=([^&]+).*?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "search_" + m.group(1);
+        }
+
+        // tags
+        p = Pattern.compile("^https?://derpibooru\\.org/tags/([^.]+)\\.json.*?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "tags_" + m.group(1);
+        }
+
+        // galleries
+        p = Pattern.compile("^https?://derpibooru\\.org/galleries/([^/]+)/(\\d+)\\.json.*?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "galleries_" + m.group(1) + "_" + m.group(2);
+        }
+
+        // single image
+        p = Pattern.compile("^https?://derpibooru\\.org/(\\d+)\\.json.*?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "image_" + m.group(1);
+        }
+
+        throw new MalformedURLException("Unable to find image in " + url);
+    }
+
+    @Override
+    public JSONObject getFirstPage() throws IOException {
+        return Http.url(url).getJSON();
+    }
+
+    @Override
+    public JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException {
+        currPage++;
+        String u = currUrl.toExternalForm() + "&page=" + Integer.toString(currPage);
+        JSONObject json = Http.url(new URI(u).toURL()).getJSON();
+        JSONArray arr;
+        if (json.has("images")) {
+            arr = json.getJSONArray("images");
+        } else if (json.has("search")) {
+            arr = json.getJSONArray("search");
+        } else {
+            throw new IOException("No more images");
+        }
+        if (arr.length() == 0) {
+            throw new IOException("No more images");
+        }
+        return json;
+    }
+
+    private String getImageUrlFromJson(JSONObject json) {
+        return "https:" + json.getJSONObject("representations").getString("full");
+    }
+
+    @Override
+    public List<String> getURLsFromJSON(JSONObject json) {
+        List<String> imageURLs = new ArrayList<>();
+
+        JSONArray arr = null;
+        if (json.has("images")) {
+            arr = json.getJSONArray("images");
+        } else if (json.has("search")) {
+            arr = json.getJSONArray("search");
+        }
+        if (arr != null) {
+            for (int i = 0; i < arr.length(); i++){
+                imageURLs.add(this.getImageUrlFromJson(arr.getJSONObject(i)));
+            }
+        } else {
+            imageURLs.add(this.getImageUrlFromJson(json));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // we don't set an index prefix here as derpibooru already prefixes their images with their unique IDs
+        addURLToDownload(url, "");
+    }
+}

+ 653 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/DeviantartRipper.java

@@ -0,0 +1,653 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection;
+import org.jsoup.Connection.Method;
+import org.jsoup.Connection.Response;
+import org.jsoup.HttpStatusException;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ *
+ * @author MrPlaygon
+ *
+ *         NOT using Deviantart API like the old JSON ripper because it is SLOW
+ *         and somehow annoying to use. Things to consider: Using the API might
+ *         be less work/maintenance later because APIs do not change as
+ *         frequently as HTML source code does...?
+ *
+ *
+ *
+ *         Tested for:
+ *
+ *         SFW:
+ *
+ *         https://www.deviantart.com/apofiss/gallery/41388863/sceneries
+ *         https://www.deviantart.com/kageuri/gallery/
+ *         https://www.deviantart.com/kageuri/gallery/?catpath=/
+ *         https://www.deviantart.com/apofiss/favourites/39881418/gifts-and
+ *         https://www.deviantart.com/kageuri/favourites/
+ *         https://www.deviantart.com/kageuri/favourites/?catpath=/
+ *
+ *         NSFW:
+ *
+ *         https://www.deviantart.com/revpeng/gallery/67734353/Siren-Lee-Agent-of-S-I-R-E-N-S
+ *
+ *
+ *         Deactivated account:
+ *
+ *         https://www.deviantart.com/gingerbreadpony/gallery
+ *
+ *         Banned Account:
+ *
+ *         https://www.deviantart.com/ghostofflossenburg/gallery
+ *
+ *
+ *
+ *
+ *         Login Data (PLEASE DONT ACTUALLY USE!!!):
+ *
+ *         email: 5g5_8l4dii5lbbpc@byom.de
+ *
+ *         username: 5g58l4dii5lbbpc
+ *
+ *         password: 5g5_8l4dii5lbbpc
+ *
+ *
+ *
+ */
+public class DeviantartRipper extends AbstractHTMLRipper {
+
+	private static final Logger logger = LogManager.getLogger(DeviantartRipper.class);
+
+	private final String username = "5g58l4dii5lbbpc";
+	private final String password = "5g5_8l4dii5lbbpc";
+	private int offset = 0;
+	private boolean usingCatPath = false;
+	private int downloadCount = 0;
+	private Map<String, String> cookies = new HashMap<String, String>();
+	private DownloadThreadPool deviantartThreadPool = new DownloadThreadPool("deviantart");
+	private ArrayList<String> names = new ArrayList<String>();
+
+	List<String> allowedCookies = Arrays.asList("agegate_state", "userinfo", "auth", "auth_secure");
+
+	private Connection conn = null;
+
+	// Constants
+	private final String referer = "https://www.deviantart.com/";
+	private final String userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0";
+	private final String utilsKey = "DeviantartLogin.cookies"; //for config file
+
+	@Override
+	public DownloadThreadPool getThreadPool() {
+		return deviantartThreadPool;
+	}
+
+	public DeviantartRipper(URL url) throws IOException {
+		super(url);
+	}
+
+	@Override
+	protected String getDomain() {
+		return "deviantart.com";
+	}
+
+	@Override
+	public String getHost() {
+		return "deviantart";
+	}
+
+	@Override
+	protected Document getFirstPage() throws IOException {
+		if (isDeactivated()) {
+			throw new IOException("Account Deactivated");
+		}
+		login();
+
+		// Saving connection to reuse later for following pages.
+		this.conn = Http.url(urlWithParams(this.offset)).cookies(getDACookie()).referrer(this.referer)
+				.userAgent(this.userAgent).connection();
+
+		return this.conn.get();
+	}
+
+	/**
+	 * Checks if the URL refers to a deactivated account using the HTTP status code.
+	 *
+	 * @return true when the account is deactivated (non-200 response)
+	 * @throws IOException when the page cannot be loaded
+	 */
+	private boolean isDeactivated() throws IOException {
+		Response res = Http.url(this.url).connection().followRedirects(true).referrer(this.referer)
+				.userAgent(this.userAgent).execute();
+		return res.statusCode() != 200;
+
+	}
+
+	/**
+	 * Stores logged in Cookies. Needed for art pieces only visible to logged in
+	 * users.
+	 *
+	 *
+	 * @throws IOException when failed to load webpage or failed to read/write
+	 *                     cookies in file (used when running multiple instances of
+	 *                     RipMe)
+	 */
+	private void login() throws IOException {
+
+		String customUsername = Utils.getConfigString("DeviantartCustomLoginUsername", this.username);
+		String customPassword = Utils.getConfigString("DeviantartCustomLoginPassword", this.password);
+		try {
+			String dACookies = Utils.getConfigString(utilsKey, null);
+			updateCookie(dACookies != null ? deserialize(dACookies) : null);
+		} catch (ClassNotFoundException e) {
+			e.printStackTrace();
+		}
+		if (getDACookie() == null || !checkLogin()) {
+			logger.info("Do Login now");
+			// Do login now
+
+			Map<String, String> tmpCookies = new HashMap<String, String>();
+
+			// Load login page
+			Response res = Http.url("https://www.deviantart.com/users/login").connection().method(Method.GET)
+					.referrer(referer).userAgent(userAgent).execute();
+
+			tmpCookies.putAll(res.cookies());
+
+			// Find tokens
+			Document doc = res.parse();
+
+			tmpCookies.putAll(res.cookies());
+
+			Element form = doc.getElementById("login");
+			String token = form.select("input[name=\"validate_token\"]").first().attr("value");
+			String key = form.select("input[name=\"validate_key\"]").first().attr("value");
+			logger.info("Token: " + token + " & Key: " + key);
+
+			// Build Login Data
+			HashMap<String, String> loginData = new HashMap<String, String>();
+			loginData.put("challenge", "");
+			loginData.put("username", customUsername);
+			loginData.put("password", customPassword);
+			loginData.put("remember_me", "1");
+			loginData.put("validate_token", token);
+			loginData.put("validate_key", key);
+
+			// Log in using data. Handle redirect
+			res = Http.url("https://www.deviantart.com/users/login").connection().referrer(referer).userAgent(userAgent)
+					.method(Method.POST).data(loginData).cookies(tmpCookies).followRedirects(false).execute();
+
+			tmpCookies.putAll(res.cookies());
+
+			res = Http.url(res.header("location")).connection().referrer(referer).userAgent(userAgent)
+					.method(Method.GET).cookies(tmpCookies).followRedirects(false).execute();
+
+			// Store cookies
+			tmpCookies.putAll(res.cookies());
+
+			updateCookie(tmpCookies);
+
+
+		} else {
+			logger.info("No new Login needed");
+		}
+
+		logger.info("DA Cookies: " + getDACookie());
+	}
+
+	/**
+	 * Returns next page Document using offset.
+	 */
+	@Override
+	public Document getNextPage(Document doc) throws IOException {
+		this.offset += 24;
+		this.conn.url(urlWithParams(this.offset)).cookies(getDACookie());
+		Response re = this.conn.execute();
+		//updateCookie(re.cookies());
+		Document docu = re.parse();
+		Elements messages = docu.getElementsByClass("message");
+		logger.info("Current Offset: " + this.offset);
+
+		if (messages.size() > 0) {
+
+			// if message exists -> last page
+			logger.info("Messages amount: " + messages.size() + " - Next Page does not exists");
+			throw new IOException("No more pages");
+		}
+
+		// Reuse the document we already fetched instead of requesting the page a second time
+		return docu;
+	}
+
+	/**
+	 * Returns list of Links to the Image pages. NOT links to fullsize image!!! e.g.
+	 * https://www.deviantart.com/kageuri/art/RUBY-568396655
+	 *
+	 * @param page Page of album with multiple images
+	 *
+	 */
+	@Override
+	protected List<String> getURLsFromPage(Document page) {
+		List<String> result = new ArrayList<String>();
+
+		Element div;
+		if (usingCatPath) {
+			div = page.getElementById("gmi-");
+
+		} else {
+			div = page.getElementsByClass("folderview-art").first().child(0);
+
+		}
+		Elements links = div.select("a.torpedo-thumb-link");
+
+		for (Element el : links) {
+			result.add(el.attr("href"));
+
+		}
+
+		logger.info("Amount of Images on Page: " + result.size());
+		logger.info(page.location());
+
+		return result;
+	}
+
+	/**
+	 * Starts new Thread to find download link + filename + filetype
+	 *
+	 * @param url The URL to an image site.
+	 */
+	@Override
+	protected void downloadURL(URL url, int index) {
+		this.downloadCount += 1;
+		logger.info("Downloading URL Number " + this.downloadCount);
+		logger.info("Deviant Art URL: " + url.toExternalForm());
+		try {
+			// Suppress this warning because it is part of code that was temporarily
+			// commented out to disable the behavior.
+			// We know there's a lot about this ripper that needs to be fixed so
+			// we're not too worried about warnings in this file.
+			@SuppressWarnings("unused")
+			Response re = Http.url(urlWithParams(this.offset)).cookies(getDACookie()).referrer(referer)
+					.userAgent(userAgent).response();
+			//updateCookie(re.cookies());
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+
+		// Start Thread and add to pool.
+		DeviantartImageThread t = new DeviantartImageThread(url);
+		deviantartThreadPool.addThread(t);
+
+	}
+
+	@Override
+	public String normalizeUrl(String url) {
+		return (urlWithParams(this.offset).toExternalForm());
+	}
+
+	/**
+	 * Returns the name of the album. The album name consists of 3 parts:
+	 * - Artist (owner of the gallery)
+	 * - Type (gallery or favourites folder)
+	 * - Name of the folder
+	 *
+	 * Returns artist_type_name
+	 */
+	@Override
+	public String getGID(URL url) throws MalformedURLException {
+		String s = url.toExternalForm();
+		String artist = "unknown";
+		String what = "unknown";
+		String albumname = "unknown";
+
+		if (url.toExternalForm().contains("catpath=/")) {
+			this.usingCatPath = true;
+		}
+
+		Pattern p = Pattern.compile("^https?://www.deviantart\\.com/([a-zA-Z0-9]+).*$");
+		Matcher m = p.matcher(s);
+
+		// Artist
+		if (m.matches()) {
+			artist = m.group(1);
+		} else {
+			throw new MalformedURLException("Expected deviantart.com URL format: "
+					+ "www.deviantart.com/<ARTIST>/gallery/<NUMBERS>/<NAME>\nOR\nwww.deviantart.com/<ARTIST>/favourites/<NUMBERS>/<NAME>\\nOr simply the gallery or favorites of some artist - got "
+					+ url + " instead");
+		}
+
+		// What is it
+		if (s.contains("/gallery")) {
+			what = "gallery";
+		} else if (s.contains("/favourites")) {
+			what = "favourites";
+		} else {
+			throw new MalformedURLException("Expected deviantart.com URL format: "
+					+ "www.deviantart.com/<ARTIST>/gallery/<NUMBERS>/<NAME>\nOR\nwww.deviantart.com/<ARTIST>/favourites/<NUMBERS>/<NAME>\nOr simply the gallery or favorites of some artist - got "
+					+ url + " instead");
+		}
+
+		// Album Name
+		Pattern artistP = Pattern
+				.compile("^https?://www.deviantart\\.com/[a-zA-Z0-9]+/[a-zA-Z]+/[0-9]+/([a-zA-Z0-9-]+).*$");
+		Matcher artistM = artistP.matcher(s);
+		if (s.endsWith("?catpath=/")) {
+			albumname = "all";
+		} else if (s.endsWith("/favourites/") || s.endsWith("/gallery/") || s.endsWith("/gallery") || s.endsWith("/favourites")) { //added andings without trailing / because of https://github.com/RipMeApp/ripme/issues/1303
+			albumname = "featured";
+		} else if (artistM.matches()) {
+			albumname = artistM.group(1);
+		}
+		logger.info("Album Name: " + artist + "_" + what + "_" + albumname);
+
+		return artist + "_" + what + "_" + albumname;
+	}
+
+	/**
+	 *
+	 * @return Clean URL as String
+	 */
+	private String cleanURL() {
+		return (this.url.toExternalForm().split("\\?"))[0];
+	}
+
+	/**
+	 * Returns the correct url with params (catpath) and the current offset.
+	 * The offset is misleading: it might say 24, but that is not necessarily the 24th image. (DA site is bugged, I guess)
+	 *
+	 * @return URL to page with offset
+	 */
+	private URL urlWithParams(int offset) {
+		try {
+			String url = cleanURL();
+			if (this.usingCatPath) {
+				return (new URI(url + "?catpath=/&offset=" + offset)).toURL();
+			} else {
+				return (new URI(url + "?offset=" + offset).toURL());
+			}
+		} catch (MalformedURLException | URISyntaxException e) {
+			e.printStackTrace();
+		}
+		return null;
+	}
+
+	/**
+	 * Returns the Hashmap used as cookies for NSFW artworks. Not really needed
+	 * as a separate method, but maybe useful later.
+	 *
+	 * @return Cookie Hashmap
+	 */
+	private Map<String, String> getDACookie() {
+		return this.cookies;
+	}
+
+	/**
+	 * Updates cookies and saves to config file.
+	 *
+	 * @param m new Cookies
+	 */
+	private void updateCookie(Map<String, String> m) {
+		if (m == null) {
+			return;
+		}
+
+		/*Iterator<String> iter = m.keySet().iterator();
+		while (iter.hasNext()) {
+			String current = iter.next();
+			if (!this.allowedCookies.contains(current)) {
+				iter.remove();
+			}
+		}*/
+
+		logger.info("Updating Cookies");
+		logger.info("Old Cookies: " + getDACookie() + " ");
+		logger.info("New Cookies: " + m + " ");
+		this.cookies.putAll(m);
+		this.cookies.put("agegate_state", "1");
+		logger.info("Merged Cookies: " + getDACookie() + " ");
+
+		try {
+			Utils.setConfigString(utilsKey, serialize(new HashMap<String, String>(getDACookie())));
+			Utils.saveConfig();
+		} catch (IOException e) {
+			e.printStackTrace();
+		}
+	}
+
+	/**
+	 * Serializes an Object and returns a String ready to store. Used to store
+	 * cookies in the config file, because the deviantart cookies contain all
+	 * sorts of special characters like ; , = : and so on.
+	 *
+	 * @param o Object to serialize
+	 * @return The serialized base64 encoded object
+	 * @throws IOException
+	 */
+	private String serialize(Serializable o) throws IOException {
+		ByteArrayOutputStream baos = new ByteArrayOutputStream();
+		ObjectOutputStream oos = new ObjectOutputStream(baos);
+		oos.writeObject(o);
+		oos.close();
+		return Base64.getEncoder().encodeToString(baos.toByteArray());
+	}
+
+	/**
+	 * Recreates the object from the base64 encoded String. Used for Cookies
+	 *
+	 * @param s the Base64 encoded string
+	 * @return the Cookie Map
+	 * @throws IOException
+	 * @throws ClassNotFoundException
+	 */
+	private Map<String, String> deserialize(String s) throws IOException, ClassNotFoundException {
+		byte[] data = Base64.getDecoder().decode(s);
+		ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(data));
+
+		// Suppress this warning because it's part of the legacy implementation.
+		// We know there's a lot about this ripper that needs to be fixed so
+		// we're not too worried about warnings in this file.
+		// Unchecked cast here but should never be something else.
+		@SuppressWarnings("unchecked")
+		HashMap<String, String> o = (HashMap<String, String>) ois.readObject();
+		ois.close();
+		return o;
+	}
+
+	/**
+	 * Checks if the current cookies are still valid/usable.
+	 * Also checks if agegate is given.
+	 *
+	 *
+	 * @return True when all is good.
+	 */
+	private boolean checkLogin() {
+		if (!getDACookie().containsKey("agegate_state")) {
+			logger.info("No agegate key");
+			return false;
+		} else if (!getDACookie().get("agegate_state").equals("1")) { // agegate == 1 -> all is fine. NSFW is visible
+			logger.info("Wrong agegate value");
+			return false;
+		}
+
+		try {
+			logger.info("Login with Cookies: " + getDACookie());
+			Response res = Http.url("https://www.deviantart.com/users/login").connection().followRedirects(true)
+					.cookies(getDACookie()).referrer(this.referer).userAgent(this.userAgent).execute();
+			if (!res.url().toExternalForm().equals("https://www.deviantart.com/users/login") && !res.url().toExternalForm().startsWith("https://www.deviantart.com/users/wrong-password")) {
+				logger.info("Cookies are valid: " + res.url());
+				return true;
+			} else {
+				logger.info("Cookies invalid. Wrong URL: " + res.url() + "  " + res.statusCode());
+				return false;
+			}
+		} catch (IOException e) {
+			e.printStackTrace();
+			return false;
+		}
+	}
+
+	/**
+	 * Analyzes an image page like
+	 * https://www.deviantart.com/kageuri/art/RUBY-568396655 .
+	 *
+	 * Looks for download button, follows the authentications and redirects and adds
+	 * the Image URL to the download queue. If no download button is present it will
+	 * use the largest version of the image.
+	 *
+	 * Should work with all filetypes on Deviantart. Tested with .JPG .PNG and .PDF
+	 *
+	 * @author MrPlaygon
+	 *
+	 */
+	private class DeviantartImageThread implements Runnable {
+		private final URL url;
+
+		public DeviantartImageThread(URL url) {
+			this.url = url;
+		}
+
+		@Override
+		public void run() {
+			getFullSizeURL();
+		}
+
+		/**
+		 * Resolves the artwork page to the fullsize image URL (with file
+		 * ending) and queues it for download. Fullsize URLs look like
+		 *         https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/intermediary/f/07f7a6bb-2d35-4630-93fc-be249af22b3e/d7jak0y-d20e5932-df72-4d13-b002-5e122037b373.jpg
+		 *
+		 *
+		 */
+		private void getFullSizeURL() {
+
+			logger.info("Searching max. Resolution for " + url);
+			sendUpdate(STATUS.LOADING_RESOURCE, "Searching max. resolution for " + url);
+			try {
+				Response re = Http.url(url).connection().referrer(referer).userAgent(userAgent).cookies(getDACookie())
+						.execute();
+				Document doc = re.parse();
+
+				// Artwork Title
+				String title = doc.select("a.title").first().html();
+				title = title.replaceAll("[^a-zA-Z0-9\\.\\-]", "_").toLowerCase();
+
+				int counter = 1; // For images with same name add _X (X = number)
+				if (names.contains(title)) {
+					while (names.contains(title + "_" + counter)) {
+						counter++;
+					}
+					title = title + "_" + counter;
+				}
+				names.add(title);
+
+				// Check for a download button
+				Element downloadButton = doc.select("a.dev-page-download").first();
+
+				// Download Button
+				if (downloadButton != null) {
+					logger.info("Download Button found for "+ url +" : "  + downloadButton.attr("href"));
+
+					Response download = Http.url(downloadButton.attr("href")).connection().cookies(getDACookie())
+							.method(Method.GET).referrer(referer).userAgent(userAgent).ignoreContentType(true)
+							.followRedirects(true).execute();
+					URL location = download.url();
+
+					String[] filetypePart = download.header("Content-Disposition").split("\\.");
+
+					logger.info("Found Image URL");
+					logger.info(url);
+					logger.info(location);
+
+					addURLToDownload(location, "", "", "", getDACookie(),
+							title + "." + filetypePart[filetypePart.length - 1]);
+					return;
+				}
+
+				// No Download Button
+				logger.info("No Download Button for: "+ url);
+
+				Element div = doc.select("div.dev-view-deviation").first();
+
+				Element image = div.getElementsByTag("img").first();
+
+				String scaledImage = "";
+				if (image == null) {
+					logger.error("ERROR on " + url);
+
+					logger.error("Cookies: " + getDACookie() + "    ");
+					logger.error(div);
+					sendUpdate(STATUS.DOWNLOAD_ERRORED, "ERROR at\n" + url);
+					return;
+				}
+
+				// When it is text art (e.g. story) the only image is the profile
+				// picture
+				if (image.hasClass("avatar")) {
+					logger.error("No Image found, probably text art: " + url);
+					return;
+				}
+
+				scaledImage = image.attr("src").split("\\?")[0];
+
+				String[] parts = scaledImage.split("/v1/"); // Image page uses scaled down version. Split at /v1/ to receive max size.
+
+				if (parts.length > 2) {
+					logger.error("Unexpected URL Format");
+					sendUpdate(STATUS.DOWNLOAD_ERRORED, "Unexpected URL Format");
+					return;
+				}
+
+				String originalImage = parts[0]; // URL to the original image without scaling (does not always work; weird 404 errors)
+				String downloadString = originalImage;
+				try {
+					// Probe the unscaled URL; throws HttpStatusException on 404
+					Http.url(downloadString).connection().cookies(getDACookie()).method(Method.GET).referrer(referer).userAgent(userAgent).ignoreContentType(true).followRedirects(true).execute().statusCode();
+				} catch (HttpStatusException e) {
+					downloadString = scaledImage; // revert back to the safe (scaled) url because of the error
+				}
+				String[] tmpParts = downloadString.split("\\."); //split to get file ending
+
+				addURLToDownload(new URI(downloadString).toURL(), "", "", "", new HashMap<String, String>(),
+						title + "." + tmpParts[tmpParts.length - 1]);
+				return;
+
+			} catch (IOException | URISyntaxException e) {
+				e.printStackTrace();
+			}
+
+			logger.error("No Full Size URL for: " + url);
+			sendUpdate(STATUS.DOWNLOAD_ERRORED, "No image found for " + url);
+
+			return;
+
+		}
+	}
+}

+ 94 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/DribbbleRipper.java

@@ -0,0 +1,94 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class DribbbleRipper extends AbstractHTMLRipper {
+
+    public DribbbleRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "dribbble";
+    }
+    @Override
+    public String getDomain() {
+        return "dribbble.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://[wm.]*dribbble\\.com/([a-zA-Z0-9]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected dribbble.com URL format: " +
+                "dribbble.com/albumid - got " + url + "instead");
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        Elements hrefs = doc.select("a.next_page");
+        if (hrefs.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = "https://www.dribbble.com" + hrefs.first().attr("href");
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        for (Element thumb : doc.select("div.shot-thumbnail-base > figure > img")) {
+            String srcset = thumb.attr("data-srcset");
+            String imageURL = getLargestImageURL(srcset);
+            if (imageURL != null) {
+                imageURLs.add(imageURL);
+            }
+        }
+        return imageURLs;
+    }
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    private String getLargestImageURL(String srcset) {
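+        // A srcset lists candidates like "https://cdn.example.com/shot.png 320w, ...";
+        // keep the one with the largest width descriptor (URL is illustrative)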
+        int maxWidth = 0;
+        String largestURL = null;
+
+        String[] imageURLs = srcset.split(", ");
+        for (String imageURL : imageURLs) {
+            try {
+                String[] parts = imageURL.trim().split(" ");
+                String url = parts[0];
+                String size = parts[1];
+                int width = Integer.parseInt(size.replace("w", ""));
+
+                if (width > maxWidth) {
+                    maxWidth = width;
+                    largestURL = url;
+                }
+            } catch (ArrayIndexOutOfBoundsException | NumberFormatException e) {
+                continue;
+            }
+        }
+        return largestURL;
+    }
+}

+ 78 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/DynastyscansRipper.java

@@ -0,0 +1,78 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class DynastyscansRipper extends AbstractHTMLRipper {
+
+    public DynastyscansRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "dynasty-scans";
+    }
+
+    @Override
+    public String getDomain() {
+        return "dynasty-scans.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://dynasty-scans.com/chapters/([\\S]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected dynasty-scans URL format: " +
+                "dynasty-scans.com/chapters/ID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Element elem = doc.select("a[id=next_link]").first();
+        if (elem == null || elem.attr("href").equals("#")) {
+            throw new IOException("No more pages");
+        }
+        return Http.url("https://dynasty-scans.com" + elem.attr("href")).get();
+
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        String jsonText = null;
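+        // The chapter page embeds its image list in a script tag, e.g.
+        // var pages = [{"image":"/system/releases/.../01.png", ...}]; (illustrative shape)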
+        for (Element script : doc.select("script")) {
+            if (script.data().contains("var pages")) {
+                jsonText = script.data().replaceAll("var pages = ", "");
+                jsonText = jsonText.replaceAll("//<!\\[CDATA\\[", "");
+                jsonText = jsonText.replaceAll("//]]>", "");
+            }
+        }
+        if (jsonText == null) {
+            // No "var pages" script found on this page; nothing to rip
+            return result;
+        }
+        JSONArray imageArray = new JSONArray(jsonText);
+        for (int i = 0; i < imageArray.length(); i++) {
+            result.add("https://dynasty-scans.com" + imageArray.getJSONObject(i).getString("image"));
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 267 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/E621Ripper.java

@@ -0,0 +1,267 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.RipUtils;
+import com.rarchives.ripme.utils.Utils;
+
+public class E621Ripper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(E621Ripper.class);
+
+    private static Pattern gidPattern = null;
+    private static Pattern gidPattern2 = null;
+    private static Pattern gidPatternPool = null;
+
+    private static Pattern gidPatternNew = null;
+    private static Pattern gidPatternPoolNew = null;
+
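+    // Separate pool so per-post page fetches happen off the main ripper thread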
+    private DownloadThreadPool e621ThreadPool = new DownloadThreadPool("e621");
+
+    private Map<String, String> cookies = new HashMap<>();
+    private String userAgent = USER_AGENT;
+
+    public E621Ripper(URL url) throws IOException {
+        super(url);
+    }
+
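+    /** Pull optional e621 cookies and user-agent override from the RipMe config. */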
+    private void loadConfig() {
+        String cookiesString = Utils.getConfigString("e621.cookies", "");
+        if (!cookiesString.isEmpty()) {
+            cookies = RipUtils.getCookiesFromString(cookiesString);
+
+            if (cookies.containsKey("cf_clearance")) {
+                sendUpdate(STATUS.DOWNLOAD_WARN,
+                        "Using CloudFlare captcha cookies, make sure to update them and set your browser's useragent in config!");
+            }
+
+            if (cookies.containsKey("remember")) {
+                sendUpdate(STATUS.DOWNLOAD_WARN, "Logging in using auth cookie.");
+            }
+        }
+
+        userAgent = Utils.getConfigString("e621.useragent", USER_AGENT);
+    }
+
+    private void warnAboutBlacklist(Document page) {
+        if (!page.select("div.hidden-posts-notice").isEmpty())
+            sendUpdate(STATUS.DOWNLOAD_WARN,
+                    "Some posts are blacklisted. Consider logging in. Search for \"e621\" in this wiki page: https://github.com/RipMeApp/ripme/wiki/Config-options");
+    }
+
+    private Document getDocument(String url, int retries) throws IOException {
+        return Http.url(url).userAgent(userAgent).retries(retries).cookies(cookies).get();
+    }
+
+    private Document getDocument(String url) throws IOException {
+        return getDocument(url, 1);
+    }
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return e621ThreadPool;
+    }
+
+    @Override
+    public String getDomain() {
+        return "e621.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "e621";
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        loadConfig();
+        Document page;
+        if (url.getPath().startsWith("/pool")) {
+            page = getDocument("https://e621.net/pools/" + getTerm(url));
+        } else {
+            page = getDocument("https://e621.net/posts?tags=" + getTerm(url));
+        }
+
+        warnAboutBlacklist(page);
+        return page;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        Elements elements = page.select("article > a");
+        List<String> res = new ArrayList<>();
+
+        for (Element e : elements) {
+            if (!e.attr("href").isEmpty()) {
+                res.add(e.attr("abs:href"));
+            }
+        }
+
+        return res;
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException {
+        warnAboutBlacklist(page);
+        if (!page.select("a#paginator-next").isEmpty()) {
+            return getDocument(page.select("a#paginator-next").attr("abs:href"));
+        } else {
+            throw new IOException("No more pages.");
+        }
+    }
+
+    @Override
+    public void downloadURL(final URL url, int index) {
+        // rate limit
+        sleep(3000);
+        // addURLToDownload(url, getPrefix(index));
+        e621ThreadPool.addThread(new E621FileThread(url, getPrefix(index)));
+    }
+
+    private String getTerm(URL url) throws MalformedURLException {
+        // old url style => new url style:
+        // /post/index/1/<tags> => /posts?tags=<tags>
+        // /pool/show/<id> => /pools/id
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile(
+                    "^https?://(www\\.)?e621\\.net/post/index/[^/]+/([a-zA-Z0-9$_.+!*'():,%\\-]+)(/.*)?(#.*)?$");
+        }
+
+        if (gidPatternPool == null) {
+            gidPatternPool = Pattern.compile(
+                    "^https?://(www\\.)?e621\\.net/pool/show/([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\?.*)?(/.*)?(#.*)?$");
+        }
+
+        if (gidPatternNew == null) {
+            gidPatternNew = Pattern.compile(
+                    "^https?://(www\\.)?e621\\.net/posts\\?([\\S]*?)tags=([a-zA-Z0-9$_.+!*'(),%:\\-]+)(\\&[\\S]+)?");
+        }
+
+        if (gidPatternPoolNew == null) {
+            gidPatternPoolNew = Pattern.compile("^https?://(www\\.)?e621\\.net/pools/([\\d]+)(\\?[\\S]*)?");
+        }
+
+        Matcher m = gidPattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info(m.group(2));
+            return m.group(2);
+        }
+
+        m = gidPatternPool.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(2);
+        }
+
+        m = gidPatternNew.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info(m.group(3));
+            return m.group(3);
+        }
+
+        m = gidPatternPoolNew.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info(m.group(2));
+            return m.group(2);
+        }
+
+        throw new MalformedURLException(
+                "Expected e621.net URL format: e621.net/posts?tags=searchterm - got " + url + " instead");
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        String prefix = "";
+        if (url.getPath().startsWith("/pool")) {
+            prefix = "pool_";
+        }
+        return Utils.filesystemSafe(prefix + getTerm(url));
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        if (gidPattern2 == null) {
+            gidPattern2 = Pattern.compile(
+                    "^https?://(www\\.)?e621\\.net/post/search\\?tags=([a-zA-Z0-9$_.+!*'():,%-]+)(/.*)?(#.*)?$");
+        }
+
+        Matcher m = gidPattern2.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return new URI("https://e621.net/post/index/1/" + m.group(2).replace("+", "%20")).toURL();
+        }
+
+        return url;
+    }
+
+    public class E621FileThread implements Runnable {
+        private final URL url;
+        private final String index;
+
+        public E621FileThread(URL url, String index) {
+            this.url = url;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            try {
+                String fullSizedImage = getFullSizedImage(url);
+                if (fullSizedImage != null && !fullSizedImage.isEmpty()) {
+                    addURLToDownload(new URI(fullSizedImage).toURL(), index);
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.error("Unable to get full sized image from " + url);
+            }
+        }
+
+        private String getFullSizedImage(URL imageURL) throws IOException {
+            Document page = getDocument(imageURL.toExternalForm(), 3);
+
+            /*
+             * Elements video = page.select("video > source");
+             * Elements flash = page.select("embed");
+             * Elements image = page.select("a#highres");
+             * if (video.size() > 0) {
+             * return video.attr("src");
+             * } else if (flash.size() > 0) {
+             * return flash.attr("src");
+             * } else if (image.size() > 0) {
+             * return image.attr("href");
+             * } else {
+             * throw new IOException();
+             * }
+             */
+
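+            // The post page exposes the original file via the "download" link element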
+            if (!page.select("div#image-download-link > a").isEmpty()) {
+                return page.select("div#image-download-link > a").attr("abs:href");
+            } else {
+                if (!page.select("#blacklist-box").isEmpty()) {
+                    sendUpdate(RipStatusMessage.STATUS.RIP_ERRORED,
+                            "Cannot download image - blocked by blacklist. Consider logging in. Search for \"e621\" in this wiki page: https://github.com/RipMeApp/ripme/wiki/Config-options");
+                }
+                throw new IOException("Unable to find download link on " + imageURL);
+            }
+        }
+
+    }
+}

+ 267 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/EHentaiRipper.java

@@ -0,0 +1,267 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.RipUtils;
+import com.rarchives.ripme.utils.Utils;
+
+public class EHentaiRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(EHentaiRipper.class);
+
+    // All sleep times are in milliseconds
+    private static final int PAGE_SLEEP_TIME = 3000;
+    private static final int IMAGE_SLEEP_TIME = 1500;
+    private static final int IP_BLOCK_SLEEP_TIME = 60 * 1000;
+    private static final Map<String, String> cookies = new HashMap<>();
+
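+    // "nw=1" skips the gallery content-warning page; "tip=1" appears to dismiss a site notice (assumption)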
+    static {
+        cookies.put("nw", "1");
+        cookies.put("tip", "1");
+    }
+
+    private String lastURL = null;
+    // Thread pool for finding direct image links from "image" pages (html)
+    private final DownloadThreadPool ehentaiThreadPool = new DownloadThreadPool("ehentai");
+    // Current HTML document
+    private Document albumDoc = null;
+
+    public EHentaiRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return ehentaiThreadPool;
+    }
+
+    @Override
+    public String getHost() {
+        return "e-hentai";
+    }
+
+    @Override
+    public String getDomain() {
+        return "e-hentai.org";
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            if (albumDoc == null) {
+                albumDoc = getPageWithRetries(url);
+            }
+            Elements elems = albumDoc.select("#gn");
+            return getHost() + "_" + elems.first().text();
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^https?://e-hentai\\.org/g/([0-9]+)/([a-fA-F0-9]+)/?");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "-" + m.group(2);
+        }
+
+        throw new MalformedURLException(
+                "Expected e-hentai.org gallery format: "
+                        + "http://e-hentai.org/g/####/####/"
+                        + " Got: " + url);
+    }
+
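+    /** Load a page, sleeping and retrying when e-hentai warns of an impending IP ban. */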
+    private Document getPageWithRetries(URL url) throws IOException {
+        Document doc;
+        int retries = 3;
+        while (true) {
+            sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
+            logger.info("Retrieving " + url);
+            doc = Http.url(url)
+                    .referrer(this.url)
+                    .cookies(cookies)
+                    .get();
+            if (doc.toString().contains("IP address will be automatically banned")) {
+                if (retries == 0) {
+                    throw new IOException("Hit rate limit and maximum number of retries, giving up");
+                }
+                logger.warn("Hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining");
+                retries--;
+                try {
+                    Thread.sleep(IP_BLOCK_SLEEP_TIME);
+                } catch (InterruptedException e) {
+                    throw new IOException("Interrupted while waiting for rate limit to subside");
+                }
+            } else {
+                return doc;
+            }
+        }
+    }
+
+    public List<String> getTags(Document doc) {
+        List<String> tags = new ArrayList<>();
+        logger.info("Getting tags");
+        for (Element tag : doc.select("td > div > a")) {
+            logger.info("Found tag " + tag.text());
+            tags.add(tag.text());
+        }
+        return tags;
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        if (albumDoc == null) {
+            albumDoc = getPageWithRetries(this.url);
+        }
+        this.lastURL = this.url.toExternalForm();
+        logger.info("Checking blacklist");
+        String blacklistedTag = RipUtils.checkTags(Utils.getConfigStringArray("ehentai.blacklist.tags"), getTags(albumDoc));
+        if (blacklistedTag != null) {
+            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
+                    "contains the blacklisted tag \"" + blacklistedTag + "\"");
+            return null;
+        }
+        return albumDoc;
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException, URISyntaxException {
+        // Check if we've stopped
+        if (isStopped()) {
+            throw new IOException("Ripping interrupted");
+        }
+        // Find next page
+        Elements hrefs = doc.select(".ptt a");
+        if (hrefs.isEmpty()) {
+            logger.info("doc: " + doc.html());
+            throw new IOException("No navigation links found");
+        }
+        // Ensure next page is different from the current page
+        String nextURL = hrefs.last().attr("href");
+        if (nextURL.equals(this.lastURL)) {
+            logger.info("lastURL = nextURL : " + nextURL);
+            throw new IOException("Reached last page of results");
+        }
+        // Sleep before loading next page
+        sleep(PAGE_SLEEP_TIME);
+        // Load next page
+        Document nextPage = getPageWithRetries(new URI(nextURL).toURL());
+        this.lastURL = nextURL;
+        return nextPage;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> imageURLs = new ArrayList<>();
+        Elements thumbs = page.select("#gdt > a");
+        // Iterate over images on page
+        for (Element thumb : thumbs) {
+            imageURLs.add(thumb.attr("href"));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        EHentaiImageThread t = new EHentaiImageThread(url, index, this.workingDir.toPath());
+        ehentaiThreadPool.addThread(t);
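+        // Space out image-page requests to avoid tripping the rate limiter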
+        try {
+            Thread.sleep(IMAGE_SLEEP_TIME);
+        } catch (InterruptedException e) {
+            logger.warn("Interrupted while waiting to load next image", e);
+        }
+    }
+
+    /**
+     * Helper class to find and download images found on "image" pages
+     * <p>
+     * Handles case when site has IP-banned the user.
+     */
+    private class EHentaiImageThread implements Runnable {
+        private final URL url;
+        private final int index;
+        private final Path workingDir;
+
+        EHentaiImageThread(URL url, int index, Path workingDir) {
+            this.url = url;
+            this.index = index;
+            this.workingDir = workingDir;
+        }
+
+        @Override
+        public void run() {
+            fetchImage();
+        }
+
+        private void fetchImage() {
+            try {
+                Document doc = getPageWithRetries(this.url);
+
+                // Find image
+                Elements images = doc.select(".sni > a > img");
+                if (images.isEmpty()) {
+                    // Fall back to an alternate image selector (Issue #41)
+                    images = doc.select("img#img");
+                    if (images.isEmpty()) {
+                        logger.warn("Image not found at " + this.url);
+                        return;
+                    }
+                }
+                Element image = images.first();
+                String imgsrc = image.attr("src");
+                logger.info("Found URL " + imgsrc + " via " + images.get(0));
+                Pattern p = Pattern.compile("^http://.*/ehg/image.php.*&n=([^&]+).*$");
+                Matcher m = p.matcher(imgsrc);
+                if (m.matches()) {
+                    // Manually discover filename from URL
+                    String savePath = this.workingDir + "/";
+                    if (Utils.getConfigBoolean("download.save_order", true)) {
+                        savePath += String.format("%03d_", index);
+                    }
+                    savePath += m.group(1);
+                    addURLToDownload(new URI(imgsrc).toURL(), Paths.get(savePath));
+                } else {
+                    // Provide prefix and let the AbstractRipper "guess" the filename
+                    String prefix = "";
+                    if (Utils.getConfigBoolean("download.save_order", true)) {
+                        prefix = String.format("%03d_", index);
+                    }
+                    addURLToDownload(new URI(imgsrc).toURL(), prefix);
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while loading/parsing " + this.url, e);
+            }
+        }
+    }
+}

+ 161 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/EightmusesRipper.java

@@ -0,0 +1,161 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Response;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+
+public class EightmusesRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(EightmusesRipper.class);
+
+    private Map<String, String> cookies = new HashMap<>();
+
+    public EightmusesRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public boolean hasASAPRipping() {
+        return true;
+    }
+
+    @Override
+    public String getHost() {
+        return "8muses";
+    }
+
+    @Override
+    public String getDomain() {
+        return "8muses.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://(www\\.)?8muses\\.com/(comix|comics)/album/([a-zA-Z0-9\\-_]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (!m.matches()) {
+            throw new MalformedURLException("Expected URL format: http://www.8muses.com/index/category/albumname, got: " + url);
+        }
+        return m.group(m.groupCount());
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            Element titleElement = getCachedFirstPage().select("meta[name=description]").first();
+            String title = titleElement.attr("content");
+            title = title.replace("A huge collection of free porn comics for adults. Read", "");
+            title = title.replace("online for free at 8muses.com", "");
+            return getHost() + "_" + title.trim();
+        } catch (IOException e) {
+            // Fall back to default album naming convention
+            logger.info("Unable to find title at " + url);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        Response resp = Http.url(url).response();
+        cookies.putAll(resp.cookies());
+        return resp.parse();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> imageURLs = new ArrayList<>();
+        // This contains the thumbnails of all images on the page
+        Elements pageImages = page.getElementsByClass("c-tile");
+        for (int i = 0; i < pageImages.size(); i++) {
+            Element thumb = pageImages.get(i);
+            // If true this link is a sub album
+            if (thumb.attr("href").contains("/comics/album/")) {
+                String subUrl = "https://www.8muses.com" + thumb.attr("href");
+                try {
+                    logger.info("Retrieving " + subUrl);
+                    sendUpdate(STATUS.LOADING_RESOURCE, subUrl);
+                    Document subPage = Http.url(subUrl).get();
+                    // If the page below this one has images this line will download them
+                    List<String> subalbumImages = getURLsFromPage(subPage);
+                    logger.info("Found " + subalbumImages.size() + " images in subalbum");
+                } catch (IOException e) {
+                    logger.warn("Error while loading subalbum " + subUrl, e);
+                }
+
+            } else if (thumb.attr("href").contains("/comics/picture/")) {
+                logger.info("This page is a album");
+                logger.info("Ripping image");
+                if (super.isStopped()) break;
+                // Find thumbnail image source
+                String image = null;
+                if (thumb.hasAttr("data-cfsrc")) {
+                    image = thumb.attr("data-cfsrc");
+                } else {
+                    Element imageElement = thumb.select("img").first();
+                    image = "https://comics.8muses.com" + imageElement.attr("data-src").replace("/th/", "/fl/");
+                    try {
+                        URL imageUrl = new URI(image).toURL();
+                        addURLToDownload(imageUrl, getSubdir(page.select("title").text()), this.url.toExternalForm(), cookies, getPrefixShort(i), "", null, true);
+                    } catch (MalformedURLException | URISyntaxException e) {
+                        logger.error("\"" + image + "\" is malformed");
+                        logger.error(e.getMessage());
+                    }
+                }
+                if (!image.contains("8muses.com")) {
+                    // Not hosted on 8muses.
+                    continue;
+                }
+                imageURLs.add(image);
+                if (isThisATest()) break;
+            }
+
+        }
+        return imageURLs;
+    }
+
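+    /** Derive a filesystem-friendly subdirectory name from the page title. */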
+    public String getSubdir(String rawHref) {
+        logger.info("Raw title: " + rawHref);
+        String title = rawHref;
+        title = title.replaceAll("8muses - Sex and Porn Comics", "");
+        title = title.replaceAll("\\s+", " ");
+        title = title.replaceAll("\n", "");
+        title = title.replaceAll("\\| ", "");
+        title = title.replace(" - ", "-");
+        title = title.replace(" ", "-");
+        logger.info(title);
+        return title;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), cookies);
+    }
+
+    public String getPrefixLong(int index) {
+        return String.format("%03d_", index);
+    }
+
+    public String getPrefixShort(int index) {
+        return String.format("%03d", index);
+    }
+}

+ 119 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ErofusRipper.java

@@ -0,0 +1,119 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.utils.Http;
+
+public class ErofusRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ErofusRipper.class);
+
+    public ErofusRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public boolean hasASAPRipping() {
+        return true;
+    }
+
+    @Override
+    public String getHost() {
+        return "erofus";
+    }
+
+    @Override
+    public String getDomain() {
+        return "erofus.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https://www.erofus.com/comics/([a-zA-Z0-9\\-_]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (!m.matches()) {
+            throw new MalformedURLException("Expected URL format: http://www.8muses.com/index/category/albumname, got: " + url);
+        }
+        return m.group(m.groupCount());
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        logger.debug(page);
+        List<String> imageURLs = new ArrayList<>();
+        if (pageContainsImages(page)) {
+            logger.info("Page contains images");
+            ripAlbum(page);
+        } else {
+            // This contains the thumbnails of all images on the page
+            Elements pageImages = page.select("a.a-click");
+            for (Element pageLink : pageImages) {
+                if (super.isStopped()) break;
+                if (pageLink.attr("href").contains("comics")) {
+                    String subUrl = "https://erofus.com" + pageLink.attr("href");
+                    try {
+                        logger.info("Retrieving " + subUrl);
+                        sendUpdate(RipStatusMessage.STATUS.LOADING_RESOURCE, subUrl);
+                        Document subPage = Http.url(subUrl).get();
+                        List<String> subalbumImages = getURLsFromPage(subPage);
+                        logger.info("Found " + subalbumImages.size() + " images in subalbum");
+                    } catch (IOException e) {
+                        logger.warn("Error while loading subalbum " + subUrl, e);
+                    }
+                }
+                if (isThisATest()) break;
+            }
+        }
+
+        return imageURLs;
+    }
+
+    public void ripAlbum(Document page) {
+        int x = 1;
+        Elements thumbs = page.select("a.a-click > div.thumbnail > img");
+        for (Element thumb : thumbs) {
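+            // Swap the "thumb" path segment for the higher-resolution "medium" variant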
+            String image = "https://www.erofus.com" + thumb.attr("src").replaceAll("thumb", "medium");
+            try {
+                Map<String, String> opts = new HashMap<>();
+                opts.put("subdirectory", page.title().replaceAll(" \\| Erofus - Sex and Porn Comics", "").replaceAll(" ", "_"));
+                opts.put("prefix", getPrefix(x));
+                addURLToDownload(new URI(image).toURL(), opts);
+            } catch (MalformedURLException | URISyntaxException e) {
+                logger.error("\"" + image + "\" is malformed", e);
+            }
+            x++;
+        }
+    }
+
+    private boolean pageContainsImages(Document page) {
+        Elements pageImages = page.select("a.a-click");
+        for (Element pageLink : pageImages) {
+            if (pageLink.attr("href").contains("/pic/")) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 178 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/EromeRipper.java

@@ -0,0 +1,178 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Response;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+/**
+ *
+ * @author losipher
+ */
+public class EromeRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(EromeRipper.class);
+
+    boolean rippingProfile;
+    private HashMap<String, String> cookies = new HashMap<>();
+
+    public EromeRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "erome.com";
+    }
+
+    @Override
+    public String getHost() {
+        return "erome";
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toString(), this.cookies);
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("https?://www.erome.com/([a-zA-Z0-9_\\-?=]*)/?");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        return ma.matches();
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select("div#albums > div.album > a")) {
+            urlsToAddToQueue.add(elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            Element titleElement = getCachedFirstPage().select("meta[property=og:title]").first();
+            String title = titleElement.attr("content");
+            title = title.substring(title.lastIndexOf('/') + 1);
+            return getHost() + "_" + getGID(url) + "_" + title.trim();
+        } catch (IOException e) {
+            // Fall back to default album naming convention
+            logger.info("Unable to find title at " + url);
+        } catch (NullPointerException e) {
+            return getHost() + "_" + getGID(url);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        return new URI(url.toExternalForm().replaceAll("https?://erome.com", "https://www.erome.com")).toURL();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        return getMediaFromPage(doc);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        this.setAuthCookie();
+        Response resp = Http.url(this.url)
+                .cookies(cookies)
+                .ignoreContentType()
+                .response();
+
+        return resp.parse();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://www.erome.com/[ai]/([a-zA-Z0-9]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        p = Pattern.compile("^https?://www.erome.com/([a-zA-Z0-9_\\-?=]+)/?$");
+        m = p.matcher(url.toExternalForm());
+
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("erome album not found in " + url + ", expected https://www.erome.com/album");
+    }
+
+    private List<String> getMediaFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+        for (Element el : doc.select("img.img-front")) {
+            if (el.hasAttr("data-src")) {
+                // add images that are not yet loaded
+                // (all images are lazy-loaded as we scroll)
+                results.add(el.attr("data-src"));
+            } else if (el.hasAttr("src")) {
+                if (el.attr("src").startsWith("https:")) {
+                    results.add(el.attr("src"));
+                } else {
+                    results.add("https:" + el.attr("src"));
+                }
+            }
+        }
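+        // Collect video sources: HD-labeled first, then SD-labeled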
+        for (Element el : doc.select("source[label=HD]")) {
+            if (el.attr("src").startsWith("https:")) {
+                results.add(el.attr("src"));
+            } else {
+                results.add("https:" + el.attr("src"));
+            }
+        }
+        for (Element el : doc.select("source[label=SD]")) {
+            if (el.attr("src").startsWith("https:")) {
+                results.add(el.attr("src"));
+            } else {
+                results.add("https:" + el.attr("src"));
+            }
+        }
+
+        if (results.isEmpty() && cookies.isEmpty()) {
+            logger.warn("You might try setting erome.laravel_session manually " +
+                    "if you think this page definitely contains media.");
+        }
+
+        return results;
+    }
+
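+    /** Use the laravel_session value from the config, if any, to act as a logged-in session. */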
+    private void setAuthCookie() {
+        String sessionId = Utils.getConfigString("erome.laravel_session", null);
+        if (sessionId != null) {
+            cookies.put("laravel_session", sessionId);
+        }
+    }
+
+}

+ 147 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FapDungeonRipper.java

@@ -0,0 +1,147 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class FapDungeonRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(FapDungeonRipper.class);
+
+    private static final String HOST = "fapdungeon";
+
+    private static final Pattern pagePattern = Pattern
+            .compile("^https?://[wm\\.]*fapdungeon\\.com/([a-zA-Z0-9_-]+)/(.+)/?$");
+
+    public FapDungeonRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public String getDomain() {
+        return HOST + ".com";
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return pagePattern.matcher(url.toExternalForm()).matches();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Matcher m = pagePattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected fapdungeon format:"
+                        + "fapdungeon.com/category/albumname/"
+                        + " Got: " + url);
+    }
+
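+    /** Pick the widest candidate URL from an img's srcset, falling back to its src. */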
+    public String returnLargestImgUrlFromSrcAndSrcset(String src, String sourceSet) {
+        String[] parts = sourceSet.split(",");
+        logger.info("While ripping url: " + this.url + " img src: " + src + " has sourceSet: " + sourceSet);
+
+        Optional<String> largestImgUrl = Optional.empty();
+        int maxWidthSoFar = 0;
+
+        for (String part : parts) {
+            String[] subParts = part.strip().split(" ");
+            if (subParts.length == 2) {
+                String imgUrlPart = subParts[0].strip();
+
+                // parse the integer out of width descriptors like "1080w";
+                // skip any entry whose descriptor does not parse as a width
+                String widthStringPart = subParts[1].strip();
+                String widthNumberString = widthStringPart.substring(0, widthStringPart.length() - 1);
+                int width;
+                try {
+                    width = Integer.parseInt(widthNumberString);
+                } catch (NumberFormatException e) {
+                    continue;
+                }
+
+                if (width > maxWidthSoFar) {
+                    logger.info("Found larger image: " + part);
+                    largestImgUrl = Optional.of(imgUrlPart);
+                    maxWidthSoFar = width;
+                }
+            }
+        }
+
+        if (largestImgUrl.isPresent()) {
+            String imgUrl = largestImgUrl.get();
+            logger.info("For img src " + src + " with srcset, using largestImgUrl: " + imgUrl);
+            return imgUrl;
+        } else {
+            logger.info("Using img src: " + src);
+            return src;
+        }
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+
+        Matcher m = pagePattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            Element content = doc.select("div.entry-content").get(0);
+
+            // Debug this selector on the page itself in Dev Tools with
+            // Array.from(document.querySelectorAll("div.entry-content img")).map((x) =>
+            // x['srcset'])
+            Elements pictures = content.select("img");
+            for (Element e : pictures) {
+                String imageSrc = e.attr("src"); // fallback on <img src="..."> value
+                String imageSourceSet = e.attr("srcset"); // get the largest resolution from this srcset
+                String imageUrl = returnLargestImgUrlFromSrcAndSrcset(imageSrc, imageSourceSet);
+                results.add(imageUrl);
+            }
+
+            // Debug this selector on the page itself in Dev Tools with
+            // Array.from(document.querySelectorAll("div.entry-content video
+            // source")).map((x) => x['src'])
+            Elements videos = content.select("video source");
+            for (Element e : videos) {
+                results.add(e.attr("src"));
+            }
+        }
+
+        return results;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        sleep(1000);
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        Matcher m = pagePattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return getHost() + "_" + m.group(1) + "_" + m.group(2);
+        } else {
+            return super.getAlbumTitle(url);
+        }
+    }
+}

+ 154 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FapwizRipper.java

@@ -0,0 +1,154 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class FapwizRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(FapwizRipper.class);
+
+    private static final Pattern CATEGORY_PATTERN = Pattern.compile("https?://fapwiz.com/category/([a-zA-Z0-9_-]+)/?$");
+
+    private static final Pattern USER_PATTERN = Pattern.compile("https?://fapwiz.com/([a-zA-Z0-9_-]+)/?$");
+
+    // Note that the last part of the pattern can contain unicode emoji which
+    // get encoded as %-encoded UTF-8 bytes in the URL, so we allow % characters.
+    private static final Pattern POST_PATTERN = Pattern
+            .compile("https?://fapwiz.com/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_%-]+)/?$");
+
+    public FapwizRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "fapwiz";
+    }
+
+    @Override
+    public String getDomain() {
+        return "fapwiz.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Matcher m;
+
+        m = CATEGORY_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "category_" + m.group(1);
+        }
+
+        m = USER_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "user_" + m.group(1);
+        }
+
+        m = POST_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "post_" + m.group(1) + "_" + m.group(2);
+        }
+
+        throw new MalformedURLException("Expected fapwiz URL format: " +
+                "fapwiz.com/USER or fapwiz.com/USER/POST or " +
+                "fapwiz.com/CATEGORY - got " + url + " instead");
+    }
+
+    void processUserOrCategoryPage(Document doc, List<String> results) {
+        // The category page looks a lot like the structure of a user page,
+        // so processUserPage is written to be compatible with both.
+        doc.select(".post-items-holder img").forEach(e -> {
+            String imgSrc = e.attr("src");
+
+            // Skip the user profile picture thumbnail insets
+            if (imgSrc.endsWith("-thumbnail-icon.jpg")) {
+                return;
+            }
+
+            // Replace -thumbnail.jpg with .mp4
+            String videoSrc = imgSrc.replace("-thumbnail.jpg", ".mp4");
+            results.add(videoSrc);
+        });
+    }
+
+    void processCategoryPage(Document doc, List<String> results) {
+        logger.info("Processing category page: " + url);
+        processUserOrCategoryPage(doc, results);
+    }
+
+    void processUserPage(Document doc, List<String> results) {
+        logger.info("Processing user page: " + url);
+        processUserOrCategoryPage(doc, results);
+    }
+
+    void processPostPage(Document doc, List<String> results) {
+        logger.info("Processing post page: " + url);
+        doc.select("video source").forEach(video -> {
+            results.add(video.attr("src"));
+        });
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+        Matcher m;
+
+        m = CATEGORY_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            processCategoryPage(doc, results);
+        }
+
+        m = USER_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            processUserPage(doc, results);
+        }
+
+        m = POST_PATTERN.matcher(url.toExternalForm());
+        if (m.matches()) {
+            processPostPage(doc, results);
+        }
+
+        return results;
+    }
+
+    private Document getDocument(String url, int retries) throws IOException {
+        return Http.url(url).userAgent(USER_AGENT).retries(retries).get();
+    }
+
+    private Document getDocument(String url) throws IOException {
+        return getDocument(url, 1);
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException {
+        logger.info("Getting next page for url: " + url);
+        Elements next = page.select("a.next");
+        if (!next.isEmpty()) {
+            String href = next.attr("href");
+            logger.info("Found next page: " + href);
+            return getDocument(href);
+        } else {
+            logger.info("No more pages");
+            throw new IOException("No more pages.");
+        }
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        sleep(2000);
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 57 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FemjoyhunterRipper.java

@@ -0,0 +1,57 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class FemjoyhunterRipper extends AbstractHTMLRipper {
+
+    public FemjoyhunterRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "femjoyhunter";
+    }
+
+    @Override
+    public String getDomain() {
+        return "femjoyhunter.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://www.femjoyhunter.com/([a-zA-Z0-9_-]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected femjoyhunter URL format: " +
+                "femjoyhunter.com/ID - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("img")) {
+            result.add(el.attr("src"));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // Send the CDN origin as the referrer so the image host serves the file
+        addURLToDownload(url, getPrefix(index), "", "https://a2h6m3w6.ssl.hwcdn.net/", null);
+    }
+}

+ 75 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FitnakedgirlsRipper.java

@@ -0,0 +1,75 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class FitnakedgirlsRipper extends AbstractHTMLRipper {
+
+    public FitnakedgirlsRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "fitnakedgirls";
+    }
+
+    @Override
+    public String getDomain() {
+        return "fitnakedgirls.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^https?://(\\w+\\.)?fitnakedgirls\\.com/photos/gallery/(.+)$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(2);
+        }
+
+        throw new MalformedURLException(
+                "Expected fitnakedgirls.com gallery format: " + "fitnakedgirls.com/gallery/####" + " Got: " + url);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+
+        Elements imgs = doc.select(".entry-inner img");
+        for (Element img : imgs) {
+            String imgSrc = img.attr("data-src");
+            if (imgSrc.strip().isEmpty()) {
+                imgSrc = img.attr("src");
+                if (imgSrc.strip().isEmpty()) {
+                    continue;
+                }
+            }
+            imageURLs.add(imgSrc);
+        }
+
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // site is slow and goes down easily so don't overwhelm it
+        sleep(1000);
+
+        // Send referrer when downloading images
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+}

+ 340 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FivehundredpxRipper.java

@@ -0,0 +1,340 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+
+/**
+ * https://github.com/500px/api-documentation
+ * http://500px.com/tsyganov/stories/80675/galya ("blog")
+ * http://500px.com/tsyganov/stories ("blogs") - get HTML, parse stories
+ * http://500px.com/tsyganov/favorites
+ * http://500px.com/tsyganov (photos)
+ * https://api.500px.com/v1/photo
+ *  ?rpp=100
+ *  &feature=user
+ *  &image_size=3
+ *  &page=3
+ *  &sort=created_at
+ *  &include_states=false
+ *  &user_id=1913159
+ *  &consumer_key=XPm2br2zGBq6TOfd2xbDIHYoLnt3cLxr1HYryGCv
+ *
+ */
+public class FivehundredpxRipper extends AbstractJSONRipper {
+
+    private static final Logger logger = LogManager.getLogger(FivehundredpxRipper.class);
+
+    private int page = 1;
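+    // getGID() appends the query string for the requested rip type onto this baseURL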
+    private String baseURL = "https://api.500px.com/v1";
+    private static final String CONSUMER_KEY = "XPm2br2zGBq6TOfd2xbDIHYoLnt3cLxr1HYryGCv";
+
+    public FivehundredpxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "500px";
+    }
+    @Override
+    public String getDomain() {
+        return "500px.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        // http://500px.com/tsyganov/stories/80675/galya ("blog")
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/stories/([0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1),
+                   blogid   = m.group(2);
+            baseURL += "/blogs/" + blogid
+                     + "?feature=user"
+                     + "&username=" + username
+                     + "&image_size=5"
+                     + "&rpp=100";
+            return username + "_stories_" + blogid;
+        }
+
+        // http://500px.com/tsyganov/stories ("blogs")
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/stories/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1);
+            baseURL += "/blogs"
+                     + "?feature=user"
+                     + "&username=" + username
+                     + "&rpp=100";
+            return username + "_stories";
+        }
+
+        // http://500px.com/tsyganov/favorites
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/favorites/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1);
+            baseURL += "/photos"
+                     + "?feature=user_favorites"
+                     + "&username=" + username
+                     + "&rpp=100"
+                     + "&image_size=5";
+            return username + "_faves";
+        }
+
+        // http://500px.com/tsyganov/galleries
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/galleries/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1);
+            String userID;
+            try {
+                userID = getUserID(username);
+            } catch (IOException e) {
+                throw new MalformedURLException("Unable to get User ID from username (" + username + ")");
+            }
+            baseURL += "/users/" + userID + "/galleries"
+                     + "?rpp=100";
+            return username + "_galleries";
+        }
+
+        // https://500px.com/getesmart86/galleries/olga
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/galleries/([a-zA-Z0-9\\-_]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1);
+            String subgallery = m.group(2);
+            String userID;
+            try {
+                userID = getUserID(username);
+            } catch (IOException e) {
+                throw new MalformedURLException("Unable to get User ID from username (" + username + ")");
+            }
+            baseURL += "/users/" + userID + "/galleries/" + subgallery + "/items"
+                     + "?rpp=100"
+                     + "&image_size=5";
+            return username + "_galleries_" + subgallery;
+        }
+
+        // http://500px.com/tsyganov (photos)
+        p = Pattern.compile("^.*500px.com/([a-zA-Z0-9\\-_]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String username = m.group(1);
+            baseURL += "/photos"
+                     + "?feature=user"
+                     + "&username=" + username
+                     + "&rpp=100"
+                     + "&image_size=5";
+            return username;
+        }
+
+        throw new MalformedURLException(
+                "Expected 500px.com gallery formats: "
+                + "/stories/###  /stories  /favorites  /"
+                + " Got: " + url);
+    }
+
+    /** Convert username to UserID. */
+    private String getUserID(String username) throws IOException {
+        logger.info("Fetching user ID for " + username);
+        JSONObject json = new Http("https://api.500px.com/v1/" +
+                    "users/show" +
+                    "?username=" + username +
+                    "&consumer_key=" + CONSUMER_KEY)
+                .getJSON();
+        return Long.toString(json.getJSONObject("user").getLong("id"));
+    }
+
+    @Override
+    public JSONObject getFirstPage() throws IOException, URISyntaxException {
+        URL apiURL = new URI(baseURL + "&consumer_key=" + CONSUMER_KEY).toURL();
+        logger.debug("apiURL: " + apiURL);
+        JSONObject json = Http.url(apiURL).getJSON();
+
+        if (baseURL.contains("/galleries?")) {
+            // We're in the root /galleries folder, need to get all images from all galleries.
+            JSONObject result = new JSONObject();
+            result.put("photos", new JSONArray());
+            // Iterate over every gallery
+            JSONArray jsonGalleries = json.getJSONArray("galleries");
+            for (int i = 0; i < jsonGalleries.length(); i++) {
+                if (i > 0) {
+                    sleep(500);
+                }
+                JSONObject jsonGallery = jsonGalleries.getJSONObject(i);
+                long galleryID = jsonGallery.getLong("id");
+                String userID = Long.toString(jsonGallery.getLong("user_id"));
+                String blogURL = "https://api.500px.com/v1/users/" + userID + "/galleries/" + galleryID + "/items"
+                     + "?rpp=100"
+                     + "&image_size=5"
+                     + "&consumer_key=" + CONSUMER_KEY;
+                logger.info("Loading " + blogURL);
+                sendUpdate(STATUS.LOADING_RESOURCE, "Gallery ID " + galleryID + " for userID " + userID);
+                JSONObject thisJSON = Http.url(blogURL).getJSON();
+                JSONArray thisPhotos = thisJSON.getJSONArray("photos");
+                // Iterate over every image in this story
+                for (int j = 0; j < thisPhotos.length(); j++) {
+                    result.getJSONArray("photos").put(thisPhotos.getJSONObject(j));
+                }
+            }
+            return result;
+        }
+        else if (baseURL.contains("/blogs?")) {
+            // List of stories to return
+            JSONObject result = new JSONObject();
+            result.put("photos", new JSONArray());
+
+            // Iterate over every story
+            JSONArray jsonBlogs = json.getJSONArray("blog_posts");
+            for (int i = 0; i < jsonBlogs.length(); i++) {
+                if (i > 0) {
+                    sleep(500);
+                }
+                JSONObject jsonBlog = jsonBlogs.getJSONObject(i);
+                int blogid = jsonBlog.getInt("id");
+                String username = jsonBlog.getJSONObject("user").getString("username");
+                String blogURL = "https://api.500px.com/v1/blogs/" + blogid
+                     + "?feature=user"
+                     + "&username=" + username
+                     + "&rpp=100"
+                     + "&image_size=5"
+                     + "&consumer_key=" + CONSUMER_KEY;
+                logger.info("Loading " + blogURL);
+                sendUpdate(STATUS.LOADING_RESOURCE, "Story ID " + blogid + " for user " + username);
+                JSONObject thisJSON = Http.url(blogURL).getJSON();
+                JSONArray thisPhotos = thisJSON.getJSONArray("photos");
+                // Iterate over every image in this story
+                for (int j = 0; j < thisPhotos.length(); j++) {
+                    result.getJSONArray("photos").put(thisPhotos.getJSONObject(j));
+                }
+            }
+            return result;
+        }
+        return json;
+    }
+
+    @Override
+    public JSONObject getNextPage(JSONObject json) throws IOException, URISyntaxException {
+        if (isThisATest()) {
+            return null;
+        }
+        // Check previous JSON to see if we hit the last page
+        if (!json.has("current_page")
+         || !json.has("total_pages")) {
+            throw new IOException("No more pages");
+        }
+        int currentPage = json.getInt("current_page"),
+            totalPages  = json.getInt("total_pages");
+        if (currentPage == totalPages) {
+            throw new IOException("No more results");
+        }
+
+        sleep(500);
+        ++page;
+        URL apiURL = new URI(baseURL
+                             + "&page=" + page
+                             + "&consumer_key=" + CONSUMER_KEY).toURL();
+        return Http.url(apiURL).getJSON();
+    }
+
+    @Override
+    public List<String> getURLsFromJSON(JSONObject json) {
+        List<String> imageURLs = new ArrayList<>();
+        JSONArray photos = json.getJSONArray("photos");
+        for (int i = 0; i < photos.length(); i++) {
+            if (super.isStopped()) {
+                break;
+            }
+            JSONObject photo = photos.getJSONObject(i);
+            String imageURL = null;
+            String rawUrl = "https://500px.com" + photo.getString("url");
+            Document doc;
+            Elements images = new Elements();
+            try {
+                logger.debug("Loading " + rawUrl);
+                super.retrievingSource(rawUrl);
+                doc = Http.url(rawUrl).get();
+                images = doc.select("div#preload img");
+            }
+            catch (IOException e) {
+                logger.error("Error fetching full-size image from " + rawUrl, e);
+            }
+            if (!images.isEmpty()) {
+                imageURL = images.first().attr("src");
+                logger.debug("Found full-size non-watermarked image: " + imageURL);
+            }
+            else {
+                logger.debug("Falling back to image_url from API response");
+                imageURL = photo.getString("image_url");
+                imageURL = imageURL.replaceAll("/4\\.", "/5.");
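+                // (Assumption: the numeric path segment is 500px's size code, where
+                // higher values such as 2048 denote larger renditions of the same photo.)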
+                // See if there are larger images
+                for (String imageSize : new String[] { "2048" }) {
+                    String fsURL = imageURL.replaceAll("/5\\.", "/" + imageSize + ".");
+                    sleep(10);
+                    if (urlExists(fsURL)) {
+                        logger.info("Found larger image at " + fsURL);
+                        imageURL = fsURL;
+                        break;
+                    }
+                }
+            }
+            imageURLs.add(imageURL);
+            if (isThisATest()) {
+                break;
+            }
+        }
+        return imageURLs;
+    }
+
+    private boolean urlExists(String url) {
+        try {
+            HttpURLConnection connection = (HttpURLConnection) new URI(url).toURL().openConnection();
+            connection.setRequestMethod("HEAD");
+            return connection.getResponseCode() == 200;
+        } catch (IOException | URISyntaxException e) {
+            return false;
+        }
+    }
+
+    @Override
+    public boolean keepSortOrder() {
+        return false;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        String u = url.toExternalForm();
+        String[] fields = u.split("/");
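+        // Assumes a CDN path of the form .../<photo-id>/<size>/<file>, so the
+        // third-from-last segment is the photo ID used for the filename.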
+        String prefix = "/" + getPrefix(index) + fields[fields.length - 3];
+        Path saveAs = Paths.get(getWorkingDir() + prefix + ".jpg");
+        addURLToDownload(url, saveAs, "", null, false);
+    }
+
+}

+ 315 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FlickrRipper.java

@@ -0,0 +1,315 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.utils.Http;
+
+public class FlickrRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(FlickrRipper.class);
+
+    private final DownloadThreadPool flickrThreadPool;
+
+    private enum UrlType {
+        USER,
+        PHOTOSET
+    }
+
+    private class Album {
+        final UrlType type;
+        final String id;
+
+        Album(UrlType type, String id) {
+            this.type = type;
+            this.id = id;
+        }
+    }
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return flickrThreadPool;
+    }
+
+    @Override
+    public boolean hasASAPRipping() {
+        return true;
+    }
+
+    public FlickrRipper(URL url) throws IOException {
+        super(url);
+        flickrThreadPool = new DownloadThreadPool();
+    }
+
+    @Override
+    public String getHost() {
+        return "flickr";
+    }
+    @Override
+    public String getDomain() {
+        return "flickr.com";
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String sUrl = url.toExternalForm();
+        // Normalize secure.flickr.com links to http://www.flickr.com
+        sUrl = sUrl.replace("https://secure.flickr.com", "http://www.flickr.com");
+        // For /groups/ links, add a /pool to the end of the URL
+        if (sUrl.contains("flickr.com/groups/") && !sUrl.contains("/pool")) {
+            if (!sUrl.endsWith("/")) {
+                sUrl += "/";
+            }
+            sUrl += "pool";
+        }
+        return new URI(sUrl).toURL();
+    }
+    // Flickr is one of those sites that includes an API key in the site's JavaScript
+    // TODO let the user provide their own API key
+    private String getAPIKey(Document doc) {
+        Pattern p;
+        Matcher m;
+        p = Pattern.compile("root.YUI_config.flickr.api.site_key = \"([a-zA-Z0-9]*)\";");
+        for (Element e : doc.select("script")) {
+            // You have to use .html here as .text will strip most of the javascript
+            m = p.matcher(e.html());
+            if (m.find()) {
+                logger.info("Found api key:" + m.group(1));
+                return m.group(1);
+            }
+        }
+        logger.error("Unable to get api key");
+        // A nice error message to tell our users what went wrong
+        sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Unable to extract api key from flickr");
+        sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Using hardcoded api key");
+        return "935649baf09b2cc50628e2b306e4da5d";
+    }
+
+    // The flickr api is a monster of weird settings so we just request everything that the webview does
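+    // For a PHOTOSET it yields roughly (key and IDs illustrative, not real values):
+    //   https://api.flickr.com/services/rest?extras=...&per_page=100&page=1&photoset_id=72157644042355643&method=flickr.photosets.getPhotos&api_key=XXXX&format=json&nojsoncallback=1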
+    private String apiURLBuilder(Album album, String pageNumber, String apiKey) {
+        String method = null;
+        String idField = null;
+        switch (album.type) {
+            case PHOTOSET:
+                method = "flickr.photosets.getPhotos";
+                idField = "photoset_id=" + album.id;
+                break;
+            case USER:
+                method = "flickr.people.getPhotos";
+                idField = "user_id=" + album.id;
+                break;
+        }
+
+        return "https://api.flickr.com/services/rest?extras=can_addmeta," +
+        "can_comment,can_download,can_share,contact,count_comments,count_faves,count_views,date_taken," +
+        "date_upload,icon_urls_deep,isfavorite,ispro,license,media,needs_interstitial,owner_name," +
+        "owner_datecreate,path_alias,realname,rotation,safety_level,secret_k,secret_h,url_c,url_f,url_h,url_k," +
+        "url_l,url_m,url_n,url_o,url_q,url_s,url_sq,url_t,url_z,visibility,visibility_source,o_dims," +
+        "is_marketplace_printable,is_marketplace_licensable,publiceditability&per_page=100&page="+ pageNumber + "&" +
+        "get_user_info=1&primary_photo_extras=url_c,%20url_h,%20url_k,%20url_l,%20url_m,%20url_n,%20url_o" +
+        ",%20url_q,%20url_s,%20url_sq,%20url_t,%20url_z,%20needs_interstitial,%20can_share&jump_to=&" +
+        idField + "&viewerNSID=&method=" + method + "&csrf=&" +
+        "api_key=" + apiKey + "&format=json&hermes=1&hermesClient=1&reqId=358ed6a0&nojsoncallback=1";
+    }
+
+    private JSONObject getJSON(String page, String apiKey) {
+        URL pageURL = null;
+        String apiURL = null;
+        try {
+            apiURL = apiURLBuilder(getAlbum(url.toExternalForm()), page, apiKey);
+            pageURL = new URI(apiURL).toURL();
+        } catch (MalformedURLException | URISyntaxException e) {
+            logger.error("Unable to build API link: " + apiURL + " is malformed");
+        }
+        try {
+            logger.info("Fetching: " + apiURL);
+            // Fetch the body once and reuse it for both logging and parsing
+            String body = Http.url(pageURL).ignoreContentType().get().text();
+            logger.debug("Response: " + body);
+            return new JSONObject(body);
+        } catch (IOException e) {
+            logger.error("Unable to fetch API response from " + apiURL);
+            return null;
+        }
+    }
+
+    private Album getAlbum(String url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        // User photostream:  https://www.flickr.com/photos/115858035@N04/
+        // Album: https://www.flickr.com/photos/115858035@N04/sets/72157644042355643/
+
+        final String domainRegex = "https?://[wm.]*flickr.com";
+        final String userRegex = "[a-zA-Z0-9@_-]+";
+        // Album
+        p = Pattern.compile("^" + domainRegex + "/photos/" + userRegex + "/(sets|albums)/([0-9]+)/?.*$");
+        m = p.matcher(url);
+        if (m.matches()) {
+            return new Album(UrlType.PHOTOSET, m.group(2));
+        }
+
+        // User photostream
+        p = Pattern.compile("^" + domainRegex + "/photos/(" + userRegex + ")/?$");
+        m = p.matcher(url);
+        if (m.matches()) {
+            return new Album(UrlType.USER, m.group(1));
+        }
+
+        String errorMessage = "Failed to extract photoset ID from url: " + url;
+
+        logger.error(errorMessage);
+        throw new MalformedURLException(errorMessage);
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        if (!url.toExternalForm().contains("/sets/")) {
+            return super.getAlbumTitle(url);
+        }
+        try {
+            // Attempt to use album title as GID
+            Document doc = getCachedFirstPage();
+            String user = url.toExternalForm();
+            user = user.substring(user.indexOf("/photos/") + "/photos/".length());
+            user = user.substring(0, user.indexOf("/"));
+            String title = doc.select("meta[name=description]").get(0).attr("content");
+            if (!title.equals("")) {
+                return getHost() + "_" + user + "_" + title;
+            }
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        // Root:  https://www.flickr.com/photos/115858035@N04/
+        // Album: https://www.flickr.com/photos/115858035@N04/sets/72157644042355643/
+
+        final String domainRegex = "https?://[wm.]*flickr.com";
+        final String userRegex = "[a-zA-Z0-9@_-]+";
+        // Album
+        p = Pattern.compile("^" + domainRegex + "/photos/(" + userRegex + ")/sets/([0-9]+)/?.*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+
+        // User page
+        p = Pattern.compile("^" + domainRegex + "/photos/(" + userRegex + ").*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        // Groups page
+        p = Pattern.compile("^" + domainRegex + "/groups/(" + userRegex + ").*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return "groups-" + m.group(1);
+        }
+        throw new MalformedURLException(
+                "Expected flickr.com URL formats: "
+                        + "flickr.com/photos/username or "
+                        + "flickr.com/photos/username/sets/albumid"
+                        + " Got: " + url);
+    }
+
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
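+        // Note: images are queued directly via addURLToDownload() below, so this
+        // list is intentionally returned empty (see hasASAPRipping()).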
+        String apiKey = getAPIKey(doc);
+        int x = 1;
+        while (true) {
+            JSONObject jsonData = getJSON(String.valueOf(x), apiKey);
+            // getJSON() returns null on fetch failure; treat that like an API "fail" status
+            if (jsonData == null || (jsonData.has("stat") && jsonData.getString("stat").equals("fail"))) {
+                break;
+            } else {
+                // Determine root key
+                JSONObject rootData;
+
+                try {
+                    rootData = jsonData.getJSONObject("photoset");
+                } catch (JSONException e) {
+                    try {
+                        rootData = jsonData.getJSONObject("photos");
+                    } catch (JSONException innerE) {
+                        logger.error("Unable to find photos in response");
+                        break;
+                    }
+                }
+
+                int totalPages = rootData.getInt("pages");
+                logger.debug(jsonData);
+                JSONArray pictures = rootData.getJSONArray("photo");
+                for (int i = 0; i < pictures.length(); i++) {
+                    logger.debug("Processing photo " + (i + 1) + " of " + pictures.length() + " on page " + x);
+                    JSONObject data = (JSONObject) pictures.get(i);
+                    try {
+                        addURLToDownload(getLargestImageURL(data.getString("id"), apiKey));
+                    } catch (MalformedURLException | URISyntaxException e) {
+                        logger.error("Flickr MalformedURLException: " + e.getMessage());
+                    }
+
+                }
+                if (x >= totalPages) {
+                    // The rip is done
+                    break;
+                }
+                // We have more pages to download so we rerun the loop
+                x++;
+
+            }
+        }
+
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    private URL getLargestImageURL(String imageID, String apiKey) throws MalformedURLException, URISyntaxException {
+        TreeMap<Integer, String> imageURLMap = new TreeMap<>();
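+        // Keyed by pixel area (width * height) so lastEntry() is the largest size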
+
+        try {
+            URL imageAPIURL = new URI("https://www.flickr.com/services/rest/?method=flickr.photos.getSizes&api_key=" + apiKey + "&photo_id=" + imageID + "&format=json&nojsoncallback=1").toURL();
+            JSONArray imageSizes = new JSONObject(Http.url(imageAPIURL).ignoreContentType().get().text()).getJSONObject("sizes").getJSONArray("size");
+            for (int i = 0; i < imageSizes.length(); i++) {
+                JSONObject imageInfo = imageSizes.getJSONObject(i);
+                imageURLMap.put(imageInfo.getInt("width") * imageInfo.getInt("height"), imageInfo.getString("source"));
+            }
+
+        } catch (org.json.JSONException e) {
+            logger.error("Error parsing Flickr API response: " + e.getMessage());
+        } catch (MalformedURLException e) {
+            logger.error("Malformed URL returned by API");
+        } catch (IOException e) {
+            logger.error("IOException while looking at image sizes: " + e.getMessage());
+        }
+
+        if (imageURLMap.isEmpty()) {
+            // Guard against lastEntry() returning null when no sizes were parsed
+            throw new MalformedURLException("No sizes found for photo ID " + imageID);
+        }
+        return new URI(imageURLMap.lastEntry().getValue()).toURL();
+    }
+}

+ 74 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FreeComicOnlineRipper.java

@@ -0,0 +1,74 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class FreeComicOnlineRipper extends AbstractHTMLRipper {
+
+    public FreeComicOnlineRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "freecomiconline";
+    }
+
+    @Override
+    public String getDomain() {
+        return "freecomiconline.me";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+        p = Pattern.compile("^https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected freecomiconline URL format: " +
+                "freecomiconline.me/TITLE/CHAPTER - got " + url + " instead");
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        String nextPage = doc.select("div.select-pagination a").get(1).attr("href");
+        String nextUrl = "";
+        Pattern p = Pattern.compile("https://freecomiconline.me/comic/([a-zA-Z0-9_\\-]+)/([a-zA-Z0-9_\\-]+)/?$");
+        Matcher m = p.matcher(nextPage);
+        if (m.matches()) {
+            nextUrl = m.group(0);
+        }
+        if (nextUrl.equals("")) {
+            throw new IOException("No more pages");
+        }
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select(".wp-manga-chapter-img")) {
+            result.add(el.attr("src"));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 249 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FuraffinityRipper.java

@@ -0,0 +1,249 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import static com.rarchives.ripme.utils.RipUtils.getCookiesFromString;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Response;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.safety.Safelist;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+public class FuraffinityRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(FuraffinityRipper.class);
+
+    private static final String urlBase = "https://www.furaffinity.net";
+    private Map<String, String> cookies = new HashMap<>();
+
+    private void setCookies() {
+        if (Utils.getConfigBoolean("furaffinity.login", true)) {
+            logger.info("Logging in using cookies");
+            String faCookies = Utils.getConfigString("furaffinity.cookies", "a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267");
+            warnAboutSharedAccount(faCookies);
+            cookies = getCookiesFromString(faCookies);
+        }
+    }
+
+    private void warnAboutSharedAccount(String loginCookies) {
+        if (loginCookies.equals("a=897bc45b-1f87-49f1-8a85-9412bc103e7a;b=c8807f36-7a85-4caf-80ca-01c2a2368267")) {
+            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_ERRORED,
+                    "WARNING: Using the shared furaffinity account exposes both your IP and how many items you downloaded to the other users of the share account");
+        }
+    }
+
+    // Thread pool for finding direct image links from "image" pages (html)
+    private DownloadThreadPool furaffinityThreadPool = new DownloadThreadPool("furaffinity");
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return furaffinityThreadPool;
+    }
+
+    public FuraffinityRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "furaffinity.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "furaffinity";
+    }
+    @Override
+    public boolean hasDescriptionSupport() {
+        return false;
+    }
+    @Override
+    public Document getFirstPage() throws IOException {
+        setCookies();
+        return Http.url(url).cookies(cookies).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        Elements nextPageUrl = doc.select("a.right");
+        if (nextPageUrl.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = urlBase + nextPageUrl.first().attr("href");
+
+        sleep(500);
+
+        return Http.url(nextUrl).cookies(cookies).get();
+    }
+
+    private String getImageFromPost(String url) {
+        sleep(1000);
+        Document d;
+        try {
+            d = Http.url(url).cookies(cookies).get();
+            Elements links = d.getElementsByTag("a");
+            for (Element link : links) {
+                if (link.text().equals("Download")) {
+                    logger.info("Found image " + link.attr("href"));
+                   return "https:" + link.attr("href");
+                }
+            }
+        } catch (IOException e) {
+            return null;
+        }
+        return null;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> urls = new ArrayList<>();
+        Elements urlElements = page.select("figure.t-image > b > u > a");
+        for (Element e : urlElements) {
+            String urlToAdd = getImageFromPost(urlBase + e.select("a").first().attr("href"));
+            // Check the local urlToAdd (which can be null), not the ripper's url field
+            if (urlToAdd != null && urlToAdd.startsWith("http")) {
+                urls.add(urlToAdd);
+            }
+            if (isStopped() || isThisATest()) {
+                break;
+            }
+        }
+        return urls;
+    }
+    @Override
+    public List<String> getDescriptionsFromPage(Document page) {
+        List<String> urls = new ArrayList<>();
+        Elements urlElements = page.select("figure.t-image > b > u > a");
+        for (Element e : urlElements) {
+            urls.add(urlBase + e.select("a").first().attr("href"));
+            logger.debug("Desc2 " + urlBase + e.select("a").first().attr("href"));
+        }
+        return urls;
+    }
+    @Override
+    public int descSleepTime() {
+        return 400;
+    }
+    public String getDescription(String page) {
+        try {
+            // Fetch the image page
+            Response resp = Http.url(page)
+                    .referrer(this.url)
+                    .response();
+            cookies.putAll(resp.cookies());
+
+            // Try to find the description; parse the response once and reuse it
+            Document documentz = resp.parse();
+            Elements els = documentz.select("td[class=alt1][width=\"70%\"]");
+            if (els.isEmpty()) {
+                logger.debug("No description at " + page);
+                throw new IOException("No description found");
+            }
+            logger.debug("Description found!");
+            Element ele = els.get(0); // This is where the description is.
+            // Would break completely if FurAffinity changed site layout.
+            documentz.outputSettings(new Document.OutputSettings().prettyPrint(false));
+            ele.select("br").append("\\n");
+            ele.select("p").prepend("\\n\\n");
+            logger.debug("Returning description at " + page);
+            String tempPage = Jsoup.clean(ele.html().replaceAll("\\\\n", System.getProperty("line.separator")), "", Safelist.none(), new Document.OutputSettings().prettyPrint(false));
+            return documentz.select("meta[property=og:title]").attr("content") + "\n" + tempPage; // Overridden saveText takes first line and makes it the file name.
+        } catch (IOException ioe) {
+            logger.info("Failed to get description " + page + " : '" + ioe.getMessage() + "'");
+            return null;
+        }
+    }
+    @Override
+    public boolean saveText(URL url, String subdirectory, String text, int index) {
+        // TODO Make this better please?
+        try {
+            stopCheck();
+        } catch (IOException e) {
+            return false;
+        }
+        // First line is "<title> by <artist>"; rearrange it into "<artist>_<title>" for the filename
+        String saveAs = text.split("\n")[0];
+        saveAs = saveAs.replaceAll("^(\\S+)\\s+by\\s+(.*)$", "$2_$1");
+        Path saveFileAs = Paths.get(
+                workingDir
+                        + "/"
+                        + subdirectory
+                        + "/"
+                        + saveAs
+                        + ".txt");
+        // Create the target directory before writing, otherwise the write fails
+        if (!Files.exists(saveFileAs.getParent())) {
+            logger.info("[+] Creating directory: " + Utils.removeCWD(saveFileAs.getParent()));
+            try {
+                Files.createDirectories(saveFileAs.getParent());
+            } catch (IOException e) {
+                logger.error("[!] Error creating directory for description '" + url + "':", e);
+                return false;
+            }
+        }
+        logger.debug("Downloading " + url + "'s description to " + saveFileAs);
+        // Write the file (try-with-resources closes the stream even on failure)
+        try (OutputStream out = Files.newOutputStream(saveFileAs)) {
+            out.write(text.getBytes());
+        } catch (IOException e) {
+            logger.error("[!] Error writing description '" + url + "':", e);
+            return false;
+        }
+        return true;
+    }
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        // Gallery
+        Pattern p = Pattern.compile("^https?://www\\.furaffinity\\.net/gallery/([-_.0-9a-zA-Z]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        //Scraps
+        p = Pattern.compile("^https?://www\\.furaffinity\\.net/scraps/([-_.0-9a-zA-Z]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("Unable to find images in" + url);
+    }
+
+
+}

+ 118 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/FuskatorRipper.java

@@ -0,0 +1,118 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.Connection.Method;
+import org.jsoup.Connection.Response;
+import org.jsoup.nodes.Document;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class FuskatorRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(FuskatorRipper.class);
+
+    private String jsonurl = "https://fuskator.com/ajax/gal.aspx";
+    private String xAuthUrl = "https://fuskator.com/ajax/auth.aspx";
+    private String xAuthToken;
+    private Map<String, String> cookies;
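+    // Observed flow: the gallery page sets session cookies, auth.aspx exchanges
+    // them for an X-Auth token, and gal.aspx returns the image JSON for the hash.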
+
+    public FuskatorRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "fuskator";
+    }
+
+    @Override
+    public String getDomain() {
+        return "fuskator.com";
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String u = url.toExternalForm();
+        if (u.contains("/thumbs/")) {
+            u = u.replace("/thumbs/", "/full/");
+        }
+        if (u.contains("/expanded/")) {
+            u = u.replaceAll("/expanded/", "/full/");
+        }
+        return new URI(u).toURL();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^.*fuskator.com/full/([a-zA-Z0-9\\-~]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException(
+                "Expected fuskator.com gallery formats: " + "fuskator.com/full/id/..." + " Got: " + url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // Keep the session cookies; getXAuthToken() needs them later
+        Response res = Http.url(url).response();
+        cookies = res.cookies();
+        return res.parse();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        JSONObject json;
+
+        try {
+            getXAuthToken();
+            if (xAuthToken == null || xAuthToken.isEmpty()) {
+                throw new IOException("No xAuthToken found.");
+            }
+
+            // All good. Fetch JSON data from jsonUrl.
+            json = Http.url(jsonurl).cookies(cookies).data("X-Auth", xAuthToken).data("hash", getGID(url))
+                    .data("_", Long.toString(System.currentTimeMillis())).getJSON();
+        } catch (IOException e) {
+            logger.error("Couldnt fetch images.", e.getCause());
+            return imageURLs;
+        }
+
+        JSONArray imageArray = json.getJSONArray("images");
+        for (int i = 0; i < imageArray.length(); i++) {
+            imageURLs.add("https:" + imageArray.getJSONObject(i).getString("imageUrl"));
+        }
+
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    private void getXAuthToken() throws IOException {
+        if (cookies == null || cookies.isEmpty()) {
+            throw new IOException("Null cookies or no cookies found.");
+        }
+        Response res = Http.url(xAuthUrl).cookies(cookies).method(Method.POST).response();
+        xAuthToken = res.body();
+    }
+}

+ 87 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/GirlsOfDesireRipper.java

@@ -0,0 +1,87 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class GirlsOfDesireRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(GirlsOfDesireRipper.class);
+
+    public GirlsOfDesireRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "GirlsOfDesire";
+    }
+    @Override
+    public String getDomain() {
+        return "girlsofdesire.org";
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            Document doc = getCachedFirstPage();
+            Elements elems = doc.select(".albumName");
+            return getHost() + "_" + elems.first().text();
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^www\\.girlsofdesire\\.org/galleries/([\\w\\d-]+)/$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected girlsofdesire.org gallery format: "
+                        + "http://www.girlsofdesire.org/galleries/<name>/"
+                        + " Got: " + url);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        for (Element thumb : doc.select("td.vtop > a > img")) {
+            String imgSrc = thumb.attr("src");
+            imgSrc = imgSrc.replaceAll("_thumb\\.", ".");
+            if (imgSrc.startsWith("/")) {
+                imgSrc = "http://www.girlsofdesire.org" + imgSrc;
+            }
+            imageURLs.add(imgSrc);
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // Send referrer when downloading images
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+}

+ 146 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/Hentai2readRipper.java

@@ -0,0 +1,146 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class Hentai2readRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(Hentai2readRipper.class);
+
+    String lastPage;
+
+    public Hentai2readRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hentai2read";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hentai2read.com";
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        logger.info("Page contains albums");
+        Pattern pat = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/?");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+        return false;
+    }
+
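+    // A bare series URL matches pageContainsAlbums(); its chapter links are then
+    // queued as separate albums instead of being ripped directly.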
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select(".nav-chapters > li > div.media > a")) {
+            urlsToAddToQueue.add(elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://hentai2read\\.com/([a-zA-Z0-9_-]*)/(\\d+)?/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1) + "_" + m.group(2);
+        }
+        throw new MalformedURLException("Expected hentai2read.com URL format: " +
+                            "hentai2read.com/COMICID - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        String thumbnailLink;
+        try {
+            // If the page contains albums we want to load the main page
+            if (pageContainsAlbums(url)) {
+                return Http.url(url).get();
+            }
+            Document tempDoc = Http.url(url).get();
+            // Get the thumbnail page so we can rip all images without loading every page in the comic
+            thumbnailLink = tempDoc.select("div.col-xs-12 > div.reader-controls > div.controls-block > button > a").attr("href");
+            if (!thumbnailLink.equals("")) {
+                return Http.url(thumbnailLink).get();
+            } else {
+                return Http.url(tempDoc.select("a[data-original-title=Thumbnails]").attr("href")).get();
+            }
+        } catch (IOException e) {
+            throw new IOException("Unable to get first page");
+        }
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            return getHost() + "_" + getGID(url);
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("div.block-content > div > div.img-container > a > img.img-responsive")) {
+            String imageURL = "https:" + el.attr("src");
+            imageURL = imageURL.replace("hentaicdn.com", "static.hentaicdn.com");
+            imageURL = imageURL.replace("thumbnails/", "");
+            imageURL = imageURL.replace("tmb", "");
+            result.add(imageURL);
+        }
+        return result;
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        Element elem = doc.select("div.bg-white > ul.pagination > li > a").last();
+        if (elem == null) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = elem.attr("href");
+        // We use the global lastPage to check if we've already ripped this page,
+        // and if so we quit as there are no more pages
+        if (nextUrl.equals(lastPage)) {
+            throw new IOException("No more pages");
+        }
+        lastPage = nextUrl;
+        // Sleep for half a sec to avoid getting IP banned
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 181 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HentaiNexusRipper.java

@@ -0,0 +1,181 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.rarchives.ripme.utils.Http;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import org.jsoup.nodes.DataNode;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+public class HentaiNexusRipper extends AbstractJSONRipper {
+
+    public HentaiNexusRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hentainexus";
+    }
+    @Override
+    public String getDomain() {
+        return "hentainexus.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        /*
+            Valid URLs are /view/id, /read/id and those 2 with #pagenumber
+            https://hentainexus.com/view/9202
+            https://hentainexus.com/read/9202
+            https://hentainexus.com/view/9202#001
+            https://hentainexus.com/read/9202#001
+         */
+
+        Pattern p = Pattern.compile("^https?://hentainexus\\.com/(?:view|read)/([0-9]+)(?:\\#[0-9]+)*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected hentainexus.com URL format: " +
+                "hentainexus.com/view/id OR hentainexus.com/read/id - got " + url + "instead");
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) throws JSONException {
+
+        List<String> urlList = new ArrayList<>();
+
+        JSONArray imagesList = json.getJSONArray("f");
+        String host = json.getString("b");
+        String folder = json.getString("r");
+        String id = json.getString("i");
+
+        for (Object singleImage : imagesList) {
+            String hashTMP = ((JSONObject) singleImage).getString("h");
+            String fileNameTMP = ((JSONObject) singleImage).getString("p");
+            String imageUrlTMP = String.format("%s%s%s/%s/%s", host, folder, hashTMP, id, fileNameTMP);
+            urlList.add(imageUrlTMP);
+        }
+
+        return urlList;
+    }
+
+    @Override
+    protected JSONObject getFirstPage() throws IOException, URISyntaxException {
+        String jsonEncodedString = getJsonEncodedStringFromPage();
+        String jsonDecodedString = decodeJsonString(jsonEncodedString);
+        return new JSONObject(jsonDecodedString);
+    }
+
+    public String getJsonEncodedStringFromPage() throws MalformedURLException, IOException, URISyntaxException {
+        // Image data only appears on the /read/ page and not on the /view/ one.
+        URL readUrl = new URI(String.format("http://hentainexus.com/read/%s",getGID(url))).toURL();
+        Document document = Http.url(readUrl).response().parse();
+
+        for (Element scripts : document.getElementsByTag("script")) {
+            for (DataNode dataNode : scripts.dataNodes()) {
+                if (dataNode.getWholeData().contains("initReader")) {
+                    // Extract JSON encoded string from the JavaScript initReader() call.
+                    String data = dataNode.getWholeData().trim().replaceAll("\\r|\\n|\\t","");
+
+                    Pattern p = Pattern.compile(".*?initReader\\(\"(.*?)\",.*?\\).*?");
+                    Matcher m = p.matcher(data);
+                    if (m.matches()) {
+                        return m.group(1);
+                    }
+                }
+            }
+        }
+        return "";
+    }
+
+    public String decodeJsonString(String jsonEncodedString) {
+        /*
+            The initReader() JavaScript function accepts 2 parameters: a weird string and the window title (we can ignore this).
+            The weird string is a JSON string with some bytes shifted and swapped around and then encoded in base64.
+            The following code is a Java adaptation of the initRender() JavaScript function after manual deobfuscation.
+         */
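+        // Outline (our reading of the deobfuscated routine, not an official spec):
+        //   1. a prime sieve fills unknownArray with the first 16 primes;
+        //   2. bytes[0..63] fold into a 3-bit value selecting one of those primes as a step size;
+        //   3. an RC4-style key schedule permutes 0..255 using bytes[0..63] as the key;
+        //   4. the remaining bytes are XOR-decrypted with a keystream drawn from that table.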
+
+        byte[] jsonBytes = Base64.getDecoder().decode(jsonEncodedString);
+
+        ArrayList<Integer> unknownArray = new ArrayList<>();
+        ArrayList<Integer> indexesToUse = new ArrayList<>();
+
+        for (int i = 0x2; unknownArray.size() < 0x10; ++i) {
+            if (!indexesToUse.contains(i)) {
+                unknownArray.add(i);
+                for (int j = i << 0x1; j <= 0x100; j += i) {
+                    if (!indexesToUse.contains(j)) {
+                        indexesToUse.add(j);
+                    }
+                }
+            }
+        }
+
+        byte magicByte = 0x0;
+        for (int i = 0x0; i < 0x40; i++) {
+            magicByte = (byte) (signedToUnsigned(magicByte) ^ signedToUnsigned(jsonBytes[i]));
+            for (int j = 0x0; j < 0x8; j++) {
+                long unsignedMagicByteTMP = signedToUnsigned(magicByte);
+                magicByte = (byte) ((unsignedMagicByteTMP & 0x1) == 1 ? unsignedMagicByteTMP >>> 0x1 ^ 0xc : unsignedMagicByteTMP >>> 0x1);
+            }
+        }
+
+        magicByte = (byte) (magicByte & 0x7);
+        ArrayList<Integer> newArray = new ArrayList<>();
+
+        for (int i = 0x0; i < 0x100; i++) {
+            newArray.add(i);
+        }
+
+        int newIndex = 0, backup = 0;
+        for (int i = 0x0; i < 0x100; i++) {
+            newIndex = (newIndex + newArray.get(i) + (int) signedToUnsigned(jsonBytes[i % 0x40])) % 0x100;
+            backup = newArray.get(i);
+            newArray.set(i, newArray.get(newIndex));
+            newArray.set(newIndex, backup);
+        }
+
+        int magicByteTranslated = (int) unknownArray.get(magicByte);
+        int index1 = 0x0, index2 = 0x0, index3 = 0x0, swap1 = 0x0, xorNumber = 0x0;
+        String decodedJsonString = "";
+
+        for (int i = 0x0; i + 0x40 < jsonBytes.length; i++) {
+            index1 = (index1 + magicByteTranslated) % 0x100;
+            index2 = (index3 + newArray.get((index2 + newArray.get(index1)) % 0x100)) % 0x100;
+            index3 = (index3 + index1 + newArray.get(index1)) % 0x100;
+            swap1 = newArray.get(index1);
+            newArray.set(index1, newArray.get(index2));
+            newArray.set(index2,swap1);
+            xorNumber = newArray.get((index2 + newArray.get((index1 + newArray.get((xorNumber + index3) % 0x100)) % 0x100)) % 0x100);
+            decodedJsonString.append((char) signedToUnsigned(jsonBytes[i + 0x40] ^ xorNumber));
+        }
+
+        return decodedJsonString.toString();
+    }
+
+    private static long signedToUnsigned(int signed) {
+        return (byte) signed & 0xFF;
+    }
+}

+ 196 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoundryRipper.java

@@ -0,0 +1,196 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Method;
+import org.jsoup.Connection.Response;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+public class HentaifoundryRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(HentaifoundryRipper.class);
+
+    private Map<String,String> cookies = new HashMap<>();
+    public HentaifoundryRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hentai-foundry";
+    }
+    @Override
+    public String getDomain() {
+        return "hentai-foundry.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^.*hentai-foundry\\.com/(pictures|stories)/user/([a-zA-Z0-9\\-_]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(2);
+        }
+        throw new MalformedURLException(
+                "Expected hentai-foundry.com gallery format: "
+                        + "hentai-foundry.com/pictures/user/USERNAME"
+                        + " Got: " + url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        Response resp;
+        Document doc;
+
+        resp = Http.url("https://www.hentai-foundry.com/?enterAgree=1&size=1500")
+                .referrer("https://www.hentai-foundry.com/")
+                .cookies(cookies)
+                .response();
+        // The only cookie that seems to matter in getting around the age wall is the phpsession cookie
+        cookies.putAll(resp.cookies());
+
+        doc = resp.parse();
+        // first() can return null and attr() returns "" rather than null, so guard both
+        Element csrfInput = doc.select("input[name=YII_CSRF_TOKEN]").first();
+        String csrf_token = csrfInput != null ? csrfInput.attr("value") : null;
+        if (csrf_token != null && !csrf_token.isEmpty()) {
+            Map<String,String> data = new HashMap<>();
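+            // Opt in to every content rating so listing pages are returned unfiltered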
+            data.put("YII_CSRF_TOKEN"  , csrf_token);
+            data.put("rating_nudity"   , "3");
+            data.put("rating_violence" , "3");
+            data.put("rating_profanity", "3");
+            data.put("rating_racism"   , "3");
+            data.put("rating_sex"      , "3");
+            data.put("rating_spoilers" , "3");
+            data.put("rating_yaoi"     , "1");
+            data.put("rating_yuri"     , "1");
+            data.put("rating_teen"     , "1");
+            data.put("rating_guro"     , "1");
+            data.put("rating_furry"    , "1");
+            data.put("rating_beast"    , "1");
+            data.put("rating_male"     , "1");
+            data.put("rating_female"   , "1");
+            data.put("rating_futa"     , "1");
+            data.put("rating_other"    , "1");
+            data.put("rating_scat"     , "1");
+            data.put("rating_incest"   , "1");
+            data.put("rating_rape"     , "1");
+            data.put("filter_media"    , "A");
+            data.put("filter_order"    , Utils.getConfigString("hentai-foundry.filter_order","date_old"));
+            data.put("filter_type"     , "0");
+
+            resp = Http.url("https://www.hentai-foundry.com/site/filters")
+                       .referrer("https://www.hentai-foundry.com/")
+                       .cookies(cookies)
+                       .data(data)
+                       .method(Method.POST)
+                       .response();
+            cookies.putAll(resp.cookies());
+        }
+        else {
+            logger.info("unable to find csrf_token and set filter");
+        }
+
+        resp = Http.url(url)
+                .referrer("https://www.hentai-foundry.com/")
+                .cookies(cookies)
+                .response();
+        cookies.putAll(resp.cookies());
+        return resp.parse();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        if (!doc.select("li.next.hidden").isEmpty()) {
+            // Last page
+            throw new IOException("No more pages");
+        }
+        Elements els = doc.select("li.next > a");
+        Element first = els.first();
+        try {
+            String nextURL = first.attr("href");
+            nextURL = "https://www.hentai-foundry.com" + nextURL;
+            return Http.url(nextURL)
+                    .referrer(url)
+                    .cookies(cookies)
+                    .get();
+        } catch (NullPointerException e) {
+            throw new IOException("No more pages");
+        }
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        // this if is for ripping pdf stories
+        if (url.toExternalForm().contains("/stories/")) {
+            for (Element pdflink : doc.select("a.pdfLink")) {
+                logger.info("grabbing " + "https://www.hentai-foundry.com" + pdflink.attr("href"));
+                imageURLs.add("https://www.hentai-foundry.com" + pdflink.attr("href"));
+            }
+            return imageURLs;
+        }
+        Pattern imgRegex = Pattern.compile(".*/user/([a-zA-Z0-9\\-_]+)/(\\d+)/.*");
+        for (Element thumb : doc.select("div.thumb_square > a.thumbLink")) {
+            if (isStopped()) {
+                break;
+            }
+            Matcher imgMatcher = imgRegex.matcher(thumb.attr("href"));
+            if (!imgMatcher.matches()) {
+                logger.info("Couldn't find user & image ID in " + thumb.attr("href"));
+                continue;
+            }
+            Document imagePage;
+            try {
+                logger.info("grabbing " + "https://www.hentai-foundry.com" + thumb.attr("href"));
+                imagePage = Http.url("https://www.hentai-foundry.com" + thumb.attr("href")).cookies(cookies).get();
+            } catch (IOException e) {
+                logger.debug(e.getMessage());
+                logger.debug("Warning: could not fetch image page, skipping");
+                continue; // a null imagePage would NPE below
+            }
+            // This is here for when the image is resized to a thumbnail because ripme doesn't report a screensize
+            if (imagePage.select("div.boxbody > img.center").attr("src").contains("thumbs.")) {
+                imageURLs.add("https:" + imagePage.select("div.boxbody > img.center").attr("onclick").replace("this.src=", "").replace("'", "").replace("; $(#resize_message).hide();", ""));
+            }
+            else {
+                imageURLs.add("https:" + imagePage.select("div.boxbody > img.center").attr("src"));
+            }
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // When downloading PDFs you *need* to send the cookies with the request, or you just get the consent page
+        if (url.toExternalForm().endsWith(".pdf")) {
+            addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), cookies);
+        } else {
+            // If hentai-foundry.use_prefix is false, the ripper will not add a numbered prefix to any images
+            if (Utils.getConfigBoolean("hentai-foundry.use_prefix", true)) {
+                addURLToDownload(url, getPrefix(index));
+            } else {
+                addURLToDownload(url, "");
+            }
+        }
+    }
+
+}

+ 76 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HentaifoxRipper.java

@@ -0,0 +1,76 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class HentaifoxRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(HentaifoxRipper.class);
+
+    public HentaifoxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hentaifox";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hentaifox.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://hentaifox.com/gallery/([\\d]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected hentaifox URL format: " +
+                "https://hentaifox.com/gallery/ID - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        logger.info(doc);
+        List<String> result = new ArrayList<>();
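+        // Thumbnails end in "...t.jpg"; dropping the "t" yields the full-size image (assumed site convention).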
+        for (Element el : doc.select("div.preview_thumb > a > img")) {
+            String imageSource = "https:" + el.attr("data-src").replaceAll("t\\.jpg", ".jpg");
+            result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            Document doc = getCachedFirstPage();
+            String title = doc.select("div.info > h1").first().text();
+            return getHost() + "_" + title + "_" + getGID(url);
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 78 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HentaiimageRipper.java

@@ -0,0 +1,78 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HentaiimageRipper extends AbstractHTMLRipper {
+    public HentaiimageRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return url.toExternalForm().split("/")[2];
+    }
+
+    @Override
+    public String getDomain() {
+        return url.toExternalForm().split("/")[2];
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        try {
+            getGID(url);
+            return true;
+        } catch (MalformedURLException e) {
+            return false;
+        }
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://(?:\\w\\w\\.)?hentai-(image|comic|img-xxx).com/image/([a-zA-Z0-9_-]+)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // group(2) is the gallery slug; group(1) only captures the site variant
+            return m.group(2);
+        }
+        throw new MalformedURLException("Expected hentai-image URL format: " +
+                "https://hentai-img-xxx.com/image/ID - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("div.icon-overlay > a > img")) {
+            result.add(el.attr("src"));
+        }
+        return result;
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+
+        for (Element el : doc.select("div[id=paginator] > span")) {
+            if (el.select("a").text().equals("next>")) {
+                return Http.url("https://" + getDomain() + el.select("a").attr("href")).get();
+            }
+        }
+
+        throw new IOException("No more pages");
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 89 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HitomiRipper.java

@@ -0,0 +1,89 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.jsoup.nodes.Document;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HitomiRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(HitomiRipper.class);
+
+    private String galleryId = "";
+
+    public HitomiRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hitomi";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hitomi.la";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://hitomi.la/(cg|doujinshi|gamecg|manga)/(.+).html");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // group(2) is the gallery slug used to build image URLs; group(1) is only the gallery type
+            galleryId = m.group(2);
+            return m.group(2);
+        }
+        throw new MalformedURLException("Expected hitomi URL format: " +
+                "https://hitomi.la/(cg|doujinshi|gamecg|manga)/ID.html - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException, URISyntaxException {
+        // if we go to /GALLERYID.js we get a nice json array of all images in the gallery
+        return Http.url(new URI(url.toExternalForm().replaceAll("hitomi", "ltn.hitomi").replaceAll("\\.html$", ".js")).toURL()).ignoreContentType().get();
+    }
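+
+    // The ltn.hitomi.la .js endpoint responds with something like (assumed shape):
+    //   var galleryinfo = [{"name":"001.jpg", ...}, ...]
+    // getURLsFromPage() strips the "var galleryinfo =" prefix and parses the remainder as a JSON array.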
+
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        String json = doc.text().replaceAll("var galleryinfo =", "");
+        JSONArray json_data = new JSONArray(json);
+        for (int i = 0; i < json_data.length(); i++) {
+            result.add("https://ba.hitomi.la/galleries/" + galleryId + "/" + json_data.getJSONObject(i).getString("name"));
+        }
+
+        return result;
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title and username as GID
+            Document doc = Http.url(url).get();
+            return getHost() + "_" + getGID(url) + "_" +
+                    doc.select("title").text().replaceAll(" - Read Online - hentai artistcg \\| Hitomi.la", "");
+        } catch (IOException e) {
+            logger.info("Falling back");
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 305 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HqpornerRipper.java

@@ -0,0 +1,305 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.Connection.Response;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+
+public class HqpornerRipper extends AbstractHTMLRipper {
+
+	private static final Logger logger = LogManager.getLogger(HqpornerRipper.class);
+
+	private static final String VIDEO_URL_PREFIX = "https://hqporner.com";
+
+	private Pattern p1 = Pattern.compile("https?://hqporner.com/hdporn/([a-zA-Z0-9_-]*).html/?$"); // video pattern.
+	private Pattern p2 = Pattern.compile("https://hqporner.com/([a-zA-Z0-9/_-]+)"); // category/top/actress/studio pattern.
+	private Pattern p3 = Pattern.compile("https?://[A-Za-z0-9/._-]+\\.mp4"); // matches links ending with .mp4 ('-' placed last so it stays literal)
+	private DownloadThreadPool hqpornerThreadPool = new DownloadThreadPool("hqpornerThreadPool");
+	private String subdirectory = "";
+
+	public HqpornerRipper(URL url) throws IOException {
+		super(url);
+	}
+
+	@Override
+	public String getHost() {
+		return "hqporner";
+	}
+
+	@Override
+	public String getDomain() {
+		return "hqporner.com";
+	}
+
+	@Override
+	public String getGID(URL url) throws MalformedURLException {
+
+		Matcher m1 = p1.matcher(url.toExternalForm());
+		if (m1.matches()) {
+			return m1.group(1);
+		}
+		Matcher m2 = p2.matcher(url.toExternalForm());
+		if (m2.matches()) {
+			if (m2.group(1).indexOf('/') == -1) {
+				return m2.group(1);
+			}
+			return m2.group(1).substring(0, m2.group(1).indexOf('/')); //returns actress/category/top/studio
+		}
+		throw new MalformedURLException("Expected hqporner URL format: " + "hqporner.com/hdporn/NAME\n"
+				+ "hqporner.com/category/myfavcategory\n" + "hqporner.com/actress/myfavactress\n"
+				+ "hqporner.com/studio/myFavStudio\n" + " - got " + url + " instead.");
+	}
+
+	@Override
+	public Document getFirstPage() throws IOException, URISyntaxException {
+		return super.getFirstPage();
+	}
+
+	@Override
+	public List<String> getURLsFromPage(Document doc) {
+		List<String> result = new ArrayList<>();
+		Matcher m1 = p1.matcher(this.url.toExternalForm()); // video url.
+		Matcher m2 = p2.matcher(this.url.toExternalForm()); // category/top/actress/studio url.
+
+		if (m1.matches()) {
+			// Single video URL; rip it directly.
+			result.add(this.url.toExternalForm());
+			return result;
+		} else if (m2.matches()) {
+			if (m2.group(1).indexOf('/') != -1)
+				subdirectory = m2.group(1).substring(m2.group(1).indexOf('/') + 1);
+			result = getAllVideoUrls(doc);
+			return result;
+		}
+		// Empty list for anything else.
+		return result;
+	}
+
+	public List<String> getAllVideoUrls(Document doc) {
+		// div.6u h3  a.click-trigger
+		List<String> result = new ArrayList<>();
+		Elements videoLinks = doc.select("div.6u h3  a.click-trigger");
+		for (Element e : videoLinks) {
+			if (e.hasAttr("href")) {
+				result.add(VIDEO_URL_PREFIX + e.attr("href"));
+			}
+		}
+
+		return result;
+	}
+
+	@Override
+	public boolean tryResumeDownload() {
+		return true;
+	}
+
+	@Override
+	public void downloadURL(URL url, int index) {
+		hqpornerThreadPool.addThread(new HqpornerDownloadThread(url, index, subdirectory));
+	}
+
+	@Override
+	public Document getNextPage(Document doc) throws IOException {
+		Elements pageNumbers = doc.select("ul.pagination a[href]");
+		if (!pageNumbers.isEmpty() && pageNumbers.last().text().contains("Next")) {
+			return Http.url(VIDEO_URL_PREFIX + pageNumbers.last().attr("href")).get();
+		}
+		throw new IOException("No next page found.");
+	}
+
+	@Override
+	public DownloadThreadPool getThreadPool() {
+		return hqpornerThreadPool;
+	}
+
+	@Override
+	public boolean useByteProgessBar() {
+		return true;
+	}
+
+	private class HqpornerDownloadThread implements Runnable {
+
+		private URL hqpornerVideoPageUrl;
+		//private int index;
+		private String subdirectory;
+
+		public HqpornerDownloadThread(URL url, int index, String subdirectory) {
+			this.hqpornerVideoPageUrl = url;
+			//this.index = index;
+			this.subdirectory = subdirectory;
+		}
+
+		@Override
+		public void run() {
+			fetchVideo();
+		}
+
+		public void fetchVideo() {
+			try {
+
+				Document doc = Http.url(hqpornerVideoPageUrl).retries(3).get();
+				String downloadUrl = null;
+				String videoPageUrl = "https:" + doc.select("div.videoWrapper > iframe").attr("src");
+
+				if (videoPageUrl.contains("mydaddy")) {
+					downloadUrl = getVideoFromMyDaddycc(videoPageUrl);
+				} else if (videoPageUrl.contains("flyflv")) {
+					downloadUrl = getVideoFromFlyFlv(videoPageUrl);
+				} else {
+					//trying a generic selector to grab video url.
+					downloadUrl = getVideoFromUnknown(videoPageUrl);
+				}
+
+				if (downloadUrl != null) {
+					addURLToDownload(new URI(downloadUrl).toURL(), "", subdirectory, "", null, getVideoName(), "mp4");
+				}
+
+			} catch (IOException | URISyntaxException e) {
+				logger.error("[!] Exception while downloading video.", e);
+			}
+		}
+
+		private String getVideoFromMyDaddycc(String videoPageUrl) {
+			Pattern p = Pattern.compile("(//[a-zA-Z0-9\\.]+/pub/cid/[a-z0-9]+/1080.mp4)");
+			try {
+				logger.info("Downloading from mydaddy " + videoPageUrl);
+				Document page = Http.url(videoPageUrl).referrer(hqpornerVideoPageUrl).get();
+				Matcher m = p.matcher(page.html());
+				logger.info(page.html());
+				if (m.find()) {
+					return "https:" + m.group(0);
+				}
+
+			} catch (IOException e) {
+				logger.error("Unable to get page with video");
+			}
+			return null;
+		}
+
+		private String getVideoFromFlyFlv(String videoPageUrl) {
+			try {
+				logger.info("Downloading from flyflv " + videoPageUrl);
+				Document page = Http.url(videoPageUrl).referrer(hqpornerVideoPageUrl).get();
+				String[] videoSizes = { "1080p", "720p", "360p" };
+				for (String videoSize : videoSizes) {
+					String urlToReturn = page.select("video > source[label=" + videoSize + "]").attr("src");
+					if (urlToReturn != null && !urlToReturn.equals("")) {
+						return "https:" + urlToReturn;
+					}
+				}
+
+			} catch (IOException e) {
+				logger.error("Unable to get page with video");
+			}
+			return null;
+		}
+
+		private String getVideoFromUnknown(String videoPageurl) {
+			// If the video host is neither mydaddy nor flyflv, TRY a generic approach:
+			// 1. Search any src$=.mp4
+			// 2. Pattern match http(s)://.../../abcd.mp4
+			// 3. GET all src link with same host and run 2.
+
+			try {
+				logger.info("Trying to download from unknown video host " + videoPageurl);
+				URL url = new URI(videoPageurl).toURL();
+				Response response = Http.url(url).referrer(hqpornerVideoPageUrl).response();
+				Document doc = response.parse();
+
+				// 1. Search for src$=.mp4
+				Elements endingWithMp4 = doc.select("[src$=.mp4]");
+				if (!endingWithMp4.isEmpty()) {
+					List<String> list = new ArrayList<>();
+					endingWithMp4.forEach((e) -> list.add(e.attr("src")));
+					return getBestQualityLink(list);
+				}
+
+				// 2. Pattern match https?://somehost.cc/example123/abcd.mp4
+				String link = matchUrlByPattern(p3, doc.html());
+				if (link != null) {
+					return link;
+				}
+
+				// 3. GET all src link with same host and run 2.
+				link = null;
+				Elements allElementsWithSrc = doc.select("[src*=" + url.getHost() + "]"); //all urls from same host.
+				allElementsWithSrc = allElementsWithSrc.select("[src~=/[A-Za-z0-9_-]+$]"); // remove links with extensions( .js).
+				for (Element e : allElementsWithSrc) {
+					Document d = Http.url(e.attr("src")).referrer(url.getHost()).get();
+					link = matchUrlByPattern(p3, d.html());
+					if (link != null) {
+						return link;
+					}
+				}
+
+			} catch (IOException | URISyntaxException e) {
+				logger.error("Unable to get video url using generic methods.");
+			}
+
+			// RIP unknown ripper.
+			logger.error("Unable to get video url using generic methods.");
+			return null;
+
+		}
+
+		private String matchUrlByPattern(Pattern pattern, String html) {
+			// Step 2. function
+			Matcher m = pattern.matcher(html);
+			List<String> list = new ArrayList<>();
+			while (m.find()) {
+				list.add(m.group());
+			}
+			if (!list.isEmpty()) {
+				return getBestQualityLink(list);
+			}
+
+			return null;
+		}
+
+		private String getVideoName() {
+			try {
+				String filename = getGID(hqpornerVideoPageUrl);
+				return filename;
+			} catch (MalformedURLException e) {
+				return "1080";
+			}
+		}
+
+	}// class HqpornerDownloadThread
+
+	public String getBestQualityLink(List<String> list) {
+		// Return the link with the highest-quality substring. Keeping it simple for now.
+		// 1080 > 720 > 480 > 360 > 240
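+		// e.g. for ["clip_720p.mp4", "clip_1080p.mp4"] (hypothetical names) this returns the 1080p link.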
+		if (list.isEmpty()) {
+			return null;
+		}
+
+		String[] qualities = { "2160", "2160p", "1440", "1440p", "1080", "1080p", "720", "720p", "480", "480p" };
+		for (String quality : qualities) {
+			for (String s : list) {
+				if (s.contains(quality)) {
+					return s;
+				}
+			}
+		}
+		// Could not find a best-quality match. Return the first link.
+		return list.get(0);
+	}
+
+}

+ 164 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/HypnohubRipper.java

@@ -0,0 +1,164 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class HypnohubRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(HypnohubRipper.class);
+
+    public HypnohubRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "hypnohub";
+    }
+
+    @Override
+    public String getDomain() {
+        return "hypnohub.net";
+    }
+
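+    // Example query strings (assumed gelbooru-style URLs):
+    //   .../index.php?page=pool&s=show&id=2303  -> GID "2303"
+    //   .../index.php?page=post&s=view&id=1234  -> GID "post&s=view&id=1234"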
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        String query = url.getQuery();
+        if (query == null) {
+            throw new MalformedURLException("URL missing query: " + url);
+        }
+        if (query.contains("page=pool")) {
+            for (String param : query.split("&")) {
+                if (param.startsWith("id=")) {
+                    return param.substring("id=".length());
+                }
+            }
+            throw new MalformedURLException("Pool URL missing id: " + url);
+        } else if (query.startsWith("page=post")) {
+            // Drop "page=" to satisfy testGetGID
+            return query.substring("page=".length());
+        }
+        throw new MalformedURLException("Unexpected URL format for GID: " + url);
+    }
+
+    /**
+     * Fetches a post page and extracts its full-size image URL.
+     */
+    private String ripPost(String postUrl) throws IOException {
+        logger.info("Fetching post: {}", postUrl);
+        Document doc = Http.url(postUrl).get();
+        return extractImageUrl(doc, postUrl);
+    }
+
+    /**
+     * Extracts the full-size image URL from an already-fetched post Document.
+     */
+    private String ripPost(Document doc) {
+        logger.info("Parsing post document: {}", url);
+        return extractImageUrl(doc, url.toExternalForm());
+    }
+
+    /**
+     * Shared extraction logic: tries the displayed image, then the
+     * "Original image" link, then the og:image meta tag.
+     */
+    private String extractImageUrl(Document doc, String sourceUrl) {
+        // Primary selector: the displayed sample image
+        Element img = doc.selectFirst("img#image");
+        if (img != null) {
+            return toAbsoluteUrl(img.attr("src"));
+        }
+        // Fallback: the original image link
+        Element origLink = doc.selectFirst("a:matchesOwn(^Original image$)");
+        if (origLink != null) {
+            return toAbsoluteUrl(origLink.attr("href"));
+        }
+        // Final fallback: meta og:image
+        Element meta = doc.selectFirst("meta[property=og:image]");
+        if (meta != null) {
+            return toAbsoluteUrl(meta.attr("content"));
+        }
+        logger.warn("No image found on post page: {}", sourceUrl);
+        return null;
+    }
+
+    private String toAbsoluteUrl(String src) {
+        if (src.startsWith("//")) {
+            return "https:" + src;
+        }
+        if (src.startsWith("/")) {
+            return "https://hypnohub.net" + src;
+        }
+        return src;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        String pageUrl = url.toExternalForm();
+        if (pageUrl.contains("page=pool")) {
+            // Iterate over all thumbnail spans on the pool page
+            for (Element link : doc.select("span.thumb > a[href*='page=post']")) {
+                String href = link.attr("href");
+                String fullPostUrl = href.startsWith("http") ? href : "https://hypnohub.net/" + href;
+                try {
+                    String imgUrl = ripPost(fullPostUrl);
+                    if (imgUrl != null) {
+                        result.add(imgUrl);
+                    }
+                } catch (IOException e) {
+                    logger.error("Failed to rip post {}", fullPostUrl, e);
+                }
+            }
+        } else if (pageUrl.contains("page=post")) {
+            String imgUrl = ripPost(doc);
+            if (imgUrl != null) {
+                result.add(imgUrl);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // url here is already a direct image URL
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 170 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ImagebamRipper.java

@@ -0,0 +1,170 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang.StringUtils;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+public class ImagebamRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ImagebamRipper.class);
+
+    // Thread pool for finding direct image links from "image" pages (html)
+    private DownloadThreadPool imagebamThreadPool = new DownloadThreadPool("imagebam");
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return imagebamThreadPool;
+    }
+
+    public ImagebamRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "imagebam";
+    }
+    @Override
+    public String getDomain() {
+        return "imagebam.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^https?://[wm.]*imagebam.com/(gallery|view)/([a-zA-Z0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected imagebam gallery format: "
+                        + "http://www.imagebam.com/gallery/galleryid"
+                        + " Got: " + url);
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        Elements hrefs = doc.select("a.pagination_current + a.pagination_link");
+        if (hrefs.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = "http://www.imagebam.com" + hrefs.first().attr("href");
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        for (Element thumb : doc.select("div > a[class=thumbnail]:not(.footera)")) {
+            imageURLs.add(thumb.attr("href"));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        ImagebamImageThread t = new ImagebamImageThread(url, index);
+        imagebamThreadPool.addThread(t);
+        sleep(500);
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            Elements elems = getCachedFirstPage().select("[id=gallery-name]");
+            String title = elems.first().text();
+            logger.info("Title text: '" + title + "'");
+            if (StringUtils.isNotBlank(title)) {
+                return getHost() + "_" + getGID(url) + " (" + title + ")";
+            }
+        } catch (Exception e) {
+            // Fall back to default album naming convention
+            logger.warn("Failed to get album title from " + url, e);
+        }
+        return super.getAlbumTitle(url);
+    }
+
+    /**
+     * Helper class to find and download images found on "image" pages
+     *
+     * Handles case when site has IP-banned the user.
+     */
+    private class ImagebamImageThread implements Runnable {
+        private final URL url; //link to "image page"
+        private final int index; //index in album
+
+        ImagebamImageThread(URL url, int index) {
+            super();
+            this.url = url;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            fetchImage();
+        }
+
+        /**
+         * Rips useful image from "image page"
+         */
+        private void fetchImage() {
+            try {
+                Map<String, String> cookies = new HashMap<>();
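+                // The nsfw_inter cookie skips imagebam's NSFW interstitial page (assumed site behavior).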
+                cookies.put("nsfw_inter", "1");
+                Document doc = Jsoup.connect(url.toString())
+                        .cookies(cookies)
+                        .get();
+
+                // Find image
+                String imgsrc = ""; // initialize so no NullPointerException can occur below
+                Elements elem = doc.select("img[class*=main-image]");
+                if ((elem != null) && (elem.size() > 0)) {
+                    imgsrc = elem.first().attr("src");
+                }
+
+                // If no image was found, log it and bail out.
+                if (imgsrc.isEmpty()) {
+                    logger.warn("Image not found at " + this.url);
+                    return;
+                }
+
+                // Provide prefix and let the AbstractRipper "guess" the filename
+                String prefix = "";
+                if (Utils.getConfigBoolean("download.save_order", true)) {
+                    prefix = String.format("%03d_", index);
+                }
+
+                addURLToDownload(new URI(imgsrc).toURL(), prefix);
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while loading/parsing " + this.url, e);
+            }
+        }
+    }
+}

+ 318 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ImagefapRipper.java

@@ -0,0 +1,318 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+
+public class ImagefapRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ImagefapRipper.class);
+
+    private int callsMade = 0;
+    private long startTime = System.nanoTime();
+
+    private static final int RETRY_LIMIT = 10;
+    private static final int HTTP_RETRY_LIMIT = 3;
+    private static final int RATE_LIMIT_HOUR = 1000;
+
+    // All sleep times are in milliseconds
+    private static final int PAGE_SLEEP_TIME = 60 * 60 * 1000 / RATE_LIMIT_HOUR;
+    private static final int IMAGE_SLEEP_TIME = 60 * 60 * 1000 / RATE_LIMIT_HOUR;
+    // Timeout when blocked = 1 hour. Retries are spread across that hour, plus one final retry past the hour mark.
+    private static final int IP_BLOCK_SLEEP_TIME = (int) Math.round((double) 60 / (RETRY_LIMIT - 1) * 60 * 1000);
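+    // With RATE_LIMIT_HOUR = 1000, the sleep times work out to 3,600,000 / 1000 = 3600 ms between requests,
+    // and IP_BLOCK_SLEEP_TIME = round(60 / 9 * 60 * 1000) = 400,000 ms (~6.7 minutes) between block retries.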
+
+    public ImagefapRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "imagefap";
+    }
+    @Override
+    public String getDomain() {
+        return "imagefap.com";
+    }
+
+    /**
+     * Reformat given URL into the desired format (all images on single page)
+     */
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String gid = getGID(url);
+        String newURL = "https://www.imagefap.com/pictures/" + gid + "/random-string";
+        logger.debug("Changed URL from " + url + " to " + newURL);
+        return new URI(newURL).toURL();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        // Old format (I suspect no longer supported)
+        p = Pattern.compile("^.*imagefap.com/gallery.php\\?pgid=([a-f0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        p = Pattern.compile("^.*imagefap.com/gallery.php\\?gid=([0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        p = Pattern.compile("^.*imagefap.com/gallery/([a-f0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        // most recent format
+        p = Pattern.compile("^.*imagefap.com/pictures/([a-f0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected imagefap.com gallery formats: "
+                        + "imagefap.com/gallery.php?gid=####... or "
+                        + "imagefap.com/pictures/####..."
+                        + " Got: " + url);
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+
+        Document firstPage = getPageWithRetries(url);
+
+        sendUpdate(STATUS.LOADING_RESOURCE, "Loading first page...");
+
+        return firstPage;
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException, URISyntaxException {
+        String nextURL = null;
+        for (Element a : doc.select("a.link3")) {
+            if (a.text().contains("next")) {
+                nextURL = this.sanitizeURL(this.url) + a.attr("href");
+                break;
+            }
+        }
+        if (nextURL == null) {
+            throw new IOException("No next page found");
+        }
+        // Sleep before fetching next page.
+        sleep(PAGE_SLEEP_TIME);
+
+        sendUpdate(STATUS.LOADING_RESOURCE, "Loading next page URL: " + nextURL);
+        logger.info("Attempting to load next page URL: " + nextURL);
+
+        // Load next page
+        Document nextPage = getPageWithRetries(new URI(nextURL).toURL());
+
+        return nextPage;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+
+        List<String> imageURLs = new ArrayList<>();
+
+        logger.debug("Trying to get URLs from document... ");
+
+        for (Element thumb : doc.select("#gallery img")) {
+            if (!thumb.hasAttr("src") || !thumb.hasAttr("width")) {
+                continue;
+            }
+            String image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href"));
+
+            if (image == null) {
+                for (int i = 0; i < HTTP_RETRY_LIMIT; i++) {
+                    image = getFullSizedImage("https://www.imagefap.com" + thumb.parent().attr("href"));
+                    if (image != null) {
+                        break;
+                    }
+                    sleep(PAGE_SLEEP_TIME);
+                }
+                if (image == null)
+                    throw new RuntimeException("Unable to extract image URL from single image page! Unable to continue");
+            }
+
+            logger.debug("Adding imageURL: '" + image + "'");
+
+            imageURLs.add(image);
+            if (isThisATest()) {
+                break;
+            }
+        }
+        logger.debug("Adding " + imageURLs.size() + " URLs to download");
+
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // Send referrer for image downloads
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException, URISyntaxException {
+        try {
+            // Attempt to use album title as GID
+            String title = getCachedFirstPage().title();
+            title = title.replace("Porn Pics & Porn GIFs", "");
+            title = title.replace(" ", "_");
+            String toReturn = getHost() + "_" + title + "_" + getGID(url);
+            return toReturn.replaceAll("__", "_");
+        } catch (IOException e) {
+            return super.getAlbumTitle(url);
+        }
+    }
+
+    private String getFullSizedImage(String pageURL) {
+        try {
+            // Sleep before fetching image.
+            sleep(IMAGE_SLEEP_TIME);
+
+            Document doc = getPageWithRetries(new URI(pageURL).toURL());
+
+            String framedPhotoUrl = doc.select("img#mainPhoto").attr("data-src");
+
+            // Use a query-less version of the URL to reduce failures caused by query params that differ between the li elements and the mainPhoto URL
+            String noQueryPhotoUrl = framedPhotoUrl.split("\\?")[0];
+
+            logger.debug("noQueryPhotoUrl: " + noQueryPhotoUrl);
+
+            // Look for a li > a element whose "framed" attribute starts with noQueryPhotoUrl (the only reference on the page to the full URL)
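+            // Assumed markup: <li><a framed="<noQueryPhotoUrl>?params" href="<full-size URL>">...</a></li>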
+            Elements selectedItem = doc.select("ul.thumbs > li > a[framed^='"+noQueryPhotoUrl+"']");
+
+            // the fullsize URL is in the href attribute
+            String fullSizedUrl = selectedItem.attr("href");
+
+            if("".equals(fullSizedUrl))
+                throw new IOException("JSoup full URL extraction failed from '" + selectedItem.html() + "'");
+
+            logger.debug("fullSizedUrl: " + fullSizedUrl);
+
+            return fullSizedUrl;
+
+        } catch (IOException | URISyntaxException e) {
+            logger.debug("Unable to get full size image URL from page: " + pageURL + " because: " +  e.getMessage());
+            return null;
+        }
+    }
+
+    /**
+     * Attempts to get page, checks for IP ban, waits.
+     * @param url
+     * @return Page document
+     * @throws IOException If page loading errors, or if retries are exhausted
+     */
+    private Document getPageWithRetries(URL url) throws IOException {
+        Document doc = null;
+        int retries = RETRY_LIMIT;
+        while (true) {
+
+            sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
+
+            // For debugging the rate limit checker. Useful to track whether the timeout should be altered or not.
+            callsMade++;
+            checkRateLimit();
+
+            logger.info("Retrieving " + url);
+
+            boolean httpCallThrottled = false;
+            int httpAttempts = 0;
+
+            // we attempt the http call, knowing it can fail for network reasons
+            while(true) {
+                httpAttempts++;
+                try {
+                    doc = Http.url(url).get();
+                } catch(IOException e) {
+
+                    logger.info("Retrieving " + url + " error: " + e.getMessage());
+
+                    if(e.getMessage().contains("404"))
+                        throw new IOException("Gallery/Page not found!");
+
+                    if(httpAttempts < HTTP_RETRY_LIMIT) {
+                        sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed: " + e.getMessage() + " retrying " + httpAttempts + " / " + HTTP_RETRY_LIMIT);
+
+                        // we sleep for a few seconds
+                        sleep(PAGE_SLEEP_TIME);
+                        continue;
+                    } else {
+                        sendUpdate(STATUS.DOWNLOAD_WARN, "HTTP call failed too many times: " + e.getMessage() + " treating this as a throttle");
+                        httpCallThrottled = true;
+                    }
+                }
+                // no errors, we exit
+                break;
+            }
+
+            if (httpCallThrottled || (doc != null && doc.toString().contains("Your IP made too many requests to our servers and we need to check that you are a real human being"))) {
+                if (retries == 0) {
+                    throw new IOException("Hit rate limit and maximum number of retries, giving up");
+                }
+                String message = "Probably hit rate limit while loading " + url + ", sleeping for " + IP_BLOCK_SLEEP_TIME + "ms, " + retries + " retries remaining";
+                logger.warn(message);
+                sendUpdate(STATUS.DOWNLOAD_WARN, message);
+                retries--;
+                try {
+                    Thread.sleep(IP_BLOCK_SLEEP_TIME);
+                } catch (InterruptedException e) {
+                    throw new IOException("Interrupted while waiting for rate limit to subside");
+                }
+            } else {
+                return doc;
+            }
+        }
+    }
+
+    /**
+     * Used for debugging the rate limit issue.
+     * The goal is to avoid hitting the rate limit altogether by staying under the threshold.
+     * @return Long duration
+     */
+    private long checkRateLimit() {
+        long endTime = System.nanoTime();
+        long duration = (endTime - startTime) / 1000000;
+
+        int rateLimitMinute = 100;
+        int rateLimitFiveMinutes = 200;
+        int rateLimitHour = RATE_LIMIT_HOUR;        // Request allowed every 3.6 seconds.
+
+        if(duration / 1000 < 60){
+            logger.debug("Rate limit: " + (rateLimitMinute - callsMade) + " calls remaining for first minute mark.");
+        } else if(duration / 1000 <  300){
+            logger.debug("Rate limit: " + (rateLimitFiveMinutes - callsMade) + " calls remaining for first 5 minute mark.");
+        } else if(duration / 1000 <  3600){
+            logger.debug("Rate limit: " + (rateLimitHour - callsMade) + " calls remaining for first hour mark.");
+        }
+
+        return duration;
+    }
+
+
+}

+ 123 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ImagevenueRipper.java

@@ -0,0 +1,123 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+public class ImagevenueRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ImagevenueRipper.class);
+
+    // Thread pool for finding direct image links from "image" pages (html)
+    private DownloadThreadPool imagevenueThreadPool = new DownloadThreadPool("imagevenue");
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return imagevenueThreadPool;
+    }
+
+    public ImagevenueRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "imagevenue";
+    }
+    @Override
+    public String getDomain() {
+        return "imagevenue.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^https?://.*imagevenue.com/galshow.php\\?gal=([a-zA-Z0-9\\-_]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected imagevenue gallery format: "
+                        + "http://...imagevenue.com/galshow.php?gal=gallery_...."
+                        + " Got: " + url);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
+        for (Element thumb : doc.select("a[target=_blank]")) {
+            imageURLs.add(thumb.attr("href"));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        ImagevenueImageThread t = new ImagevenueImageThread(url, index);
+        imagevenueThreadPool.addThread(t);
+    }
+
+    /**
+     * Helper class to find and download images found on "image" pages
+     *
+     * Handles case when site has IP-banned the user.
+     */
+    private class ImagevenueImageThread implements Runnable {
+        private final URL url;
+        private final int index;
+
+        ImagevenueImageThread(URL url, int index) {
+            super();
+            this.url = url;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            fetchImage();
+        }
+
+        private void fetchImage() {
+            try {
+                Document doc = Http.url(url)
+                                   .retries(3)
+                                   .get();
+                // Find image
+                Elements images = doc.select("a > img");
+                if (images.isEmpty()) {
+                    logger.warn("Image not found at " + this.url);
+                    return;
+                }
+                Element image = images.first();
+                String imgsrc = image.attr("src");
+                imgsrc = "http://" + this.url.getHost() + "/" + imgsrc;
+                // Provide prefix and let the AbstractRipper "guess" the filename
+                String prefix = "";
+                if (Utils.getConfigBoolean("download.save_order", true)) {
+                    prefix = String.format("%03d_", index);
+                }
+                addURLToDownload(new URI(imgsrc).toURL(), prefix);
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while loading/parsing " + this.url, e);
+            }
+        }
+    }
+}

+ 57 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ImgboxRipper.java

@@ -0,0 +1,57 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class ImgboxRipper extends AbstractHTMLRipper {
+
+    public ImgboxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "imgbox";
+    }
+    @Override
+    public String getDomain() {
+        return "imgbox.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://[wm.]*imgbox\\.com/g/([a-zA-Z0-9]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected imgbox.com URL format: " +
+                        "imgbox.com/g/albumid - got " + url + "instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> imageURLs = new ArrayList<>();
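+        // Rewrite thumbnail URLs into originals: "thumbs" -> "images" in the host, "_b" (thumb) -> "_o" (original)
+        // in the filename, and any digit + "-s" host fragment -> "i" (assumed imgbox CDN naming).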
+        for (Element thumb : doc.select("div.boxed-content > a > img")) {
+            String image = thumb.attr("src").replaceAll("thumbs", "images");
+            image = image.replace("_b", "_o");
+            image = image.replaceAll("\\d-s", "i");
+            imageURLs.add(image);
+        }
+        return imageURLs;
+    }
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 629 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ImgurRipper.java

@@ -0,0 +1,629 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.jsoup.Jsoup;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.safety.Safelist;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ui.RipStatusMessage.STATUS;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+public class ImgurRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ImgurRipper.class);
+
+    private static final String DOMAIN = "imgur.com",
+                                HOST   = "imgur";
+
+    private final int SLEEP_BETWEEN_ALBUMS;
+
+    private Document albumDoc;
+
+    enum ALBUM_TYPE {
+        ALBUM,
+        USER,
+        USER_ALBUM,
+        USER_IMAGES,
+        SINGLE_IMAGE,
+        SUBREDDIT
+    }
+
+    private ALBUM_TYPE albumType;
+
+    public ImgurRipper(URL url) throws IOException {
+        super(url);
+        SLEEP_BETWEEN_ALBUMS = 1;
+    }
+
+    /**
+     * Imgur ripper does not return the same URL except when ripping
+     * many albums at once (USER). In this case, we want duplicates.
+     */
+    @Override
+    public boolean allowDuplicates() {
+        return albumType == ALBUM_TYPE.USER;
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        if (!url.getHost().endsWith(DOMAIN)) {
+           return false;
+        }
+        try {
+            getGID(url);
+        } catch (Exception e) {
+            // Can't get GID, can't rip it.
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    protected String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        // No-op as we override rip() method
+    }
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+        // No-op as we override rip() method
+        return Arrays.asList();
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String u = url.toExternalForm();
+        if (u.indexOf('#') >= 0) {
+            u = u.substring(0, u.indexOf('#'));
+        }
+        u = u.replace("imgur.com/gallery/", "imgur.com/a/");
+        u = u.replace("https?://m\\.imgur\\.com", "http://imgur.com");
+        u = u.replace("https?://i\\.imgur\\.com", "http://imgur.com");
+        return new URI(u).toURL();
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException {
+        String gid = null;
+        try {
+            gid = getGID(url);
+        } catch (URISyntaxException e) {
+            throw new MalformedURLException(e.getMessage());
+        }
+        if (this.albumType == ALBUM_TYPE.ALBUM) {
+            try {
+                // Attempt to use album title as GID
+                if (albumDoc == null) {
+                    albumDoc = Http.url(url).get();
+                }
+
+                Elements elems;
+
+                /*
+                // TODO: Add config option for including username in album title.
+                // It's possible a lot of users would not be interested in that info.
+                String user = null;
+                elems = albumDoc.select(".post-account");
+                if (elems.size() > 0) {
+                    Element postAccount = elems.get(0);
+                    if (postAccount != null) {
+                        user = postAccount.text();
+                    }
+                }
+                */
+
+                String title;
+                final String defaultTitle1 = "Imgur: The most awesome images on the Internet";
+                final String defaultTitle2 = "Imgur: The magic of the Internet";
+                logger.info("Trying to get album title");
+                elems = albumDoc.select("meta[property=og:title]");
+                title = elems.attr("content");
+                logger.debug("Title is " + title);
+                // This is here in case the album is unnamed, to prevent
+                // "Imgur: The most awesome images on the Internet" from being appended to the album name
+                if (title.contains(defaultTitle1) || title.contains(defaultTitle2)) {
+                    logger.debug("Album is untitled or imgur is returning the default title");
+                    // We set the title to "" here because if it's found in the next few attempts it will be changed,
+                    // but if it's not found there will be no reason to set it later
+                    title = "";
+                    logger.debug("Trying to use title tag to get title");
+                    elems = albumDoc.select("title");
+                    if (elems.text().contains(defaultTitle1) || elems.text().contains(defaultTitle2)) {
+                        logger.debug("Was unable to get album title or album was untitled");
+                    }
+                    else {
+                        title = elems.text();
+                    }
+                }
+
+                String albumTitle = "imgur_";
+                albumTitle += gid;
+                albumTitle += "_" + title;
+
+                return albumTitle;
+            } catch (IOException e) {
+                // Fall back to default album naming convention
+            }
+        }
+        return getHost() + "_" + gid;
+    }
+
+    @Override
+    public void rip() throws IOException {
+        try {
+            switch (albumType) {
+                case ALBUM:
+                    // Fall-through
+                case USER_ALBUM:
+                    logger.info("Album type is USER_ALBUM");
+                    // Don't call getAlbumTitle(this.url) with this
+                    // as it seems to cause the album to be downloaded to a subdir.
+                    ripAlbum(this.url);
+                    break;
+                case SINGLE_IMAGE:
+                    logger.info("Album type is SINGLE_IMAGE");
+                    ripSingleImage(this.url);
+                    break;
+                case USER:
+                    logger.info("Album type is USER");
+                    ripUserAccount(url);
+                    break;
+                case SUBREDDIT:
+                    logger.info("Album type is SUBREDDIT");
+                    ripSubreddit(url);
+                    break;
+                case USER_IMAGES:
+                    logger.info("Album type is USER_IMAGES");
+                    ripUserImages(url);
+                    break;
+            }
+        } catch (URISyntaxException e) {
+            throw new IOException("Failed ripping " + this.url, e);
+        }
+        waitForThreads();
+    }
+
+    private void ripSingleImage(URL url) throws IOException, URISyntaxException {
+        String strUrl = url.toExternalForm();
+        var gid = getGID(url);
+        var json = getSingleImageData(String.format("https://api.imgur.com/post/v1/media/%s?include=media,adconfig,account", gid));
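+        // The v1 media endpoint returns a JSON object whose "media" array lists each image/video in the post.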
+        var media = json.getJSONArray("media");
+        if (media.length() == 0) {
+            throw new IOException(String.format("Failed to fetch image for url %s", strUrl));
+        }
+        if (media.length() > 1) {
+            logger.warn(String.format("Got multiple images for url %s", strUrl));
+        }
+        addURLToDownload(extractImageUrlFromJson((JSONObject)media.get(0)), "");
+    }
+
+    private void ripAlbum(URL url) throws IOException, URISyntaxException {
+        ripAlbum(url, "");
+    }
+
+    private void ripAlbum(URL url, String subdirectory) throws IOException, URISyntaxException {
+        int index;
+        this.sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
+        index = 0;
+        ImgurAlbum album = getImgurAlbum(url);
+        for (ImgurImage imgurImage : album.images) {
+            stopCheck();
+            Path saveAs = workingDir.toPath();
+            if (subdirectory != null && !subdirectory.equals("")) {
+                saveAs = saveAs.resolve(subdirectory);
+            }
+            if (!Files.exists(saveAs)) {
+                Files.createDirectory(saveAs);
+            }
+            index += 1;
+            var imgPath = imgurImage.getSaveAs().replaceAll("\\?\\d", "");
+            if (Utils.getConfigBoolean("download.save_order", true)) {
+                saveAs = saveAs.resolve(String.format("%03d_%s", index, imgPath));
+            } else {
+                saveAs = saveAs.resolve(imgPath);
+            }
+            addURLToDownload(imgurImage.url, saveAs);
+        }
+    }
+
+    public static ImgurAlbum getImgurAlbum(URL url) throws IOException, URISyntaxException {
+        String strUrl = url.toExternalForm();
+        if (!strUrl.contains(",")) {
+            strUrl += "/all";
+        }
+        logger.info("    Retrieving " + strUrl);
+        Document doc = getAlbumData("https://api.imgur.com/3/album/" + strUrl.split("/a/")[1]);
+        // Try to use embedded JSON to retrieve images
+        try {
+            JSONObject json = new JSONObject(Jsoup.clean(doc.body().toString(), Safelist.none()));
+            JSONArray jsonImages = json.getJSONObject("data").getJSONArray("images");
+            return createImgurAlbumFromJsonArray(url, jsonImages);
+        } catch (JSONException | URISyntaxException e) {
+            logger.debug("Error while parsing JSON at " + url + ", continuing", e);
+        }
+
+        // TODO If album is empty, use this to check for cached images:
+        // http://i.rarchives.com/search.cgi?cache=http://imgur.com/a/albumID
+        // At the least, get the thumbnails.
+
+        logger.info("[!] Falling back to /noscript method");
+
+        String newUrl = url.toExternalForm() + "/noscript";
+        logger.info("    Retrieving " + newUrl);
+        doc = Jsoup.connect(newUrl)
+                            .userAgent(USER_AGENT)
+                            .get();
+
+        // Fall back to parsing HTML elements
+        // NOTE: This does not always get the highest-resolution images!
+        ImgurAlbum imgurAlbum = new ImgurAlbum(url);
+        for (Element thumb : doc.select("div.image")) {
+            String image;
+            if (!thumb.select("a.zoom").isEmpty()) {
+                // Clickable link to the full-size image
+                image = "http:" + thumb.select("a").attr("href");
+            } else if (!thumb.select("img").isEmpty()) {
+                image = "http:" + thumb.select("img").attr("src");
+            } else {
+                // Unable to find image in this div
+                logger.error("[!] Unable to find image in div: " + thumb);
+                continue;
+            }
+            if (image.endsWith(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
+                image = image.replace(".gif", ".mp4");
+            }
+            ImgurImage imgurImage = new ImgurImage(new URI(image).toURL());
+            imgurAlbum.addImage(imgurImage);
+        }
+        return imgurAlbum;
+    }
+
+    private static ImgurAlbum createImgurAlbumFromJsonArray(URL url, JSONArray jsonImages) throws MalformedURLException, URISyntaxException {
+        ImgurAlbum imgurAlbum = new ImgurAlbum(url);
+        int imagesLength = jsonImages.length();
+        for (int i = 0; i < imagesLength; i++) {
+            JSONObject ob = jsonImages.getJSONObject(i);
+            imgurAlbum.addImage(new ImgurImage( new URI(ob.getString("link")).toURL()));
+        }
+        return imgurAlbum;
+    }
+
+    private static URL extractImageUrlFromJson(JSONObject json) throws MalformedURLException, URISyntaxException {
+        String ext = json.getString("ext");
+        if (!ext.startsWith(".")) {
+            ext = "." + ext;
+        }
+        if (ext.equals(".gif") && Utils.getConfigBoolean("prefer.mp4", false)) {
+            ext = ".mp4";
+        }
+        return new URI(
+                "https://i.imgur.com/"
+                        + json.getString("id")
+                        + ext).toURL();
+    }
+
+    private static JSONObject getSingleImageData(String strUrl) throws IOException {
+        return Http.url(strUrl)
+                                .userAgent(USER_AGENT)
+                                .timeout(10 * 1000)
+                                .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7"))
+                                .getJSON();
+    }
+
+    private static Document getAlbumData(String strUrl) throws IOException {
+        return Jsoup.connect(strUrl)
+                .userAgent(USER_AGENT)
+                .timeout(10 * 1000)
+                .maxBodySize(0)
+                .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7"))
+                .ignoreContentType(true)
+                .get();
+    }
+
+    private static JSONObject getUserData(String userUrl) throws IOException {
+        return Http.url(userUrl)
+            .userAgent(USER_AGENT)
+            .timeout(10 * 1000)
+            .header("Authorization", "Client-ID " + Utils.getConfigString("imgur.client_id", "546c25a59c58ad7"))
+            .getJSON();
+    }
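+
+    // All three Http/Jsoup helpers above authenticate against imgur's public API
+    // with a Client-ID header, read from the "imgur.client_id" config key with a
+    // bundled default. Roughly equivalent to (hypothetical album id shown):
+    //   curl -H "Authorization: Client-ID 546c25a59c58ad7" \
+    //        https://api.imgur.com/3/album/<albumId>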
+
+    /**
+     * Rips all albums in an imgur user's account.
+     * @param url
+     *      URL to imgur user account (http://username.imgur.com | https://imgur.com/user/username)
+     */
+    private void ripUserAccount(URL url) throws IOException, URISyntaxException {
+        int cPage = -1, cImage = 0;
+        String apiUrl = "https://api.imgur.com/3/account/%s/submissions/%d/newest?album_previews=1";
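+        // e.g. page 0 for a hypothetical user "foo":
+        //   https://api.imgur.com/3/account/foo/submissions/0/newest?album_previews=1
+        // Pagination stops once the returned "data" array is empty.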
+        // Strip 'user_' from username
+        var username = getGID(url).replace("user_", "");
+        logger.info("Retrieving " + url);
+        sendUpdate(STATUS.LOADING_RESOURCE, url.toExternalForm());
+
+        while (true) {
+            cPage += 1;
+            var pageUrl = String.format(apiUrl, username, cPage);
+            var json = getUserData(pageUrl);
+            var success = json.getBoolean("success");
+            var status = json.getInt("status");
+            if (!success || status != 200) {
+                throw new IOException(String.format("Unexpected status code %d for url %s and page %d", status, url, cPage));
+            }
+            var data = json.getJSONArray("data");
+            if (data.isEmpty()) {
+                // Data array is empty for pages beyond the last page
+                break;
+            }
+            for (int i = 0; i < data.length(); i++) {
+                cImage += 1;
+                String prefixOrSubdir = "";
+                if (Utils.getConfigBoolean("download.save_order", true)) {
+                    prefixOrSubdir = String.format("%03d_", cImage);
+                }
+                var d = (JSONObject)data.get(i);
+                var l = d.getString("link");
+                if (d.getBoolean("is_album")) {
+                    // For album links with multiple images create a prefixed folder with album id
+                    prefixOrSubdir += d.getString("id");
+                    ripAlbum(new URI(l).toURL(), prefixOrSubdir);
+                    try {
+                        Thread.sleep(SLEEP_BETWEEN_ALBUMS * 1000L);
+                    } catch (InterruptedException e) {
+                        logger.error(String.format("Error! Interrupted ripping album %s for user account %s", l, username), e);
+                    }
+                } else {
+                    // For direct links
+                    if (d.has("mp4") && Utils.getConfigBoolean("prefer.mp4", false)) {
+                        l = d.getString("mp4");
+                    }
+                    addURLToDownload(new URI(l).toURL(), prefixOrSubdir);
+                }
+            }
+        }
+    }
+
+    private void ripUserImages(URL url) {
+        int page = 0, imagesFound = 0, imagesTotal = 0;
+        String jsonUrl = url.toExternalForm().replace("/all", "/ajax/images");
+        if (jsonUrl.contains("#")) {
+            jsonUrl = jsonUrl.substring(0, jsonUrl.indexOf("#"));
+        }
+
+        while (true) {
+            try {
+                page++;
+                String jsonUrlWithParams = jsonUrl + "?sort=0&order=1&album=0&page=" + page + "&perPage=60";
+                JSONObject json = Http.url(jsonUrlWithParams).getJSON();
+                JSONObject jsonData = json.getJSONObject("data");
+                if (jsonData.has("count")) {
+                    imagesTotal = jsonData.getInt("count");
+                }
+                JSONArray images = jsonData.getJSONArray("images");
+                for (int i = 0; i < images.length(); i++) {
+                    imagesFound++;
+                    JSONObject image = images.getJSONObject(i);
+                    String imageUrl = "https://i.imgur.com/" + image.getString("hash") + image.getString("ext");
+                    String prefix = "";
+                    if (Utils.getConfigBoolean("download.save_order", true)) {
+                        prefix = String.format("%03d_", imagesFound);
+                    }
+                    addURLToDownload(new URI(imageUrl).toURL(), prefix);
+                }
+                if (imagesFound >= imagesTotal) {
+                    break;
+                }
+                Thread.sleep(1000);
+            } catch (Exception e) {
+                logger.error("Error while ripping user images: " + e.getMessage(), e);
+                break;
+            }
+        }
+    }
+
+    private void ripSubreddit(URL url) throws IOException, URISyntaxException {
+        int page = 0;
+        while (true) {
+            stopCheck();
+            String pageURL = url.toExternalForm();
+            if (!pageURL.endsWith("/")) {
+                pageURL += "/";
+            }
+            pageURL += "page/" + page + "/miss?scrolled";
+            logger.info("    Retrieving " + pageURL);
+            Document doc = Http.url(pageURL).get();
+            Elements imgs = doc.select(".post img");
+            for (Element img : imgs) {
+                String image = img.attr("src");
+                if (image.startsWith("//")) {
+                    image = "http:" + image;
+                }
+                if (image.contains("b.")) {
+                    image = image.replace("b.", ".");
+                }
+                URL imageURL = new URI(image).toURL();
+                addURLToDownload(imageURL);
+            }
+            if (imgs.isEmpty()) {
+                break;
+            }
+            page++;
+            try {
+                Thread.sleep(1000);
+            } catch (InterruptedException e) {
+                logger.error("Interrupted while waiting to load next album: ", e);
+                break;
+            }
+        }
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException, URISyntaxException {
+        Pattern p;
+        Matcher m;
+
+        p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/gallery/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+)$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur album or gallery
+            albumType = ALBUM_TYPE.ALBUM;
+            String gid = m.group(m.groupCount());
+            this.url = new URI("https://imgur.com/a/" + gid).toURL();
+            return gid;
+        }
+        // Match urls with path /a or /t
+        p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/(?:a|t)/(?:(?:[a-zA-Z0-9]*/)?.*-)?([a-zA-Z0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur album or gallery
+            albumType = ALBUM_TYPE.ALBUM;
+            String gid = m.group(m.groupCount());
+            this.url = new URI("https://imgur.com/a/" + gid).toURL();
+            return gid;
+        }
+        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{4,})\\.imgur\\.com/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Root imgur account
+            String gid = m.group(1);
+            if (gid.equals("www")) {
+                throw new MalformedURLException("Cannot rip the www.imgur.com homepage");
+            }
+            albumType = ALBUM_TYPE.USER;
+            return "user_" + gid;
+        }
+        // Pattern for new imgur user url https://imgur.com/user/username
+        p = Pattern.compile("^https?://(?:www\\.|m\\.)?imgur\\.com/user/([a-zA-Z0-9]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String gid = m.group(1);
+            albumType = ALBUM_TYPE.USER;
+            return "user_" + gid;
+        }
+        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/all.*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur account images
+            albumType = ALBUM_TYPE.USER_IMAGES;
+            return m.group(1) + "_images";
+        }
+        p = Pattern.compile("^https?://([a-zA-Z0-9\\-]{3,})\\.imgur\\.com/([a-zA-Z0-9\\-_]+).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur account album
+            albumType = ALBUM_TYPE.USER_ALBUM;
+            return m.group(1) + "-" + m.group(2);
+        }
+        p = Pattern.compile("^https?://(www\\.|m\\.)?imgur\\.com/r/([a-zA-Z0-9\\-_]{3,})(/top|/new)?(/all|/year|/month|/week|/day)?/?$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur subreddit aggregator
+            albumType = ALBUM_TYPE.SUBREDDIT;
+            StringBuilder album = new StringBuilder(m.group(2));
+            for (int i = 3; i <= m.groupCount(); i++) {
+                if (m.group(i) != null) {
+                    album.append("_").append(m.group(i).replace("/", ""));
+                }
+            }
+            return album.toString();
+        }
+        p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/r/(\\w+)/([a-zA-Z0-9,]{5,}).*$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Imgur subreddit album or image (treat as album)
+            albumType = ALBUM_TYPE.ALBUM;
+            String subreddit = m.group(m.groupCount() - 1);
+            String gid = m.group(m.groupCount());
+            this.url = new URI("https://imgur.com/r/" + subreddit + "/" + gid).toURL();
+            return "r_" + subreddit + "_" + gid;
+        }
+        p = Pattern.compile("^https?://(i\\.|www\\.|m\\.)?imgur\\.com/([a-zA-Z0-9]{5,})$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Single imgur image
+            albumType = ALBUM_TYPE.SINGLE_IMAGE;
+            return m.group(m.groupCount());
+        }
+        throw new MalformedURLException("Unsupported imgur URL format: " + url.toExternalForm());
+    }
+
+    public static class ImgurImage {
+        String title = "";
+        String description = "";
+        String extension;
+        public URL url;
+
+        ImgurImage(URL url) {
+            this.url = url;
+            String tempUrl = url.toExternalForm();
+            this.extension = tempUrl.substring(tempUrl.lastIndexOf('.'));
+            if (this.extension.contains("?")) {
+                this.extension = this.extension.substring(0, this.extension.indexOf("?"));
+            }
+        }
+
+        String getSaveAs() {
+            String saveAs = this.title;
+            String u = url.toExternalForm();
+            if (u.contains("?")) {
+                u = u.substring(0, u.indexOf("?"));
+            }
+            String imgId = u.substring(u.lastIndexOf('/') + 1, u.lastIndexOf('.'));
+            if (saveAs == null || saveAs.equals("")) {
+                saveAs = imgId;
+            } else {
+                saveAs = saveAs + "_" + imgId;
+            }
+            saveAs = Utils.filesystemSafe(saveAs);
+            return saveAs + this.extension;
+        }
+    }
+
+    public static class ImgurAlbum {
+        String title = null;
+        public URL    url;
+        public List<ImgurImage> images = new ArrayList<>();
+        ImgurAlbum(URL url) {
+            this.url = url;
+        }
+        public ImgurAlbum(URL url, String title) {
+            this(url);
+            this.title = title;
+        }
+        void addImage(ImgurImage image) {
+            images.add(image);
+        }
+    }
+
+}

+ 521 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/InstagramRipper.java

@@ -0,0 +1,521 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import static java.lang.String.format;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.time.Instant;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Spliterators;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.Connection;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.oracle.js.parser.ErrorManager;
+import com.oracle.js.parser.Parser;
+import com.oracle.js.parser.ScriptEnvironment;
+import com.oracle.js.parser.Source;
+import com.oracle.js.parser.ir.Block;
+import com.oracle.js.parser.ir.CallNode;
+import com.oracle.js.parser.ir.ExpressionStatement;
+import com.oracle.js.parser.ir.FunctionNode;
+import com.oracle.js.parser.ir.Statement;
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+// Available configuration options:
+// instagram.download_images_only - use to skip video links
+// instagram.session_id - should be set for stories and private accounts (look for sessionid cookie)
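+// A minimal sketch of how these could look in the config file (assuming the
+// usual rip.properties format; the session id value is a placeholder):
+//   instagram.download_images_only = true
+//   instagram.session_id = <value of the sessionid cookie>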
+public class InstagramRipper extends AbstractJSONRipper {
+
+    private static final Logger logger = LogManager.getLogger(InstagramRipper.class);
+
+    private String qHash;
+    private Map<String, String> cookies = new HashMap<>();
+    private String idString;
+    private List<String> itemPrefixes = new ArrayList<>();
+    private List<String> failedItems = new ArrayList<>();
+
+    private boolean hashtagRip;
+    private boolean taggedRip;
+    private boolean igtvRip;
+    private boolean postRip;
+    private boolean storiesRip;
+    private boolean pinnedRip;
+    private boolean pinnedReelRip;
+
+    private enum UrlTypePattern {
+        // e.g. https://www.instagram.com/explore/tags/rachelc00k/
+        HASHTAG("explore/tags/(?<tagname>[^?/]+)"),
+
+        // e.g. https://www.instagram.com/stories/rachelc00k/
+        STORIES("stories/(?<username>[^?/]+)"),
+
+        // e.g. https://www.instagram.com/rachelc00k/tagged/
+        USER_TAGGED("(?<username>[^?/]+)/tagged"),
+
+        // e.g. https://www.instagram.com/rachelc00k/channel/
+        IGTV("(?<username>[^?/]+)/channel"),
+
+        // e.g. https://www.instagram.com/p/Bu4CEfbhNk4/
+        SINGLE_POST("(?:p|tv)/(?<shortcode>[^?/]+)"),
+
+        // pseudo-url, e.g. https://www.instagram.com/rachelc00k/?pinned
+        PINNED("(?<username>[^?/]+)/?[?]pinned"),
+
+        // e.g. https://www.instagram.com/rachelc00k/
+        USER_PROFILE("(?<username>[^?/]+)");
+
+        private final String urlTypePattern;
+
+        UrlTypePattern(String urlTypePattern) {
+            this.urlTypePattern = urlTypePattern;
+        }
+    }
+
+    public InstagramRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    protected String getDomain() {
+        return "instagram.com";
+    }
+
+    @Override
+    public String getHost() {
+        return "instagram";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        for (UrlTypePattern urlType : UrlTypePattern.values()) {
+            Matcher urlMatcher = getUrlMatcher(url, urlType);
+            if (urlMatcher.matches()) {
+                switch (urlType) {
+                    case HASHTAG:
+                        hashtagRip = true;
+                        return "tag_" + urlMatcher.group("tagname");
+                    case PINNED:
+                        pinnedRip = true;
+                        return urlMatcher.group("username") + "_pinned";
+                    case STORIES:
+                        storiesRip = true;
+                        return urlMatcher.group("username") + "_stories";
+                    case USER_TAGGED:
+                        taggedRip = true;
+                        return urlMatcher.group("username") + "_tagged";
+                    case IGTV:
+                        igtvRip = true;
+                        return urlMatcher.group("username") + "_igtv";
+                    case SINGLE_POST:
+                        postRip = true;
+                        return "post_" + urlMatcher.group("shortcode");
+                    case USER_PROFILE:
+                        return urlMatcher.group("username");
+                    default:
+                        throw new RuntimeException("Reached unreachable");
+                }
+            }
+        }
+        throw new MalformedURLException("This URL can't be ripped");
+    }
+
+    private Matcher getUrlMatcher(URL url, UrlTypePattern type) {
+        String baseRegex = "^https?://(?:www[.])?instagram[.]com/%s(?:[?/].*)?";
+        Pattern pattern = Pattern.compile(format(baseRegex, type.urlTypePattern));
+        return pattern.matcher(url.toExternalForm());
+    }
+
+    @Override
+    public JSONObject getFirstPage() throws IOException {
+        setAuthCookie();
+        Document document = Http.url(url).cookies(cookies).response().parse();
+        qHash = getQhash(document);
+        JSONObject jsonObject = getJsonObjectFromDoc(document);
+        String hashtagNamePath = "entry_data.TagPage[0].graphql.hashtag.name";
+        String singlePostIdPath = "graphql.shortcode_media.shortcode";
+        String profileIdPath = "entry_data.ProfilePage[0].graphql.user.id";
+        String storiesPath = "entry_data.StoriesPage[0].user.id";
+        String idPath = hashtagRip ? hashtagNamePath : storiesRip ? storiesPath : postRip ? singlePostIdPath : profileIdPath;
+        idString = getJsonStringByPath(jsonObject, idPath);
+        return taggedRip ? getNextPage(null) : pinnedRip ? getPinnedItems(document) : storiesRip ? getStoriesItems() : jsonObject;
+    }
+
+    private void setAuthCookie() throws IOException {
+        String sessionId = Utils.getConfigString("instagram.session_id", null);
+        if ((storiesRip || pinnedRip) && sessionId == null) {
+            throw new IOException("instagram.session_id should be set up for Instagram stories");
+        }
+        if (sessionId != null) {
+            cookies.put("sessionid", sessionId);
+        }
+    }
+
+    // Query hash is used for graphql requests
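+    // The hash ends up in requests of the form (see graphqlRequest below):
+    //   https://www.instagram.com/graphql/query/?query_hash=<qHash>&variables=<json>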
+    private String getQhash(Document doc) throws IOException {
+        if (postRip) {
+            return null;
+        }
+
+        Predicate<String> hrefFilter = href -> href.contains("Consumer.js");
+        if (taggedRip) {
+            hrefFilter = href -> href.contains("ProfilePageContainer.js") || href.contains("TagPageContainer.js");
+        }
+
+        String href = doc.select("link[rel=preload]").stream()
+                .map(link -> link.attr("href"))
+                .filter(hrefFilter)
+                .findFirst().orElse("");
+
+        String body = Http.url("https://www.instagram.com" + href).cookies(cookies).response().body();
+
+        Function<String, String> hashExtractor =
+                storiesRip || pinnedReelRip ? this::getStoriesHash :
+                pinnedRip ? this::getPinnedHash :
+                hashtagRip ? this::getTagHash :
+                taggedRip ? this::getUserTagHash :
+                this::getProfileHash;
+
+        return hashExtractor.apply(body);
+    }
+
+    private String getStoriesHash(String jsData) {
+        return getHashValue(jsData, "loadStoryViewers", -5);
+    }
+
+    private String getProfileHash(String jsData) {
+        return getHashValue(jsData, "loadProfilePageExtras", -1,
+                s -> s.replaceAll(".*queryId\\s?:\\s?\"([0-9a-f]*)\".*", "$1"));
+    }
+
+    private String getPinnedHash(String jsData) {
+        return getHashValue(jsData, "loadProfilePageExtras", -2);
+    }
+
+    private String getTagHash(String jsData) {
+        return getHashValue(jsData, "requestNextTagMedia", -1);
+    }
+
+    private String getUserTagHash(String jsData) {
+        return getHashValue(jsData, "requestNextTaggedPosts", -1);
+    }
+
+    private JSONObject getJsonObjectFromDoc(Document document) {
+        for (Element script : document.select("script[type=text/javascript]")) {
+            String scriptText = script.data();
+            if (scriptText.startsWith("window._sharedData") || scriptText.startsWith("window.__additionalDataLoaded")) {
+                String jsonText = scriptText.replaceAll("[^{]*([{].*})[^}]*", "$1");
+                if (jsonText.contains("graphql") || jsonText.contains("StoriesPage")) {
+                    return new JSONObject(jsonText);
+                }
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public JSONObject getNextPage(JSONObject source) throws IOException {
+        if (postRip || storiesRip || pinnedReelRip) {
+            return null;
+        }
+        JSONObject nextPageQuery = new JSONObject().put(hashtagRip ? "tag_name" : "id", idString).put("first", 12);
+        if (source == null) {
+            return graphqlRequest(nextPageQuery);
+        }
+        JSONObject pageInfo = getMediaRoot(source).getJSONObject("page_info");
+        if (pageInfo.getBoolean("has_next_page")) {
+            return graphqlRequest(nextPageQuery.put("after", pageInfo.getString("end_cursor")));
+        } else {
+            failedItems.forEach(logger::error);
+            return null;
+        }
+    }
+
+    private JSONObject getStoriesItems() throws IOException {
+        return graphqlRequest(new JSONObject().append("reel_ids", idString).put("precomposed_overlay", false));
+    }
+
+    // Two requests with different query hashes required for pinned items.
+    // Query hash to be used depends on flag specified:
+    // pinnedRip flag is used initially to get list of pinned albums;
+    // pinnedReelRip flag is used next to get media urls.
+    private JSONObject getPinnedItems(Document document) throws IOException {
+        JSONObject queryForIds = new JSONObject().put("user_id", idString).put("include_highlight_reels", true);
+        JSONObject pinnedIdsJson = graphqlRequest(queryForIds);
+        JSONArray pinnedItems = getJsonArrayByPath(pinnedIdsJson, "data.user.edge_highlight_reels.edges");
+        pinnedRip = false;
+        pinnedReelRip = true;
+        qHash = getQhash(document);
+        JSONObject queryForDetails = new JSONObject();
+        getStreamOfJsonArray(pinnedItems)
+                .map(object -> getJsonStringByPath(object, "node.id"))
+                .forEach(id -> queryForDetails.append("highlight_reel_ids", id));
+        queryForDetails.put("precomposed_overlay", false);
+        return graphqlRequest(queryForDetails);
+    }
+
+    private JSONObject graphqlRequest(JSONObject vars) throws IOException {
+        // Sleep for a while to avoid a ban
+        sleep(2500);
+        String url = format("https://www.instagram.com/graphql/query/?query_hash=%s&variables=%s", qHash, vars.toString());
+        return Http.url(url).cookies(cookies).getJSON();
+    }
+
+    @Override
+    public List<String> getURLsFromJSON(JSONObject json) {
+        if (storiesRip || pinnedReelRip) {
+            JSONArray storyAlbums = getJsonArrayByPath(json, "data.reels_media");
+            return getStreamOfJsonArray(storyAlbums)
+                    .flatMap(album -> getStreamOfJsonArray(album.getJSONArray("items")))
+                    .peek(storyItem -> itemPrefixes.add(getTimestampPrefix(storyItem)))
+                    .flatMap(this::parseStoryItemForUrls)
+                    .collect(Collectors.toList());
+        }
+        if (postRip) {
+            JSONObject detailsJson = downloadItemDetailsJson(idString);
+            addPrefixInfo(detailsJson);
+            return parseItemDetailsForUrls(detailsJson).collect(Collectors.toList());
+        }
+        JSONArray edges = getMediaRoot(json).getJSONArray("edges");
+        return getStreamOfJsonArray(edges)
+                .map(edge -> getJsonStringByPath(edge, "node.shortcode"))
+                .map(this::downloadItemDetailsJson)
+                .filter(Objects::nonNull)
+                .peek(this::addPrefixInfo)
+                .flatMap(this::parseItemDetailsForUrls)
+                .collect(Collectors.toList());
+    }
+
+    private Stream<? extends String> parseStoryItemForUrls(JSONObject storyItem) {
+        if (storyItem.getBoolean("is_video")) {
+            itemPrefixes.add(getTimestampPrefix(storyItem) + "preview_");
+            int lastIndex = storyItem.getJSONArray("video_resources").length() - 1;
+            return Stream.of(
+                    getJsonStringByPath(storyItem, "video_resources[" + lastIndex + "].src"),
+                    storyItem.getString("display_url"));
+        }
+        return Stream.of(storyItem.getString("display_url"));
+    }
+
+    private JSONObject getMediaRoot(JSONObject json) {
+        String userExtra = "data.user.edge_owner_to_timeline_media";
+        String igtvExtra = "data.user.edge_felix_video_timeline";
+        String taggedExtra = "data.user.edge_user_to_photos_of_you";
+        String hashtagExtra = "data.hashtag.edge_hashtag_to_media";
+        String userHomeRoot = "entry_data.ProfilePage[0].graphql.user.edge_owner_to_timeline_media";
+        String igtvHomeRoot = "entry_data.ProfilePage[0].graphql.user.edge_felix_video_timeline";
+        String hashtagHomeRoot = "entry_data.TagPage[0].graphql.hashtag.edge_hashtag_to_media";
+        String mediaRootPath = json.optJSONObject("entry_data") != null ?
+                (hashtagRip ? hashtagHomeRoot : igtvRip ? igtvHomeRoot : userHomeRoot) :
+                hashtagRip ? hashtagExtra :
+                igtvRip ? igtvExtra :
+                taggedRip ? taggedExtra :
+                userExtra;
+        return getJsonObjectByPath(json, mediaRootPath);
+    }
+
+    private JSONObject downloadItemDetailsJson(String shortcode) {
+        String url = "https://www.instagram.com/p/%s/?__a=1";
+        try {
+            Http http = Http.url(format(url, shortcode));
+            http.ignoreContentType();
+            http.connection().followRedirects(false);
+            Connection.Response response = http.cookies(cookies).response();
+            // Fix for redirection link; repeat request with the new shortcode
+            if (response.statusCode() == 302) {
+                Pattern redirectIdPattern = Pattern.compile("/p/(?<shortcode>[^?/]+)");
+                Matcher m = redirectIdPattern.matcher(response.header("location"));
+                return m.find() ? downloadItemDetailsJson(m.group("shortcode")) : null;
+            }
+            return new JSONObject(response.body());
+        } catch (Exception e) {
+            failedItems.add(shortcode);
+            logger.trace(format("No item %s found", shortcode), e);
+        }
+        return null;
+    }
+
+    private void addPrefixInfo(JSONObject itemDetailsJson) {
+        JSONObject mediaItem = getJsonObjectByPath(itemDetailsJson, "graphql.shortcode_media");
+        String shortcode = mediaItem.getString("shortcode");
+        int subItemsCount = "GraphSidecar".equals(mediaItem.getString("__typename")) ?
+                getJsonArrayByPath(mediaItem, "edge_sidecar_to_children.edges").length() : 1;
+        for (int i = 0; i < subItemsCount; i++) {
+            itemPrefixes.add(getTimestampPrefix(mediaItem) + shortcode + "_");
+        }
+    }
+
+    private String getTimestampPrefix(JSONObject item) {
+        Instant instant = Instant.ofEpochSecond(item.getLong("taken_at_timestamp"));
+        return DateTimeFormatter.ofPattern("yyyy-MM-dd_HH-mm-ss_").format(ZonedDateTime.ofInstant(instant, ZoneOffset.UTC));
+    }
+
+    private Stream<? extends String> parseItemDetailsForUrls(JSONObject itemDetailsJson) {
+        JSONObject mediaItem = getJsonObjectByPath(itemDetailsJson, "graphql.shortcode_media");
+        // For some reason the JSON video_url has lower quality than the HTML-tag one,
+        // so the HTML-tag url is also requested here and marked with an _extra_ prefix
+        if ("GraphVideo".equals(mediaItem.getString("__typename"))) {
+            String shortcode = mediaItem.getString("shortcode");
+            String urlFromPage = getVideoUrlFromPage(shortcode);
+            if (!urlFromPage.isEmpty()) {
+                itemPrefixes.add(getTimestampPrefix(mediaItem) + shortcode + "_extra_");
+                return Stream.of(mediaItem.getString("video_url"), urlFromPage);
+            }
+        }
+        return parseRootForUrls(mediaItem);
+    }
+
+    // Uses recursion for GraphSidecar
+    private Stream<? extends String> parseRootForUrls(JSONObject mediaItem) {
+        String typeName = mediaItem.getString("__typename");
+        switch (typeName) {
+            case "GraphImage":
+                return Stream.of(mediaItem.getString("display_url"));
+            case "GraphVideo":
+                return Stream.of(mediaItem.getString("video_url"));
+            case "GraphSidecar":
+                JSONArray sideCar = getJsonArrayByPath(mediaItem, "edge_sidecar_to_children.edges");
+                return getStreamOfJsonArray(sideCar).map(object -> object.getJSONObject("node"))
+                        .flatMap(this::parseRootForUrls);
+            default:
+                return Stream.empty();
+        }
+    }
+
+    private String getVideoUrlFromPage(String videoID) {
+        try {
+            Document doc = Http.url("https://www.instagram.com/p/" + videoID).cookies(cookies).get();
+            return doc.select("meta[property=og:video]").attr("content");
+        } catch (Exception e) {
+            logger.warn("Unable to get page " + "https://www.instagram.com/p/" + videoID);
+        }
+        return "";
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        if (Utils.getConfigBoolean("instagram.download_images_only", false) && url.toString().contains(".mp4?")) {
+            logger.info("Skipped video url: " + url);
+            return;
+        }
+        addURLToDownload(url, itemPrefixes.get(index - 1), "", null, cookies);
+    }
+
+    // Javascript parsing
+    /* ------------------------------------------------------------------------------------------------------- */
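+    // These helpers walk the AST of instagram's bundled JS to recover graphql query
+    // hashes. Conceptually, given minified source like (hypothetical excerpt):
+    //   ... loadProfilePageExtras ... queryId:"0123456789abcdef" ...
+    // getHashValue() finds the statement containing `keyword` and reads the hash
+    // literal from the statement at the given `offset` relative to it.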
+    private String getHashValue(String javaScriptData, String keyword, int offset,
+            Function<String, String> extractHash) {
+        List<Statement> statements = getJsBodyBlock(javaScriptData).getStatements();
+
+        return statements.stream()
+                .flatMap(statement -> filterItems(statement, ExpressionStatement.class))
+                .map(ExpressionStatement::getExpression)
+                .flatMap(expression -> filterItems(expression, CallNode.class))
+                .map(CallNode::getArgs)
+                .map(expressions -> expressions.get(0))
+                .flatMap(expression -> filterItems(expression, FunctionNode.class))
+                .map(FunctionNode::getBody)
+                .map(Block::getStatements)
+                .map(statementList -> lookForHash(statementList, keyword, offset, extractHash))
+                .filter(Objects::nonNull)
+                .findFirst().orElse(null);
+    }
+
+    private String getHashValue(String javaScriptData, String keyword, int offset) {
+        return getHashValue(javaScriptData, keyword, offset, null);
+    }
+
+    private String lookForHash(List<Statement> list, String keyword, int offset, Function<String, String> extractHash) {
+        for (int i = 0; i < list.size(); i++) {
+            Statement st = list.get(i);
+            if (st.toString().contains(keyword)) {
+                if (extractHash != null) {
+                    return extractHash.apply(list.get(i + offset).toString());
+                }
+                return list.get(i + offset).toString().replaceAll(".*\"([0-9a-f]*)\".*", "$1");
+            }
+        }
+        return null;
+    }
+
+    private <T> Stream<T> filterItems(Object obj, Class<T> aClass) {
+        return Stream.of(obj).filter(aClass::isInstance).map(aClass::cast);
+    }
+
+    private Block getJsBodyBlock(String javaScriptData) {
+        ScriptEnvironment env = ScriptEnvironment.builder().ecmaScriptVersion(10).constAsVar(true).build();
+        ErrorManager errorManager = new ErrorManager.ThrowErrorManager();
+        Source src = Source.sourceFor("name", javaScriptData);
+        return new Parser(env, src, errorManager).parse().getBody();
+    }
+
+    // Some JSON helper methods below
+    /* ------------------------------------------------------------------------------------------------------- */
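+    // Paths use dot notation with optional [n] array indices. For example, with a
+    // hypothetical JSON document, getJsonStringByPath(json, "data.user[0].name")
+    // resolves json -> object "data" -> array "user" -> element 0 -> string "name".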
+    private JSONObject getJsonObjectByPath(JSONObject object, String key) {
+        Pattern arrayPattern = Pattern.compile("(?<arr>.*)\\[(?<idx>\\d+)]");
+        JSONObject result = object;
+        for (String s : key.split("[.]")) {
+            Matcher m = arrayPattern.matcher(s);
+            result = m.matches() ?
+                    result.getJSONArray(m.group("arr")).getJSONObject(Integer.parseInt(m.group("idx"))) :
+                    result.getJSONObject(s);
+        }
+        return result;
+    }
+
+    private <T> T getByPath(BiFunction<JSONObject, String, T> func, JSONObject object, String key) {
+        int namePos = key.lastIndexOf('.');
+        JSONObject parent = namePos < 0 ? object : getJsonObjectByPath(object, key.substring(0, namePos));
+        return func.apply(parent, key.substring(namePos + 1));
+    }
+
+    private JSONArray getJsonArrayByPath(JSONObject object, String key) {
+        return getByPath(JSONObject::getJSONArray, object, key);
+    }
+
+    private String getJsonStringByPath(JSONObject object, String key) {
+        return getByPath(JSONObject::getString, object, key);
+    }
+
+    private Stream<JSONObject> getStreamOfJsonArray(JSONArray array) {
+        return StreamSupport.stream(new JSONSpliterator(array), false);
+    }
+
+    private class JSONSpliterator extends Spliterators.AbstractSpliterator<JSONObject> {
+        private JSONArray array;
+        private int index = 0;
+
+        JSONSpliterator(JSONArray array) {
+            super(array.length(), SIZED | ORDERED);
+            this.array = array;
+        }
+
+        @Override
+        public boolean tryAdvance(Consumer<? super JSONObject> action) {
+            if (index == array.length()) {
+                return false;
+            }
+            action.accept(array.getJSONObject(index++));
+            return true;
+        }
+    }
+}

+ 95 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/JabArchivesRipper.java

@@ -0,0 +1,95 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Locale;
+import java.text.Normalizer;
+import java.text.Normalizer.Form;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+public class JabArchivesRipper extends AbstractHTMLRipper {
+
+    private static final Pattern NONLATIN = Pattern.compile("[^\\w-]");
+    private static final Pattern WHITESPACE = Pattern.compile("[\\s]");
+
+    private Map<String, String> itemPrefixes = Collections.synchronizedMap(new HashMap<>());
+
+    public JabArchivesRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "jabarchives";
+    }
+
+    @Override
+    public String getDomain() {
+        return "jabarchives.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://(?:www\\.)?jabarchives.com/main/view/([a-zA-Z0-9_]+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Return the text contained between () in the regex
+            return m.group(1);
+        }
+        throw new MalformedURLException(
+                "Expected jabarchives.com URL format: " +
+                "jabarchives.com/main/view/albumname - got " + url + " instead");
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        Elements hrefs = doc.select("a[title=\"Next page\"]");
+        if (hrefs.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = "https://jabarchives.com" + hrefs.first().attr("href");
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    protected String getSlug(String input) {
+        // Get a URL/file-safe version of a string
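+        // e.g. a hypothetical title "My Album Über!" becomes "my-album-uber":
+        // whitespace turns into "-", accents are decomposed and stripped, and
+        // the result is lowercased.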
+        String nowhitespace = WHITESPACE.matcher(input).replaceAll("-");
+        String normalized = Normalizer.normalize(nowhitespace, Form.NFD);
+        String slug = NONLATIN.matcher(normalized).replaceAll("");
+        return slug.toLowerCase(Locale.ENGLISH);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("#contentMain img")) {
+            String url = "https://jabarchives.com" + el.attr("src").replace("thumb", "large");
+            result.add(url);
+
+            String title = el.parent().attr("title");
+            itemPrefixes.put(url, getSlug(title) + "_");
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, itemPrefixes.get(url.toString()));
+    }
+}

+ 84 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/JagodibujaRipper.java

@@ -0,0 +1,84 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class JagodibujaRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(JagodibujaRipper.class);
+
+    public JagodibujaRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "jagodibuja";
+    }
+
+    @Override
+    public String getDomain() {
+        return "jagodibuja.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://www.jagodibuja.com/([a-zA-Z0-9_-]*)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected jagodibuja.com gallery formats hwww.jagodibuja.com/Comic name/ got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element comicPageUrl : doc.select("div.gallery-icon > a")) {
+            // Check if the ripper has been stopped
+            try {
+                stopCheck();
+            } catch (IOException e) {
+                return result;
+            }
+            try {
+                sleep(500);
+                Document comicPage = Http.url(comicPageUrl.attr("href")).get();
+                Element elem = comicPage.select("span.full-size-link > a").first();
+                logger.info("Got link " + elem.attr("href"));
+                try {
+                    addURLToDownload(new URI(elem.attr("href")).toURL(), "");
+                } catch (MalformedURLException | URISyntaxException e) {
+                    logger.warn("Malformed URL: " + elem.attr("href"), e);
+                }
+                result.add(elem.attr("href"));
+            } catch (IOException e) {
+                logger.info("Error loading " + comicPageUrl);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // No-op: urls are already queued in getURLsFromPage().
+    }
+
+}

+ 75 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/Jpg3Ripper.java

@@ -0,0 +1,75 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class Jpg3Ripper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(Jpg3Ripper.class);
+
+    public Jpg3Ripper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "jpg3.su";
+    }
+
+    @Override
+    public String getHost() {
+        return "jpg3";
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> urls = new ArrayList<>();
+
+        for (Element el : page.select(".image-container > img")) {
+            urls.add(el.attr("src").replaceAll("\\.md", ""));
+        }
+
+        return urls;
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String u = url.toExternalForm();
+        u = u.replaceAll("https?://jpg3.su/a/([^/]+)/?.*", "https://jpg3.su/a/$1");
+        logger.debug("Changed URL from " + url + " to " + u);
+        return new URI(u).toURL();
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException, URISyntaxException {
+        String href = page.select("[data-pagination='next']").attr("href");
+        if (!href.isEmpty()) {
+            return Http.url(href).get();
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        String[] parts = url.toString().split("/");
+        return parts[parts.length - 1];
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+}

+ 56 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/KingcomixRipper.java

@@ -0,0 +1,56 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class KingcomixRipper extends AbstractHTMLRipper {
+
+    public KingcomixRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "kingcomix";
+    }
+
+    @Override
+    public String getDomain() {
+        return "kingcomix.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://kingcomix.com/([a-zA-Z1-9_-]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected kingcomix URL format: " +
+                "kingcomix.com/COMIX - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("div.entry-content > p > img")) {
+            result.add(el.attr("src"));
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 241 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ListalRipper.java

@@ -0,0 +1,241 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+
+/**
+ * @author Tushar
+ *
+ */
+public class ListalRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(ListalRipper.class);
+
+    private final Pattern p1 = Pattern.compile("https://www.listal.com/list/([a-zA-Z0-9-]+)");
+    private final Pattern p2 =
+            Pattern.compile("https://www.listal.com/((?:(?:[a-zA-Z0-9-_%]+)/?)+)");
+    private final String postUrl = "https://www.listal.com/item-list/"; // to load more images.
+
+    private String listId = null; // listId to get more images via POST.
+    private UrlType urlType = UrlType.UNKNOWN;
+
+    private DownloadThreadPool listalThreadPool = new DownloadThreadPool("listalThreadPool");
+
+    public ListalRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "listal.com";
+    }
+
+    @Override
+    public String getHost() {
+        return "listal";
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        Document doc = Http.url(url).get();
+        if (urlType == UrlType.LIST) {
+            listId = doc.select("#customlistitems").first().attr("data-listid"); // Used for list types.
+        }
+        return doc;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        if (urlType == UrlType.LIST) {
+            // for url of type LIST, https://www.listal.com/list/my-list
+            return getURLsForListType(page);
+        } else if (urlType == UrlType.FOLDER) {
+            // for url of type FOLDER,  https://www.listal.com/jim-carrey/pictures
+            return getURLsForFolderType(page);
+        }
+        return null;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        listalThreadPool.addThread(new ListalImageDownloadThread(url, index));
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Matcher m1 = p1.matcher(url.toExternalForm());
+        if (m1.matches()) {
+            // Return the text contained between () in the regex
+            urlType = UrlType.LIST;
+            return m1.group(1);
+        }
+
+        Matcher m2 = p2.matcher(url.toExternalForm());
+        if (m2.matches()) {
+            // Return only the gid from the capturing group, for urls like listal.com/tvOrSomething/dexter/pictures
+            urlType = UrlType.FOLDER;
+            return getFolderTypeGid(m2.group(1));
+        }
+
+        throw new MalformedURLException("Expected listal.com URL format: "
+                + "listal.com/list/my-list-name - got " + url + " instead.");
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException, URISyntaxException {
+        Document nextPage = super.getNextPage(page);
+        switch (urlType) {
+            case LIST:
+                if (!page.select(".loadmoreitems").isEmpty()) {
+                    // Not all items are loaded yet;
+                    // load the remaining items using postUrl.
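+                    // i.e. POST https://www.listal.com/item-list/ with form
+                    // fields listid=<listId> and offset=<offset>.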
+
+                    String offSet = page.select(".loadmoreitems").last().attr("data-offset");
+                    Map<String, String> postParams = new HashMap<>();
+                    postParams.put("listid", listId);
+                    postParams.put("offset", offSet);
+                    try {
+                        nextPage = Http.url(postUrl).data(postParams).retries(3).post();
+                    } catch (IOException e1) {
+                        logger.error("Failed to load more images after " + offSet, e1);
+                        throw e1;
+                    }
+                }
+                break;
+
+            case FOLDER:
+                Elements pageLinks = page.select(".pages a");
+                if (!pageLinks.isEmpty() && pageLinks.last().text().startsWith("Next")) {
+                    String nextUrl = pageLinks.last().attr("abs:href");
+                    nextPage = Http.url(nextUrl).retries(3).get();
+                }
+                break;
+
+            case UNKNOWN:
+            default:
+        }
+        return nextPage;
+    }
+
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return listalThreadPool;
+    }
+
+    /**
+     * Returns the image urls for UrlType LIST.
+     */
+    private List<String> getURLsForListType(Document page) {
+        List<String> list = new ArrayList<>();
+        for (Element e : page.select(".pure-g a[href*=viewimage]")) {
+            //list.add("https://www.listal.com" + e.attr("href") + "h");
+            list.add(e.attr("abs:href") + "h");
+        }
+
+        return list;
+    }
+
+    /**
+     * Returns the image urls for UrlType FOLDER.
+     */
+    private List<String> getURLsForFolderType(Document page) {
+        List<String> list = new ArrayList<>();
+        for (Element e : page.select("#browseimagescontainer .imagewrap-outer a")) {
+            list.add(e.attr("abs:href") + "h");
+        }
+        return list;
+    }
+
+    /**
+     * Returns the gid for url type listal.com/tvOrSomething/dexter/pictures
+     */
+    public String getFolderTypeGid(String group) throws MalformedURLException {
+        String[] folders = group.split("/");
+        try {
+            if (folders.length == 2 && folders[1].equals("pictures")) {
+                // Url is probably for an actor.
+                return folders[0];
+            }
+
+            if (folders.length == 3 && folders[2].equals("pictures")) {
+                // Url is for a folder (like movies, tv, etc.).
+                Document doc = Http.url(url).get();
+                return doc.select(".itemheadingmedium").first().text();
+            }
+
+        } catch (Exception e) {
+            logger.error(e);
+        }
+        throw new MalformedURLException("Unable to fetch the gid for given url.");
+    }
+
+    private class ListalImageDownloadThread implements Runnable {
+
+        private final URL url;
+        private final int index;
+
+        public ListalImageDownloadThread(URL url, int index) {
+            super();
+            this.url = url;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            getImage();
+        }
+
+        public void getImage() {
+            try {
+                Document doc = Http.url(url).get();
+
+                String imageUrl = doc.getElementsByClass("pure-img").attr("src");
+                if (!imageUrl.isEmpty()) {
+                    addURLToDownload(new URI(imageUrl).toURL(), getPrefix(index), "", null, null,
+                            getImageName());
+                } else {
+                    logger.error("Couldn't find image from url: " + url);
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while downloading image: " + url, e);
+            }
+        }
+
+        public String getImageName() {
+            // Returns the image number of the link if possible.
+            String name = this.url.toExternalForm();
+            try {
+                name = name.substring(name.lastIndexOf("/") + 1);
+            } catch (Exception e) {
+                logger.info("Failed to get name for the image.");
+                // Let the downloader derive a name instead of saving as "null.jpg".
+                return null;
+            }
+            // Listal stores images as .jpg
+            return name + ".jpg";
+        }
+    }
+
+    private enum UrlType {
+        LIST, FOLDER, UNKNOWN
+    }
+}

+ 119 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/LusciousRipper.java

@@ -0,0 +1,119 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.Connection;
+import org.jsoup.nodes.Document;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class LusciousRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(LusciousRipper.class);
+
+    private static String albumid;
+
+    private static final Pattern p = Pattern.compile("^https?://(?:www\\.)?(?:members\\.|legacy\\.|old\\.)?luscious\\.net/albums/([-_.0-9a-zA-Z]+).*$");
+
+    public LusciousRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String URLToReturn = url.toExternalForm();
+        URLToReturn = URLToReturn.replaceAll("https?://(?:www\\.)?luscious\\.", "https://old.luscious.");
+        URL san_url = new URI(URLToReturn).toURL();
+        logger.info("sanitized URL is " + san_url.toExternalForm());
+        return san_url;
+    }
+
+    @Override
+    public String getDomain() {
+        return "luscious.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "luscious";
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) { // gets urls for all pages through the api
+        List<String> urls = new ArrayList<>();
+        int totalPages = 1;
+
+        for (int i = 1; i <= totalPages; i++) {
+            String APIStringWOVariables = "https://apicdn.luscious.net/graphql/nobatch/?operationName=PictureListInsideAlbum&query=%2520query%2520PictureListInsideAlbum%28%2524input%253A%2520PictureListInput%21%29%2520%257B%2520picture%2520%257B%2520list%28input%253A%2520%2524input%29%2520%257B%2520info%2520%257B%2520...FacetCollectionInfo%2520%257D%2520items%2520%257B%2520__typename%2520id%2520title%2520description%2520created%2520like_status%2520number_of_comments%2520number_of_favorites%2520moderation_status%2520width%2520height%2520resolution%2520aspect_ratio%2520url_to_original%2520url_to_video%2520is_animated%2520position%2520permissions%2520url%2520tags%2520%257B%2520category%2520text%2520url%2520%257D%2520thumbnails%2520%257B%2520width%2520height%2520size%2520url%2520%257D%2520%257D%2520%257D%2520%257D%2520%257D%2520fragment%2520FacetCollectionInfo%2520on%2520FacetCollectionInfo%2520%257B%2520page%2520has_next_page%2520has_previous_page%2520total_items%2520total_pages%2520items_per_page%2520url_complete%2520%257D%2520&variables=";
+            Connection con = Http.url(APIStringWOVariables + encodeVariablesPartOfURL(i, albumid)).method(Connection.Method.GET).retries(5).connection();
+            con.ignoreHttpErrors(true);
+            con.ignoreContentType(true);
+            con.userAgent("Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0");
+            Connection.Response res;
+            try {
+                res = con.execute();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+            String body = res.body();
+
+            JSONObject jsonObject = new JSONObject(body);
+
+            JSONObject data = jsonObject.getJSONObject("data");
+            JSONObject picture = data.getJSONObject("picture");
+            JSONObject list = picture.getJSONObject("list");
+            JSONArray items = list.getJSONArray("items");
+            JSONObject info = list.getJSONObject("info");
+            totalPages = info.getInt("total_pages");
+
+            for (int j = 0; j < items.length(); j++) {
+                JSONObject item = items.getJSONObject(j);
+                String urlToOriginal = item.getString("url_to_original");
+                urls.add(urlToOriginal);
+            }
+        }
+
+        return urls;
+    }
+
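+    // Sketch of the expected mapping (example URL hypothetical):
+    //   https://old.luscious.net/albums/my_album_412 -> GID "my_album_412",
+    //   album id "412" (the last underscore-separated token of the GID).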
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            albumid = m.group(1).split("_")[m.group(1).split("_").length - 1];
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected luscious.net URL format: "
+                + "luscious.net/albums/albumname \n members.luscious.net/albums/albumname  - got " + url + " instead.");
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+
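+    // For example (album id and page illustrative), encodeVariablesPartOfURL(1, "412")
+    // percent-encodes the JSON
+    //   {"input":{"filters":[{"name":"album_id","value":"412"}],"display":"rating_all_time","items_per_page":50,"page":1}}
+    // before it is appended to the GraphQL URL built in getURLsFromPage.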
+    public static String encodeVariablesPartOfURL(int page, String albumId) {
+        try {
+            String json = "{\"input\":{\"filters\":[{\"name\":\"album_id\",\"value\":\"" + albumId + "\"}],\"display\":\"rating_all_time\",\"items_per_page\":50,\"page\":" + page + "}}";
+
+            return URLEncoder.encode(json, "UTF-8");
+        } catch (UnsupportedEncodingException e) {
+            throw new IllegalStateException("Could not encode variables");
+        }
+    }
+}

+ 159 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MangadexRipper.java

@@ -0,0 +1,159 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.utils.Http;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class MangadexRipper extends AbstractJSONRipper {
+    private final String chapterApiEndPoint = "https://mangadex.org/api/chapter/";
+    private final String mangaApiEndPoint = "https://mangadex.org/api/manga/";
+    private boolean isSingleChapter;
+
+    public MangadexRipper(URL url) throws IOException {
+        super(url);
+    }
+
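+    // e.g. getImageUrl("abc123", "x1.png", "https://s2.example.cdn/data/")
+    // -> "https://s2.example.cdn/data/abc123/x1.png" (values illustrative).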
+    private String getImageUrl(String chapterHash, String imageName, String server) {
+        return server + chapterHash + "/" + imageName;
+    }
+
+    @Override
+    public String getHost() {
+        return "mangadex";
+    }
+
+    @Override
+    public String getDomain() {
+        return "mangadex.org";
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        return (url.getHost().endsWith("mangadex.org"));
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        String capID = getChapterID(url.toExternalForm());
+        String mangaID = getMangaID(url.toExternalForm());
+        if (capID != null) {
+            isSingleChapter = true;
+            return capID;
+        } else if (mangaID != null) {
+            isSingleChapter = false;
+            return mangaID;
+        }
+        throw new MalformedURLException("Unable to get chapter ID from" + url);
+    }
+
+    private String getChapterID(String url) {
+        Pattern p = Pattern.compile("https://mangadex.org/chapter/([\\d]+)/([\\d+]?)");
+        Matcher m = p.matcher(url);
+        if (m.matches()) {
+            return m.group(1);
+        }
+        return null;
+    }
+
+    private String getMangaID(String url) {
+        Pattern p = Pattern.compile("https://mangadex.org/title/([\\d]+)/(.+)");
+        Matcher m = p.matcher(url);
+        if (m.matches()) {
+            return m.group(1);
+        }
+        return null;
+    }
+
+
+    @Override
+    public JSONObject getFirstPage() throws IOException, URISyntaxException {
+        // Get the chapter ID
+        String chapterID = getChapterID(url.toExternalForm());
+        String mangaID = getMangaID(url.toExternalForm());
+        if (mangaID != null) {
+            return Http.url(new URI(mangaApiEndPoint + mangaID).toURL()).getJSON();
+        } else {
+            return Http.url(new URI(chapterApiEndPoint + chapterID).toURL()).getJSON();
+        }
+    }
+
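+    // Response shapes assumed from the fields accessed below (not an official schema):
+    //   chapter endpoint: { "hash": "...", "server": "https://...", "page_array": ["1.png", ...] }
+    //   manga endpoint:   { "chapter": { "<chapterId>": { "lang_name": "English", "chapter": "12.5", ... }, ... } }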
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) {
+        if (isSingleChapter) {
+            List<String> assetURLs = new ArrayList<>();
+            JSONArray currentObject;
+            String chapterHash;
+            // Server is the cdn hosting the images.
+            String server;
+            chapterHash = json.getString("hash");
+            server = json.getString("server");
+            for (int i = 0; i < json.getJSONArray("page_array").length(); i++) {
+                currentObject = json.getJSONArray("page_array");
+
+                assetURLs.add(getImageUrl(chapterHash, currentObject.getString(i), server));
+            }
+            return assetURLs;
+        }
+        JSONObject chaptersJSON = (JSONObject) json.get("chapter");
+        JSONObject temp;
+        Iterator<String> keys = chaptersJSON.keys();
+        HashMap<Double, String> chapterIDs = new HashMap<>();
+        while (keys.hasNext()) {
+            String keyValue = keys.next();
+            temp = (JSONObject) chaptersJSON.get(keyValue);
+            if (temp.getString("lang_name").equals("English")) {
+                chapterIDs.put(temp.getDouble("chapter"), keyValue);
+            }
+
+        }
+
+        List<String> assetURLs = new ArrayList<>();
+        JSONArray currentObject;
+        String chapterHash;
+        // Server is the cdn hosting the images.
+        String server;
+        JSONObject chapterJSON;
+        TreeMap<Double, String> treeMap = new TreeMap<>(chapterIDs);
+        for (double key : treeMap.keySet()) {
+            try {
+                chapterJSON = Http.url(new URI(chapterApiEndPoint + treeMap.get(key)).toURL()).getJSON();
+            } catch (IOException | URISyntaxException e) {
+                // Skip chapters that fail to load rather than dereferencing a null object below.
+                e.printStackTrace();
+                continue;
+            }
+            sendUpdate(RipStatusMessage.STATUS.LOADING_RESOURCE, "chapter " + key);
+            chapterHash = chapterJSON.getString("hash");
+            server = chapterJSON.getString("server");
+            for (int i = 0; i < chapterJSON.getJSONArray("page_array").length(); i++) {
+                currentObject = chapterJSON.getJSONArray("page_array");
+
+                assetURLs.add(getImageUrl(chapterHash, currentObject.getString(i), server));
+            }
+        }
+
+        return assetURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        // mangadex does not like rippers one bit, so we wait a good long while between requests
+        sleep(1000);
+        addURLToDownload(url, getPrefix(index));
+    }
+
+
+}

+ 96 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MastodonRipper.java

@@ -0,0 +1,96 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.json.JSONObject;
+import org.json.JSONArray;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+public class MastodonRipper extends AbstractHTMLRipper {
+    private final Map<String, String> itemIDs = Collections.synchronizedMap(new HashMap<>());
+
+    public MastodonRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "mastodon";
+    }
+
+    @Override
+    public String getDomain() {
+        return "mastodon.social";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://(" + getDomain() + ")/@([a-zA-Z0-9_-]+)(/media/?)?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Return the text contained between () in the regex
+            return m.group(1) + "@" + m.group(2);
+        }
+        throw new MalformedURLException(
+                "Expected " + getDomain() + " URL format: " +
+                getDomain() + "/@username - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        Pattern p = Pattern.compile("^/@[a-zA-Z0-9_-]+/media/?$");
+        Matcher m = p.matcher(url.getPath());
+        if (m.matches()) {
+            return Http.url(url).get();
+        }
+        return Http.url(url.toExternalForm().replaceAll("/$", "") + "/media").get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        Elements hrefs = doc.select(".h-entry + .entry > a.load-more.load-gap");
+        if (hrefs.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        String nextUrl = hrefs.last().attr("href");
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
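+        // Each MediaGallery element carries a JSON "data-props" attribute assumed
+        // to look like {"media":[{"id":"123","url":"https://.../file.png"}, ...]}
+        // (shape inferred from the accesses below, values illustrative).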
+        for (Element el : doc.select("[data-component=\"MediaGallery\"]")) {
+            String props = el.attr("data-props");
+            JSONObject obj = new JSONObject(props);
+            JSONArray arr = obj.getJSONArray("media");
+            for (int i = 0; i < arr.length(); i++) {
+                String url = arr.getJSONObject(i).getString("url");
+                result.add(url);
+                String id = arr.getJSONObject(i).getString("id");
+                itemIDs.put(url, id);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, itemIDs.get(url.toString()) + "_");
+    }
+}

+ 20 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MastodonXyzRipper.java

@@ -0,0 +1,20 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+public class MastodonXyzRipper extends MastodonRipper {
+    public MastodonXyzRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "mastodonxyz";
+    }
+
+    @Override
+    public String getDomain() {
+        return "mastodon.xyz";
+    }
+}

+ 72 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/ModelmayhemRipper.java

@@ -0,0 +1,72 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class ModelmayhemRipper extends AbstractHTMLRipper {
+
+    private Map<String,String> cookies = new HashMap<>();
+
+    public ModelmayhemRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "modelmayhem";
+    }
+
+    @Override
+    public String getDomain() {
+        return "modelmayhem.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https?://www\\.modelmayhem\\.com/portfolio/(\\d+)/viewall");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected modelmayhem URL format: " +
+                "modelmayhem.com/portfolio/ID/viewall - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // Bypass NSFW filter
+        cookies.put("worksafe", "0");
+        // "url" is an instance field of the superclass
+        return Http.url(url).cookies(cookies).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
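+        // Thumbnail sources carry an "_m" size marker that is stripped to get
+        // the full-size image, e.g. ".../photo_m.jpg" -> ".../photo.jpg"
+        // (example path illustrative).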
+        for (Element el : doc.select("tr.a_pics > td > div > a")) {
+            String image_URL = el.select("img").attr("src").replaceAll("_m", "");
+            if (image_URL.contains("http")) {
+                result.add(image_URL);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 204 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MotherlessRipper.java

@@ -0,0 +1,204 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+
+public class MotherlessRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(MotherlessRipper.class);
+
+    // All sleep times are in milliseconds
+    private static final int IMAGE_SLEEP_TIME    = 1000;
+
+    private static final String DOMAIN = "motherless.com",
+                                HOST   = "motherless";
+
+    private DownloadThreadPool motherlessThreadPool;
+
+    public MotherlessRipper(URL url) throws IOException {
+        super(url);
+        motherlessThreadPool = new DownloadThreadPool();
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        try {
+            getGID(url);
+        } catch (Exception e) {
+            return false;
+        }
+        return url.getHost().endsWith(DOMAIN);
+    }
+
+    @Override
+    protected String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    protected Document getFirstPage() throws IOException {
+        URL firstURL = this.url;
+        String path = this.url.getPath();
+        // Check if "All Uploads" (/GMxxxx), Image (/GIxxxx) or Video (/GVxxxx) gallery since there's no "next" after the homepage (/Gxxxx)
+        Pattern p = Pattern.compile("[MIV]");
+        Matcher m = p.matcher(String.valueOf(path.charAt(2)));
+        boolean notHome = m.matches();
+        // If it's the homepage go to the "All Uploads" gallery (/Gxxxxx -> /GMxxxxx)
+        if (!notHome) {
+            StringBuilder newPath = new StringBuilder(path);
+            newPath.insert(2, "M");
+            firstURL = URI.create("https://" + DOMAIN + newPath).toURL();
+            logger.info("Changed URL to " + firstURL);
+        }
+        return Http.url(firstURL).referrer("https://motherless.com").get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException, URISyntaxException {
+
+        Elements nextPageLink = doc.head().select("link[rel=next]");
+        if (nextPageLink.isEmpty()) {
+            throw new IOException("Last page reached");
+        } else {
+            String referrerLink = doc.head().select("link[rel=canonical]").first().attr("href");
+            URL nextURL = this.url.toURI().resolve(nextPageLink.first().attr("href")).toURL();
+            return Http.url(nextURL).referrer(referrerLink).get();
+        }
+    }
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+        List<String> pageURLs = new ArrayList<>();
+
+        for (Element thumb : page.select("div.thumb-container a.img-container")) {
+            if (isStopped()) {
+                break;
+            }
+            String thumbURL = thumb.attr("href");
+            if (thumbURL.contains("pornmd.com")) {
+                continue;
+            }
+
+            String url;
+            if (!thumbURL.startsWith("http")) {
+                url = "https://" + DOMAIN + thumbURL;
+            } else {
+                url = thumbURL;
+            }
+            pageURLs.add(url);
+
+            if (isThisATest()) {
+                break;
+            }
+        }
+
+        return pageURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        // Create thread for finding image at "url" page
+        MotherlessImageRunnable mit = new MotherlessImageRunnable(url, index);
+        motherlessThreadPool.addThread(mit);
+        try {
+            Thread.sleep(IMAGE_SLEEP_TIME);
+        } catch (InterruptedException e) {
+            logger.warn("Interrupted while waiting to load next image", e);
+        }
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException {
+        return url;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
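+        // Recognized forms and resulting GIDs (example URLs illustrative):
+        //   https://motherless.com/GIABCD123        -> "IABCD123"
+        //   https://motherless.com/term/images/foo  -> "foo"
+        //   https://motherless.com/gi/some-term     -> "some-term"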
+        Pattern p = Pattern.compile("^https?://(www\\.)?motherless\\.com/G([MVI]?[A-F0-9]{6,8}).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(m.groupCount());
+        }
+        p = Pattern.compile("^https?://(www\\.)?motherless\\.com/term/(images/|videos/)([a-zA-Z0-9%]+)$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(m.groupCount());
+        }
+        p = Pattern.compile("^https?://(www\\.)?motherless\\.com/g[iv]/([a-zA-Z0-9%\\-_]+)$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(m.groupCount());
+        }
+        throw new MalformedURLException("Expected URL format: https://motherless.com/GIXXXXXXX, got: " + url);
+    }
+
+    @Override
+    protected DownloadThreadPool getThreadPool() {
+        return motherlessThreadPool;
+    }
+
+    /**
+     * Helper class to find and download images found on "image" pages
+     */
+    private class MotherlessImageRunnable implements Runnable {
+        private final URL url;
+        private final int index;
+
+        MotherlessImageRunnable(URL url, int index) {
+            super();
+            this.url = url;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            try {
+                if (isStopped() && !isThisATest()) {
+                    return;
+                }
+                String u = this.url.toExternalForm();
+                Document doc = Http.url(u)
+                                   .referrer(u)
+                                   .get();
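+                // Media pages embed the file URL in an inline script, e.g.
+                //   __fileurl = 'https://cdn.example/images/XXXX.jpg';
+                // (value illustrative); the regex below captures it.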
+                Pattern p = Pattern.compile("^.*__fileurl = '([^']+)';.*$", Pattern.DOTALL);
+                Matcher m = p.matcher(doc.outerHtml());
+                if (m.matches()) {
+                    String file = m.group(1);
+                    String prefix = "";
+                    if (Utils.getConfigBoolean("download.save_order", true)) {
+                        prefix = String.format("%03d_", index);
+                    }
+                    addURLToDownload(new URI(file).toURL(), prefix);
+                } else {
+                    logger.warn("[!] could not find '__fileurl' at " + url);
+                }
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while loading/parsing " + this.url, e);
+            }
+        }
+    }
+
+}

+ 196 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MrCongRipper.java

@@ -0,0 +1,196 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class MrCongRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(MrCongRipper.class);
+
+    private Document currDoc;
+    private int lastPageNum;
+    private int currPageNum;
+    private boolean tagPage = false;
+
+    public MrCongRipper(URL url) throws IOException {
+        super(url);
+        currPageNum = 1;
+    }
+
+    @Override
+    public String getHost() {
+        return "misskon";
+    }
+
+    @Override
+    public String getDomain() {
+        // NOTE: This was previously mrcong.com, which now redirects to
+        // misskon.com. Some resources still refer to mrcong.com
+        // but all the top level URLs are now misskon.com
+        return "misskon.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        logger.info(url.toExternalForm());
+        Pattern p = Pattern.compile(
+                "^https?://(?:[a-z]+\\.)?misskon\\.com/([-0-9a-zA-Z]+)(?:/?|/[0-9]+/?)?$");
+        Pattern p2 = Pattern.compile("^https?://misskon\\.com/tag/(\\S*)/$");
+        Matcher m = p.matcher(url.toExternalForm());
+        Matcher m2 = p2.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        } else if (m2.matches()) {
+            tagPage = true;
+            System.out.println("tagPage = TRUE");
+            return m2.group(1);
+        }
+
+        throw new MalformedURLException("Expected misskon.com URL format: "
+                + "misskon.com/GALLERY_NAME (or /PAGE_NUMBER/) - got " + url
+                + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        // returns the root gallery page regardless of actual page number
+        // "url" is an instance field of the superclass
+        String rootUrlStr;
+        URL rootUrl;
+
+        if (!tagPage) {
+            rootUrlStr = url.toExternalForm().replaceAll("(|/|/[0-9]+/?)$", "/");
+        } else {
+            rootUrlStr = url.toExternalForm().replaceAll("(page/[0-9]+/)$", "page/1/");
+        }
+
+        rootUrl = URI.create(rootUrlStr).toURL();
+        url = rootUrl;
+        currPageNum = 1;
+        currDoc = Http.url(url).get();
+        getMaxPageNumber(currDoc);
+        return currDoc;
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        int pageNum = currPageNum;
+        String urlStr;
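+        // Pages are addressed by a numeric path suffix, e.g. (illustrative)
+        //   https://misskon.com/some-set/ -> https://misskon.com/some-set/2
+        // while tag pages paginate as .../tag/<tag>/page/2/.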
+        if (!tagPage) {
+            if (pageNum == 1 && lastPageNum > 1) {
+                urlStr = url.toExternalForm() + (pageNum + 1);
+                logger.info("Old Str: " + url.toExternalForm() + "   New Str: " + urlStr);
+            } else if (pageNum < lastPageNum) {
+                urlStr = url.toExternalForm().replaceAll("(/([0-9]*)/?)$", ("/" + (pageNum + 1) + "/"));
+                logger.info("Old Str: " + url.toExternalForm() + "   New Str: " + urlStr);
+            } else {
+                throw new IOException("Error: Page number provided goes past last valid page number");
+            }
+        } else {
+            if (pageNum == 1 && lastPageNum > 1) {
+                urlStr = url.toExternalForm() + "page/" + (pageNum + 1);
+                logger.info("Old Str: " + url.toExternalForm() + "   New Str: " + urlStr);
+            } else if (pageNum < lastPageNum) {
+                urlStr = url.toExternalForm().replaceAll("(page/([0-9]*)/?)$", ("page/" + (pageNum + 1) + "/"));
+                logger.info("Old Str: " + url.toExternalForm() + "   New Str: " + urlStr);
+            } else {
+                logger.error("There is no next page!");
+                return null;
+            }
+        }
+
+        url = URI.create(urlStr).toURL();
+        currDoc = Http.url(url).get();
+        currPageNum++;
+        return currDoc;
+    }
+
+    private int getMaxPageNumber(Document doc) {
+        if (!tagPage) {
+            try {
+                // gets the last possible page for the gallery
+                lastPageNum = Integer.parseInt(doc.select("div.page-link > a").last().text());
+            } catch (Exception e) {
+                return 1;
+            }
+        } else {
+            try {
+                // gets the last possible page for the gallery
+                lastPageNum = Integer.parseInt(doc.select("div.pagination > a").last().text());
+                System.out.println("The last page found for " + url + " was " + lastPageNum);
+            } catch (Exception e) {
+                return 1;
+            }
+        }
+
+        return lastPageNum;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        // gets the urls of the images
+        List<String> result = new ArrayList<>();
+
+        if (!tagPage) {
+            for (Element el : doc.select("p > img")) {
+                String imageSource = el.attr("data-src");
+                if (imageSource == null || imageSource.isEmpty()) {
+                    imageSource = el.attr("src");
+                }
+                result.add(imageSource);
+            }
+
+            System.out.println("\n1.)Printing List: " + result + "\n");
+        } else {
+            for (Element el : doc.select("h2 > a")) {
+                String pageSource = el.attr("href");
+                if (!pageSource.equals("https://misskon.com/")) {
+                    result.add(pageSource);
+                    System.out.println("\n" + pageSource + " has been added to the list.");
+                }
+            }
+
+            System.out.println("\n2.)Printing List: " + result + "\n");
+        }
+
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        if (!tagPage) {
+            addURLToDownload(url, getPrefix(index));
+        } else {
+            try {
+                List<String> ls = this.getURLsFromPage(this.currDoc);
+                Document np = this.getNextPage(this.currDoc);
+
+                // Creates a list of all sets to download
+                while (np != null) {
+                    ls.addAll(this.getURLsFromPage(np));
+                    np = this.getNextPage(np);
+                }
+
+                for (String urlStr : ls) {
+                    MrCongRipper mcr = new MrCongRipper(URI.create(urlStr).toURL());
+                    mcr.setup();
+                    mcr.rip();
+                }
+
+            } catch (IOException | URISyntaxException e) {
+                logger.error("Error while ripping queued gallery", e);
+            }
+        }
+    }
+}

+ 71 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MultpornRipper.java

@@ -0,0 +1,71 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class MultpornRipper extends AbstractHTMLRipper {
+
+    public MultpornRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    protected String getDomain() {
+        return "multporn.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "multporn";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException, URISyntaxException {
+        Pattern p = Pattern.compile("^https?://multporn\\.net/node/(\\d+)/.*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
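+        // Comic URLs like multporn.net/comics/<name> don't expose the node id
+        // directly; fall back to the page's "simple mode" switcher link, whose
+        // href is assumed to look like /node/<id>/... (matched below).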
+        try {
+            String nodeHref = Http.url(url).get().select(".simple-mode-switcher").attr("href");
+            p = Pattern.compile("/node/(\\d+)/.*");
+            m = p.matcher(nodeHref);
+            if (m.matches()) {
+                this.url = new URI("https://multporn.net" + nodeHref).toURL();
+                return m.group(1);
+            }
+        } catch (Exception ignored) { }
+
+        throw new MalformedURLException("Expected multporn.net URL format: " +
+                "multporn.net/comics/comicid / multporn.net/node/id/* - got " + url + " instead");
+    }
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+        List<String> imageURLs = new ArrayList<>();
+        Elements thumbs = page.select(".mfp-gallery-image .mfp-item");
+        for (Element el : thumbs) {
+            imageURLs.add(el.attr("href"));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+}

+ 132 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaicomicsRipper.java

@@ -0,0 +1,132 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+public class MyhentaicomicsRipper extends AbstractHTMLRipper {
+    public MyhentaicomicsRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "myhentaicomics";
+    }
+
+    @Override
+    public String getDomain() {
+        return "myhentaicomics.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://myhentaicomics.com/index.php/([a-zA-Z0-9-]*)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        if (ma.matches()) {
+            return ma.group(1);
+        }
+
+        Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return mat.group(1);
+        }
+
+        throw new MalformedURLException("Expected myhentaicomics.com URL format: " +
+                        "myhentaicomics.com/index.php/albumName - got " + url + " instead");
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("^https?://myhentaicomics.com/index.php/search\\?q=([a-zA-Z0-9-]*)([a-zA-Z0-9=&]*)?$");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        if (ma.matches()) {
+            return true;
+        }
+
+        Pattern pat = Pattern.compile("^https?://myhentaicomics.com/index.php/tag/([0-9]*)/?([a-zA-Z%0-9+?=:]*)?$");
+        Matcher mat = pat.matcher(url.toExternalForm());
+        if (mat.matches()) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select(".g-album > a")) {
+            urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException, URISyntaxException {
+        return super.getFirstPage();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        // Find next page
+        String nextUrl = "";
+        Element elem = doc.select("a.ui-icon-right").first();
+        if (elem != null) {
+            String nextPage = elem.attr("href");
+            Pattern p = Pattern.compile("/index.php/[a-zA-Z0-9_-]*\\?page=\\d");
+            Matcher m = p.matcher(nextPage);
+            if (m.matches()) {
+                nextUrl = "https://myhentaicomics.com" + m.group(0);
+            }
+        }
+        if (nextUrl.equals("")) {
+            throw new IOException("No more pages");
+        }
+        // Sleep for half a sec to avoid getting IP banned
+        sleep(500);
+        return Http.url(nextUrl).get();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("img")) {
+            String imageSource = el.attr("src");
+            // Skip absolute URLs so we don't try to download the site logo;
+            // gallery images use relative paths.
+            if (!imageSource.startsWith("http://") && !imageSource.startsWith("https://")) {
+                // We replace "thumbs" with "resizes" so we get the full-sized images
+                imageSource = imageSource.replace("thumbs", "resizes");
+                result.add("https://myhentaicomics.com" + imageSource);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+
+}

+ 60 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MyhentaigalleryRipper.java

@@ -0,0 +1,60 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+public class MyhentaigalleryRipper extends AbstractHTMLRipper {
+
+    public MyhentaigalleryRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "myhentaigallery";
+    }
+
+    @Override
+    public String getDomain() {
+        return "myhentaigallery.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://myhentaigallery.com/gallery/thumbnails/([0-9]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("Expected myhentaicomics.com URL format: "
+                + "myhentaigallery.com/gallery/thumbnails/ID - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select(".comic-thumb > img")) {
+            String imageSource = el.attr("src");
+            // We replace "thumbnail" with "original" so we get the full-sized images
+            imageSource = imageSource.replace("thumbnail", "original");
+            result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+}

+ 59 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/MyreadingmangaRipper.java

@@ -0,0 +1,59 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+public class MyreadingmangaRipper extends AbstractHTMLRipper {
+
+    public MyreadingmangaRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "myreadingmanga";
+    }
+
+    @Override
+    public String getDomain() {
+        return "myreadingmanga.info";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://myreadingmanga.info/([a-zA-Z_\\-0-9]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+
+        throw new MalformedURLException("Expected myreadingmanga.info URL format: "
+                + "myreadingmanga.info/title - got " + url + " instead");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("div * img[data-lazy-src]")) {
+            String imageSource = el.attr("data-lazy-src");
+            result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+
+}

+ 130 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NatalieMuRipper.java

@@ -0,0 +1,130 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class NatalieMuRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(NatalieMuRipper.class);
+
+    public int news_id = 0;
+
+    public NatalieMuRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        String host = this.url.getHost();
+        host = host.substring(0, host.lastIndexOf('.'));
+        if (host.contains(".")) {
+            // Host has subdomain (www)
+            host = host.substring(host.lastIndexOf('.') + 1);
+        }
+        String board = this.url.toExternalForm().split("/")[3];
+        return host + "_" + board;
+    }
+
+    @Override
+    public boolean canRip(URL url) {
+        // Urls like:
+        // http://cdn2.natalie.mu/music/gallery/show/news_id/xxxxxx/image_id/xxxxxx
+        // http://cdn2.natalie.mu/music/news/140411
+        return url.toExternalForm().contains("natalie.mu")
+                && (url.toExternalForm().contains("/news_id/")
+                || url.toExternalForm().contains("/news/"));
+    }
+
+    /**
+     * The GID is the numeric news id, extracted from either the
+     * /news/xxxxxx or /news_id/xxxxxx portion of the URL.
+     */
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        String u = url.toExternalForm();
+        if (u.contains("/news_id/")) {
+            p = Pattern.compile("/news_id/([0-9]+)/");
+            m = p.matcher(u);
+            if (m.find()) {
+                return m.group(1);
+            }
+        } else if (u.contains("/news/")) {
+            p = Pattern.compile("/news/([0-9]+)/?");
+            m = p.matcher(u);
+            if (m.find()) {
+                return m.group(1);
+            }
+        }
+
+        throw new MalformedURLException(
+                "Expected natalie.mu URL formats: "
+                        + "http://natalie.mu/music/news/xxxxxx or http://natalie.mu/music/gallery/show/news_id/xxxxxx/image_id/yyyyyy"
+                        + " Got: " + u);
+    }
+
+    @Override
+    public String getDomain() {
+        return this.url.getHost();
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> imageURLs = new ArrayList<>();
+        Pattern p; Matcher m;
+        //select all album thumbnails
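+        // Thumbnails are inline-styled spans, e.g. (path illustrative)
+        //   style="background-image: url(//cdn2.natalie.mu/media/.../list_thumb_inbox.jpg);"
+        // and the full-size URL is derived by swapping in "xlarge" below.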
+        for (Element span : page.select(".NA_articleGallery span")) {
+            if (!span.hasAttr("style")) {
+                continue;
+            }
+            String style = span.attr("style").trim();
+
+            p = Pattern.compile("background-image: url\\((.*list_thumb_inbox.*)\\);", Pattern.CASE_INSENSITIVE);
+            m = p.matcher(style);
+            if (m.find()) {
+                String imgUrl = m.group(1);
+                if (imgUrl.startsWith("//")) {
+                    imgUrl = "http:" + imgUrl;
+                }
+                if (imgUrl.startsWith("/")) {
+                    imgUrl = "http://" + this.url.getHost() + imgUrl;
+                }
+                //convert thumbnail url into fullsize url
+                imgUrl = imgUrl.replace("list_thumb_inbox","xlarge");
+                // Don't download the same URL twice
+                if (imageURLs.contains(imgUrl)) {
+                    logger.debug("Already attempted: " + imgUrl);
+                    continue;
+                }
+                imageURLs.add(imgUrl);
+                if (isThisATest()) {
+                    break;
+                }
+            }
+
+            if (isStopped()) {
+                break;
+            }
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toString(), null);
+    }
+}

+ 140 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NewgroundsRipper.java

@@ -0,0 +1,140 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class NewgroundsRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(NewgroundsRipper.class);
+
+    private String username = "";  // Name of artist
+
+    // Extensions supported by Newgrounds
+    private static final List<String> ALLOWED_EXTENSIONS = Arrays.asList("png", "gif", "jpeg", "jpg");
+
+    // Images are pulled 60 at a time, a new page request is needed when count == 60
+    private int pageNumber = 1;
+    private int count = 0;
+
+
+    public NewgroundsRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "newgrounds";
+    }
+
+    @Override
+    protected String getDomain() {
+        return "newgrounds.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("^https?://(.+).newgrounds.com/?.*");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            this.username = m.group(1);
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected newgrounds.com URL format: " +
+                "username.newgrounds.com/art - got " + url + " instead");
+    }
+
+    @Override
+    protected Document getFirstPage() throws IOException {
+        return Http.url("https://" + this.username + ".newgrounds.com/art").timeout(10*1000).get();
+    }
+
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        if(this.count < 60) {
+            throw new IOException("No more pages");
+        }
+        this.count = 0; // New page found so reset count
+        return Http.url("https://" + this.username + ".newgrounds.com/art/page/" + this.pageNumber)
+                .header("X-Requested-With", "XMLHttpRequest").get(); // Send header to imitate scrolling
+    }
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+
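+        // The art page is scanned as raw HTML: every occurrence of
+        // "newgrounds.com/art/view/<username>" marks one artwork link, and the
+        // regex below pulls the title, thumbnail directory and file stem from a
+        // snippet assumed to look like (illustrative):
+        //   <title>" class="..." ... /thumbnails/<dir>/<name>_full.<ext>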
+        List<String> imageURLs = new ArrayList<>();
+        String documentHTMLString = page.toString().replaceAll("&quot;", "");
+        String findStr = "newgrounds.com/art/view/" + this.username;
+        int lastIndex = 0;
+
+        // Index where findStr is found; each occasion contains the link to an image
+        ArrayList<Integer> indices = new ArrayList<>();
+
+        while(lastIndex != -1){
+            lastIndex = documentHTMLString.indexOf(findStr, lastIndex);
+            if(lastIndex != -1){
+                this.count++;
+                lastIndex += findStr.length();
+                indices.add(lastIndex);
+            }
+        }
+
+        // Retrieve direct URL for image
+        for(int i = 0; i < indices.size(); i++){
+            String imageUrl = "https://art.ngfiles.com/images/";
+
+            String inLink = "https://www.newgrounds.com/art/view/" + this.username + "/";
+            String s;
+            if(i == indices.size() - 1){
+                s = documentHTMLString.substring(indices.get(i) + 2);
+            } else{
+                s = documentHTMLString.substring(indices.get(i) + 1, indices.get(i + 1));
+            }
+
+            s = s.replaceAll("\n", "").replaceAll("\t", "")
+                    .replaceAll("\\\\", "");
+
+            Pattern p = Pattern.compile("(.*?)\" class.*/thumbnails/(.*?)/(.*?)\\.");
+            Matcher m = p.matcher(s);
+
+            if (m.lookingAt()) {
+                String testURL = m.group(3) + "_" + this.username + "_" + m.group(1);
+                testURL = testURL.replace("_full", "");
+
+                // Open new document to get full sized image
+                try {
+                    Document imagePage = Http.url(inLink + m.group(1)).get();
+                    for (String extensions : ALLOWED_EXTENSIONS) {
+                        if(imagePage.toString().contains(testURL + "." + extensions)){
+                            imageUrl += m.group(2) + "/" + m.group(3).replace("_full","") + "_" + this.username + "_" + m.group(1) + "." + extensions;
+                            imageURLs.add(imageUrl);
+                            break;
+                        }
+                    }
+
+                } catch (IOException e) {
+                    logger.error("IO Error on trying to check extension: " + inLink + m.group(1));
+                }
+            }
+        }
+        this.pageNumber += 1;
+        return imageURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 234 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NfsfwRipper.java

@@ -0,0 +1,234 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.utils.Http;
+
+public class NfsfwRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(NfsfwRipper.class);
+
+    private static final String DOMAIN = "nfsfw.com",
+                                HOST   = "nfsfw";
+
+    private int index = 0;
+    private String currentDir = "";
+    private List<String> subalbumURLs = new ArrayList<>();
+    private Pattern subalbumURLPattern = Pattern.compile(
+            "https?://[wm.]*nfsfw.com/gallery/v/[^/]+/(.+)$"
+    );
+
+    // threads pool for downloading images from image pages
+    private DownloadThreadPool nfsfwThreadPool;
+
+    public NfsfwRipper(URL url) throws IOException {
+        super(url);
+        nfsfwThreadPool = new DownloadThreadPool("NFSFW");
+    }
+
+    @Override
+    protected String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException {
+        String nextURL = null;
+        Elements a = page.select("a.next");
+        if (!a.isEmpty()){
+            // Get next page of current album
+            nextURL = "http://nfsfw.com" + a.first().attr("href");
+        } else if (!subalbumURLs.isEmpty()){
+            // Get next sub-album
+            nextURL = subalbumURLs.remove(0);
+            logger.info("Detected subalbum URL at:" + nextURL);
+            Matcher m = subalbumURLPattern.matcher(nextURL);
+            if (m.matches()) {
+                // Set the new save directory and save images with a new index
+                this.currentDir = m.group(1);
+                this.index = 0;
+            } else {
+                logger.error("Invalid sub-album URL: " + nextURL);
+                nextURL = null;
+            }
+        }
+        // Wait
+        try {
+            Thread.sleep(2000);
+        } catch (InterruptedException e) {
+            logger.error("Interrupted while waiting to load next page", e);
+        }
+        if (nextURL == null){
+            throw new IOException("No more pages");
+        } else {
+            return Http.url(nextURL).get();
+        }
+    }
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+        List<String> imagePageURLs = getImagePageURLs(page);
+
+        // Check if any sub-albums are present on this page
+        List<String> subalbumURLs = getSubalbumURLs(page);
+        this.subalbumURLs.addAll(subalbumURLs);
+
+        return imagePageURLs;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        // if we are now downloading a sub-album, all images in it
+        // should be indexed starting from 0
+        if (!this.currentDir.equals("")){
+            index = ++this.index;
+        }
+        NfsfwImageThread t = new NfsfwImageThread(url, currentDir, index);
+        nfsfwThreadPool.addThread(t);
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        // always start on the first page of an album
+        // (strip the options after the '?')
+        String u = url.toExternalForm();
+        if (u.contains("?")) {
+            u = u.substring(0, u.indexOf("?"));
+            return new URI(u).toURL();
+        } else {
+            return url;
+        }
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p; Matcher m;
+
+        p = Pattern.compile("https?://[wm.]*nfsfw.com/gallery/v/(.*)$");
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            String group = m.group(1);
+            if (group.endsWith("/")) {
+                group = group.substring(0, group.length() - 1);
+            }
+            return group.replaceAll("/", "__");
+        }
+
+        throw new MalformedURLException(
+                "Expected nfsfw.com gallery format: "
+                        + "nfsfw.com/gallery/v/albumname"
+                        + " Got: " + url);
+    }
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return nfsfwThreadPool;
+    }
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        try {
+            final var fstPage = getCachedFirstPage();
+            List<String> imageURLs = getImagePageURLs(fstPage);
+            List<String> subalbumURLs = getSubalbumURLs(fstPage);
+            return imageURLs.isEmpty() && !subalbumURLs.isEmpty();
+        } catch (IOException | URISyntaxException e) {
+            logger.error("Unable to load " + url, e);
+            return false;
+        }
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        return getSubalbumURLs(doc);
+    }
+
+    // helper methods
+
+    private List<String> getImagePageURLs(Document page){
+        // get image pages
+        // NOTE: It might be possible to get the (non-thumbnail) image URL
+        // without going to its page first, as there seems to be a pattern
+        // between the thumb and actual image URLs (see the illustrative
+        // sketch after this method), but that is outside the scope of the
+        // current issue being solved.
+        List<String> imagePageURLs = new ArrayList<>();
+        for (Element thumb : page.select("td.giItemCell > div > a")) {
+            String imagePage = "http://nfsfw.com" + thumb.attr("href");
+            imagePageURLs.add(imagePage);
+        }
+        return imagePageURLs;
+    }
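+
+    // Hedged sketch of the shortcut hinted at in the NOTE above: IF the
+    // full-size URL really is the thumbnail URL minus a "thumb_" marker (an
+    // unverified assumption about this gallery's layout, not confirmed
+    // against the live site), the per-image page visit could be skipped.
+    // Deliberately left unused.
+    @SuppressWarnings("unused")
+    private static String guessFullSizeURL(String thumbURL) {
+        // e.g. ".../gallery/d/123-2/thumb_pic.jpg" -> ".../gallery/d/123-2/pic.jpg"
+        return thumbURL.replaceFirst("/thumb_([^/]+)$", "/$1");
+    }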
+
+    private List<String> getSubalbumURLs(Document page){
+        // Check if sub-albums are present on this page
+        List<String> subalbumURLs = new ArrayList<>();
+        for (Element suba : page.select("td.IMG > a")) {
+            String subURL = "http://nfsfw.com" + suba.attr("href");
+            subalbumURLs.add(subURL);
+        }
+        return subalbumURLs;
+    }
+
+    /**
+     * Helper class to find and download images found on "image" pages
+     */
+    private class NfsfwImageThread implements Runnable {
+        private final URL url;
+        private final String subdir;
+        private final int index;
+
+        NfsfwImageThread(URL url, String subdir, int index) {
+            super();
+            this.url = url;
+            this.subdir = subdir;
+            this.index = index;
+        }
+
+        @Override
+        public void run() {
+            try {
+                Document doc = Http.url(this.url)
+                                   .referrer(this.url)
+                                   .get();
+                Elements images = doc.select(".gbBlock img");
+                if (images.isEmpty()) {
+                    logger.error("Failed to find image at " + this.url);
+                    return;
+                }
+                String file = images.first().attr("src");
+                if (file.startsWith("/")) {
+                    file = "http://nfsfw.com" + file;
+                }
+                addURLToDownload(new URI(file).toURL(), getPrefix(index), this.subdir);
+            } catch (IOException | URISyntaxException e) {
+                logger.error("[!] Exception while loading/parsing " + this.url, e);
+            }
+        }
+    }
+}

+ 144 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NhentaiRipper.java

@@ -0,0 +1,144 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.ripper.DownloadThreadPool;
+import com.rarchives.ripme.ui.RipStatusMessage;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.RipUtils;
+import com.rarchives.ripme.utils.Utils;
+
+public class NhentaiRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(NhentaiRipper.class);
+
+    private Document firstPage;
+
+    // Thread pool for finding direct image links from "image" pages (html)
+    private DownloadThreadPool nhentaiThreadPool = new DownloadThreadPool("nhentai");
+
+    @Override
+    public boolean hasQueueSupport() {
+        return true;
+    }
+
+    @Override
+    public boolean pageContainsAlbums(URL url) {
+        Pattern pa = Pattern.compile("^https?://nhentai\\.net/tag/([a-zA-Z0-9_\\-]+)/?");
+        Matcher ma = pa.matcher(url.toExternalForm());
+        return ma.matches();
+    }
+
+    @Override
+    public List<String> getAlbumsToQueue(Document doc) {
+        List<String> urlsToAddToQueue = new ArrayList<>();
+        for (Element elem : doc.select("a.cover")) {
+            urlsToAddToQueue.add("https://" + getDomain() + elem.attr("href"));
+        }
+        return urlsToAddToQueue;
+    }
+
+    @Override
+    public DownloadThreadPool getThreadPool() {
+        return nhentaiThreadPool;
+    }
+
+    public NhentaiRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "nhentai.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "nhentai";
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException {
+        if (firstPage == null) {
+            try {
+                firstPage = Http.url(url).get();
+            } catch (IOException e) {
+                logger.error("Unable to load " + url, e);
+                // fall back to the default host_gid naming
+                return getHost() + "_" + getGID(url);
+            }
+        }
+
+        // Elements.text() returns "" (never null) when nothing matches
+        String title = firstPage.select("#info > h1").text();
+        if (title.isEmpty()) {
+            return getHost() + "_" + getGID(url);
+        }
+        return "nhentai_" + title;
+    }
+
+    public List<String> getTags(Document doc) {
+        List<String> tags = new ArrayList<>();
+        for (Element tag : doc.select("a.tag")) {
+            String tagString = tag.attr("href").replaceAll("/tag/", "").replaceAll("/", "");
+            logger.info("Found tag: " + tagString);
+            tags.add(tagString);
+        }
+        return tags;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        // Ex: https://nhentai.net/g/159174/
+        Pattern p = Pattern.compile("^https?://nhentai\\.net/g/(\\d+).*$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            // Return the text contained between () in the regex - 159174 in this case
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected nhentai.net URL format: " +
+                "nhentai.net/g/albumid - got " + url + " instead");
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        if (firstPage == null) {
+            firstPage = Http.url(url).get();
+        }
+
+        String blacklistedTag = RipUtils.checkTags(Utils.getConfigStringArray("nhentai.blacklist.tags"), getTags(firstPage));
+        if (blacklistedTag != null) {
+            sendUpdate(RipStatusMessage.STATUS.DOWNLOAD_WARN, "Skipping " + url.toExternalForm() + " as it " +
+                    "contains the blacklisted tag \"" + blacklistedTag + "\"");
+            return null;
+        }
+        return firstPage;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        List<String> imageURLs = new ArrayList<>();
+        Elements thumbs = page.select("a.gallerythumb > img");
+        for (Element el : thumbs) {
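+            // Thumbnails are served from t.nhentai.net with a "t" suffix
+            // before the extension (e.g. 1t.jpg); rewriting the host to
+            // i.nhentai.net and dropping the suffix yields the full image.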
+            imageURLs.add(el.attr("data-src").replaceAll("://t", "://i").replaceAll("t\\.", "."));
+        }
+        return imageURLs;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+
+}

+ 82 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NsfwAlbumRipper.java

@@ -0,0 +1,82 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class NsfwAlbumRipper extends AbstractHTMLRipper {
+    private static final Logger logger = LogManager.getLogger(NsfwAlbumRipper.class);
+
+    private static final String HOST = "nsfwalbum";
+    private static final String DOMAIN = "nsfwalbum.com";
+
+    public NsfwAlbumRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern pattern = Pattern.compile("(?!https:\\/\\/nsfwalbum.com\\/album\\/)\\d+");
+        Matcher matcher = pattern.matcher(url.toExternalForm());
+
+        if (matcher.find()) {
+            return matcher.group();
+        }
+
+        throw new MalformedURLException(
+                "Expected nsfwalbum.com URL format nsfwalbum.com/album/albumid - got " + url + " instead.");
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> results = new ArrayList<>();
+
+        Elements imgs = doc.select(".album img");
+
+        logger.info(imgs.size() + " elements (thumbnails) found.");
+
+        for (Element img : imgs) {
+            String thumbURL = img.attr("data-src");
+            String fullResURL = null;
+
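+            // Each external image host has its own thumbnail naming scheme,
+            // so the thumb URL is mapped to the full-resolution URL per host;
+            // thumbnails from unknown hosts are skipped.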
+            if (thumbURL.contains("imgspice.com")) {
+                fullResURL = thumbURL.replace("_t.jpg", ".jpg");
+            } else if (thumbURL.contains("imagetwist.com")) {
+                fullResURL = thumbURL.replace("/th/", "/i/");
+            } else if (thumbURL.contains("pixhost.com")) {
+                fullResURL = thumbURL.replace("https://t", "https://img");
+                fullResURL = fullResURL.replace("/thumbs/", "/images/");
+            } else if (thumbURL.contains("imx.to")) {
+                fullResURL = thumbURL.replace("/t/", "/i/");
+            }
+
+            if (fullResURL != null)
+                results.add(fullResURL);
+        }
+
+        return results;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 135 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NsfwXxxRipper.java

@@ -0,0 +1,135 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractJSONRipper;
+import com.rarchives.ripme.utils.Http;
+import org.apache.commons.lang.StringEscapeUtils;
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class NsfwXxxRipper extends AbstractJSONRipper {
+
+    public NsfwXxxRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    protected String getDomain() {
+        return "nsfw.xxx";
+    }
+
+    @Override
+    public String getHost() {
+        return "nsfw_xxx";
+    }
+
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        String u = url.toExternalForm();
+        // https://nsfw.xxx/user/kelly-kat/foo -> https://nsfw.xxx/user/kelly-kat
+        // https://nsfw.xxx/user/kelly-kat -> https://nsfw.xxx/user/kelly-kat
+        // keep up to and including the username
+        u = u.replaceAll("https?://nsfw.xxx/user/([^/]+)/?.*", "https://nsfw.xxx/user/$1");
+        if (!u.contains("nsfw.xxx/user")) {
+            throw new MalformedURLException("Invalid URL: " + url);
+        }
+
+        return new URI(u).toURL();
+    }
+
+    String getUser() throws MalformedURLException {
+        return getGID(url);
+    }
+
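+    // The slide-page endpoint appears to back the site's infinite scroll:
+    // the types[] parameters request image, video and gallery posts for the
+    // given user, one page per call (parameter semantics are inferred from
+    // the URL itself, not from any documented API).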
+    URL getPage(int page) throws MalformedURLException, URISyntaxException {
+        return new URI("https://nsfw.xxx/slide-page/" + page + "?nsfw%5B%5D=0&types%5B%5D=image&types%5B%5D=video&types%5B%5D=gallery&slider=1&jsload=1&user=" + getUser()).toURL();
+    }
+
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("https://nsfw.xxx/user/([^/]+)/?$");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected URL format: " +
+                "nsfw.xxx/user/USER - got " + url + " instead");
+    }
+
+
+    @Override
+    protected JSONObject getFirstPage() throws IOException, URISyntaxException {
+        return Http.url(getPage(1)).getJSON();
+    }
+
+    List<String> descriptions = new ArrayList<>();
+
+    @Override
+    protected JSONObject getNextPage(JSONObject doc) throws IOException, URISyntaxException {
+        JSONObject nextPage = Http.url(getPage(doc.getInt("page") + 1)).getJSON();
+        JSONArray items = nextPage.getJSONArray("items");
+        if (items.isEmpty()) {
+            throw new IOException("No more pages");
+        }
+        return nextPage;
+    }
+
+    class ApiEntry {
+        String srcUrl;
+        String author;
+        String title;
+
+        public ApiEntry(String srcUrl, String author, String title) {
+            this.srcUrl = srcUrl;
+            this.author = author;
+            this.title = title;
+        }
+    }
+
+    @Override
+    protected List<String> getURLsFromJSON(JSONObject json) {
+        JSONArray items = json.getJSONArray("items");
+        List<ApiEntry> data = IntStream
+                .range(0, items.length())
+                .mapToObj(items::getJSONObject)
+                .map(o -> {
+                    String srcUrl;
+                    if(o.has("src")) {
+                        srcUrl = o.getString("src");
+                    } else {
+                        // video posts carry an HTML snippet instead of a plain
+                        // src field; pull the URL out of its src attribute
+                        Pattern videoHtmlSrcPattern = Pattern.compile("src=\"([^\"]+)\"");
+                        Matcher matches = videoHtmlSrcPattern.matcher(o.getString("html"));
+                        if (!matches.find()) {
+                            throw new IllegalStateException("No src attribute in video html: " + o);
+                        }
+                        srcUrl = StringEscapeUtils.unescapeHtml(matches.group(1));
+                    }
+
+                    return new ApiEntry(srcUrl, o.getString("author"), o.getString("title"));
+                })
+                .toList();
+
+        data.forEach(e -> descriptions.add(e.title));
+        return data.stream().map(e -> e.srcUrl).collect(Collectors.toList());
+    }
+
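+    // downloadURL receives 1-based indices in the order the URLs were
+    // returned, so descriptions.get(index - 1) pairs each file with the
+    // title collected in getURLsFromJSON and bakes it into the filename.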
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index) + descriptions.get(index - 1) + "_", "", "", null);
+    }
+}

+ 106 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/NudeGalsRipper.java

@@ -0,0 +1,106 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+
+public class NudeGalsRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(NudeGalsRipper.class);
+
+    private static final Pattern ALBUM_PATTERN = Pattern.compile("^.*nude-gals\\.com/photoshoot\\.php\\?photoshoot_id=(\\d+)$");
+    private static final Pattern VIDEO_PATTERN = Pattern.compile("^.*nude-gals\\.com/video\\.php\\?video_id=(\\d+)$");
+
+    public NudeGalsRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "Nude-Gals";
+    }
+
+    @Override
+    public String getDomain() {
+        return "nude-gals.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p;
+        Matcher m;
+
+        p = ALBUM_PATTERN;
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info("Found nude-gals photo album page");
+            return "album_" + m.group(1);
+        }
+
+        p = VIDEO_PATTERN;
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info("Found nude-gals video page");
+            return "video_" + m.group(1);
+        }
+
+        throw new MalformedURLException(
+                "Expected nude-gals.com gallery format: "
+                        + "nude-gals.com/photoshoot.php?phtoshoot_id=####"
+                        + " Got: " + url);
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> urlsToDownload = new ArrayList<>();
+
+        Pattern p;
+        Matcher m;
+
+        p = ALBUM_PATTERN;
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info("Ripping nude-gals photo album");
+            Elements thumbs = doc.select("img.thumbnail");
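+            // Full-size images share the thumbnail path minus the
+            // "thumbs/th_" prefix; spaces are escaped by hand because the
+            // URL is assembled from raw attribute text.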
+            for (Element thumb : thumbs) {
+                String link = thumb.attr("src").strip().replaceAll("thumbs/th_", "");
+                String imgSrc = "http://nude-gals.com/" + link;
+                imgSrc = imgSrc.replaceAll(" ", "%20");
+                urlsToDownload.add(imgSrc);
+            }
+        }
+
+        p = VIDEO_PATTERN;
+        m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            logger.info("Ripping nude-gals video");
+            Elements thumbs = doc.select("video source");
+            for (Element thumb : thumbs) {
+                String link = thumb.attr("src").strip();
+                String videoSrc = "http://nude-gals.com/" + link;
+                videoSrc = videoSrc.replaceAll(" ", "%20");
+                urlsToDownload.add(videoSrc);
+            }
+        }
+
+        return urlsToDownload;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        // Send referrer when downloading images
+        addURLToDownload(url, getPrefix(index), "", this.url.toExternalForm(), null);
+    }
+}

+ 81 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/OglafRipper.java

@@ -0,0 +1,81 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+public class OglafRipper extends AbstractHTMLRipper {
+
+    public OglafRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "oglaf";
+    }
+
+    @Override
+    public String getDomain() {
+        return "oglaf.com";
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        Pattern p = Pattern.compile("http://oglaf\\.com/([a-zA-Z1-9_-]*)/?");
+        Matcher m = p.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(1);
+        }
+        throw new MalformedURLException("Expected oglaf URL format: " +
+                "oglaf.com/NAME - got " + url + " instead");
+    }
+
+    @Override
+    public String getAlbumTitle(URL url) throws MalformedURLException {
+        return getDomain();
+    }
+
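+    // Oglaf is a sequential comic: each strip links to the next via the
+    // "nx" arrow inside div#nav, so the ripper walks the chain one page at
+    // a time.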
+    @Override
+    public Document getNextPage(Document doc) throws IOException {
+        if (doc.select("div#nav > a > div#nx").first() == null) {
+            throw new IOException("No more pages");
+        }
+        Element elem = doc.select("div#nav > a > div#nx").first().parent();
+        String nextPage = elem.attr("href");
+        // Some times this returns a empty string
+        // This for stops that
+        if (nextPage.equals("")) {
+            throw new IOException("No more pages");
+        }
+        else {
+            sleep(1000);
+            return Http.url("http://oglaf.com" + nextPage).get();
+        }
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document doc) {
+        List<String> result = new ArrayList<>();
+        for (Element el : doc.select("b > img#strip")) {
+                String imageSource = el.select("img").attr("src");
+                result.add(imageSource);
+        }
+        return result;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(index));
+    }
+}

+ 127 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/PahealRipper.java

@@ -0,0 +1,127 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+import com.rarchives.ripme.utils.Utils;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+import org.jsoup.select.Elements;
+
+public class PahealRipper extends AbstractHTMLRipper {
+    private static final Logger logger = LogManager.getLogger(PahealRipper.class);
+
+    private static Map<String, String> cookies = null;
+    private static Pattern gidPattern = null;
+
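+    // rule34.paheal.net shows a terms-of-use interstitial; presetting this
+    // cookie marks the prompt as accepted so pages load directly.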
+    private static Map<String, String> getCookies() {
+        if (cookies == null) {
+            cookies = new HashMap<>(1);
+            cookies.put("ui-tnc-agreed", "true");
+        }
+        return cookies;
+    }
+
+    public PahealRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getDomain() {
+        return "rule34.paheal.net";
+    }
+
+    @Override
+    public String getHost() {
+        return "paheal";
+    }
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        return Http.url("http://rule34.paheal.net/post/list/" + getTerm(url) + "/1").cookies(getCookies()).get();
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException {
+        for (Element e : page.select("#paginator a")) {
+            if (e.text().equalsIgnoreCase("next")) {
+                return Http.url(e.absUrl("href")).cookies(getCookies()).get();
+            }
+        }
+
+        return null;
+    }
+
+    @Override
+    public List<String> getURLsFromPage(Document page) {
+        Elements elements = page.select(".shm-thumb.thumb>a").not(".shm-thumb-link");
+        List<String> res = new ArrayList<>(elements.size());
+
+        for (Element e : elements) {
+            res.add(e.absUrl("href"));
+        }
+
+        return res;
+    }
+
+    @Override
+    public void downloadURL(URL url, int index) {
+        try {
+            String name = url.getPath();
+            String ext = ".png";
+
+            name = name.substring(name.lastIndexOf('/') + 1);
+            if (name.indexOf('.') >= 0) {
+                ext = name.substring(name.lastIndexOf('.'));
+                name = name.substring(0, name.length() - ext.length());
+            }
+
+            Path outFile = Paths.get(workingDir
+                + "/"
+                + Utils.filesystemSafe(new URI(name).getPath())
+                + ext);
+            addURLToDownload(url, outFile);
+        } catch (URISyntaxException ex) {
+            logger.error("Error while downloading URL " + url, ex);
+        }
+    }
+
+    private String getTerm(URL url) throws MalformedURLException {
+        if (gidPattern == null) {
+            gidPattern = Pattern.compile("^https?://(www\\.)?rule34\\.paheal\\.net/post/list/([a-zA-Z0-9$_.+!*'(),%-]+)(/.*)?(#.*)?$");
+        }
+
+        Matcher m = gidPattern.matcher(url.toExternalForm());
+        if (m.matches()) {
+            return m.group(2);
+        }
+
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException {
+        try {
+            return Utils.filesystemSafe(new URI(getTerm(url)).getPath());
+        } catch (URISyntaxException ex) {
+            logger.error(ex);
+        }
+
+        throw new MalformedURLException("Expected paheal.net URL format: rule34.paheal.net/post/list/searchterm - got " + url + " instead");
+    }
+}

+ 21 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/PawooRipper.java

@@ -0,0 +1,21 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.URL;
+
+public class PawooRipper extends MastodonRipper {
+    public PawooRipper(URL url) throws IOException {
+        super(url);
+    }
+
+    @Override
+    public String getHost() {
+        return "pawoo";
+    }
+
+    @Override
+    public String getDomain() {
+        return "pawoo.net";
+    }
+
+}

+ 305 - 0
src/main/java/com/rarchives/ripme/ripper/rippers/PhotobucketRipper.java

@@ -0,0 +1,305 @@
+package com.rarchives.ripme.ripper.rippers;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.jsoup.Connection;
+import org.jsoup.nodes.Document;
+import org.jsoup.nodes.Element;
+
+import com.rarchives.ripme.ripper.AbstractHTMLRipper;
+import com.rarchives.ripme.utils.Http;
+
+// TODO: Probably want to add queue support for cases like this:
+// http://s732.photobucket.com/user/doublesix66/library/WARZONE?sort=3&page=1
+public class PhotobucketRipper extends AbstractHTMLRipper {
+
+    private static final Logger logger = LogManager.getLogger(PhotobucketRipper.class);
+
+    private static final String DOMAIN = "photobucket.com",
+                                HOST   = "photobucket";
+    private static final int ITEMS_PER_PAGE = 24;
+    private static final int WAIT_BEFORE_NEXT_PAGE = 2000;
+
+    private final class AlbumMetadata {
+        private final String baseURL;
+        private final String location;
+        private final int sortOrder;
+        // cookies for the current page of this album
+        private Map<String, String> cookies;
+        private Document currPage;
+        private int numPages;
+        private int pageIndex = 1;
+
+        private AlbumMetadata(JSONObject data) {
+            this.baseURL = data.getString("url");
+            this.location = data.getString("location")
+                                .replace(" ", "_");
+            this.sortOrder = data.getInt("sortOrder");
+        }
+
+        private String getCurrPageURL(){
+            return baseURL + String.format("?sort=%d&page=%d",
+                                       sortOrder, pageIndex);
+        }
+    }
+
+    private final Pattern collDataPattern;
+    private final Pattern pbURLPattern;
+
+    // all albums including sub-albums to rip
+    private List<AlbumMetadata> albums;
+    // the album currently being ripped
+    private AlbumMetadata currAlbum;
+    // a new index per album downloaded
+    private int index = 0;
+
+    public PhotobucketRipper(URL url) throws IOException {
+        super(url);
+        this.collDataPattern = Pattern.compile(
+                "^.*collectionData: (\\{.*}).*$", Pattern.DOTALL
+        );
+        this.pbURLPattern = Pattern.compile(
+                "^https?://([a-zA-Z0-9]+)\\.photobucket\\.com/user/" +
+                "([a-zA-Z0-9_\\-]+)/library/([^?]*).*$"
+        );
+    }
+
+    @Override
+    protected String getDomain() {
+        return DOMAIN;
+    }
+
+    @Override
+    public String getHost() {
+        return HOST;
+    }
+
+    @Override
+    public URL sanitizeURL(URL url) throws MalformedURLException, URISyntaxException {
+        logger.info(url);
+        String u = url.toExternalForm();
+        if (u.contains("?")) {
+            // strip options from URL
+            u = u.substring(0, u.indexOf("?"));
+        }
+        if (!u.endsWith("/")) {
+            // append trailing slash
+            u = u + "/";
+        }
+        return new URI(u).toURL();
+    }
+
+    @Override
+    public String getGID(URL url) throws MalformedURLException, URISyntaxException {
+        Matcher m;
+
+        URL sanitized = sanitizeURL(url);
+
+        // http://s844.photobucket.com/user/SpazzySpizzy/library/Lady%20Gaga?sort=3&page=1
+        m = pbURLPattern.matcher(sanitized.toExternalForm());
+        if (m.matches()) {
+            // the username is not really a unique GID, because the same user
+            // can have multiple albums, but on the other hand, using HOST_GID
+            // as save directory means we can group ripped albums of the same
+            // user.
+            return m.group(2);
+        }
+
+        throw new MalformedURLException(
+                "Expected photobucket.com gallery format: "
+                        + "http://x###.photobucket.com/user/username/library/..."
+                        + " Got: " + url);
+    }
+
+
+
+    // Page iteration
+
+
+
+    @Override
+    public Document getFirstPage() throws IOException {
+        if (this.currAlbum == null) {
+            this.albums = getAlbumMetadata(this.url.toExternalForm());
+            logger.info("Detected " + albums.size() + " albums in total");
+        }
+        this.currAlbum = this.albums.remove(0);
+        // NOTE: Why not just get media count in the metadata json?
+        //
+        // Because that data might not reflect what the user sees on the page
+        // and can lead to iterating more pages than there actually are.
+        //
+        // An example:
+        // Metadata JSON -> AlbumStats: 146 images + 0 videos -> 146 items/7 pages
+        // http://s1255.photobucket.com/api/user/mimajki/album/Movie%20gifs/get?subAlbums=48&json=1
+        // Actual item count when looking at the album url: 131 items/6 pages
+        // http://s1255.photobucket.com/user/mimajki/library/Movie%20gifs?sort=6&page=1
+        Connection.Response resp = Http.url(currAlbum.getCurrPageURL()).response();
+        this.currAlbum.cookies = resp.cookies();
+        this.currAlbum.currPage = resp.parse();
+        JSONObject collectionData = getCollectionData(currAlbum.currPage);
+        if (collectionData == null) {
+            throw new IOException("Unable to find collectionData at " + currAlbum.getCurrPageURL());
+        }
+        int totalNumItems = collectionData.getInt("total");
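+        // e.g. 131 items at 24 per page -> ceil(131/24.0) = 6 pages; the
+        // double casts keep the division from truncating before the ceil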
+        this.currAlbum.numPages = (int) Math.ceil(
+                (double)totalNumItems / (double)ITEMS_PER_PAGE);
+        this.index = 0;
+        return currAlbum.currPage;
+    }
+
+    @Override
+    public Document getNextPage(Document page) throws IOException {
+        this.currAlbum.pageIndex++;
+        boolean endOfAlbum = currAlbum.pageIndex > currAlbum.numPages;
+        boolean noMoreSubalbums = albums.isEmpty();
+        if (endOfAlbum && noMoreSubalbums){
+            throw new IOException("No more pages");
+        }
+        try {
+            Thread.sleep(WAIT_BEFORE_NEXT_PAGE);
+        } catch (InterruptedException e) {
+            logger.info("Interrupted while waiting before getting next page");
+        }
+        if (endOfAlbum){
+            logger.info("Turning to next album " + albums.get(0).baseURL);
+            return getFirstPage();
+        } else {
+            logger.info("Turning to page " + currAlbum.pageIndex +
+                    " of album " + currAlbum.baseURL);
+            Connection.Response resp = Http.url(currAlbum.getCurrPageURL()).response();
+            currAlbum.cookies = resp.cookies();
+            currAlbum.currPage = resp.parse();
+            return currAlbum.currPage;
+        }
+    }
+
+
+
+    // Media parsing
+
+
+
+    @Override
+    protected List<String> getURLsFromPage(Document page) {
+        JSONObject collectionData = getCollectionData(page);
+        if (collectionData == null) {
+            logger.error("Unable to find JSON data at URL: " + page.location());
+            // probably better than returning null, as the ripper will display
+            // that nothing was found instead of a NullPointerException
+            return new ArrayList<>();
+        } else {
+            return getImageURLs(collectionData);
+        }
+    }
+
+    private JSONObject getCollectionData(Document page){
+        // Retrieve JSON from a script tag in the returned document
+        for (Element script : page.select("script[type=text/javascript]")) {
+            String data = script.data();
+            // Ensure this chunk of javascript contains the album info
+            if (data.contains("libraryAlbumsPageCollectionData")) {
+                Matcher m = collDataPattern.matcher(data);
+                if (m.matches()) {
+                    // Grab the JSON
+                    return new JSONObject(m.group(1));
+                }
+            }
+        }
+        return null;
+    }
+
+    private List<String> getImageURLs(JSONObject collectionData){
+        List<String> results = new ArrayList<>();
+        JSONObject items = collectionData.getJSONObject("items");
+        JSONArray objects = items.getJSONArray("objects");
+        for (int i = 0; i < objects.length(); i++) {
+            JSONObject object = objects.getJSONObject(i);
+            String imgURL = object.getString("fullsizeUrl");
+            results.add(imgURL);
+        }
+        return results;
+    }
+
+    @Override
+    protected void downloadURL(URL url, int index) {
+        addURLToDownload(url, getPrefix(++this.index), currAlbum.location,
+                currAlbum.currPage.location(), currAlbum.cookies);
+    }
+
+
+
+    // helper methods (for album metadata retrieval)
+
+
+
+    private List<AlbumMetadata> getAlbumMetadata(String albumURL)
+            throws IOException {
+        JSONObject data = getAlbumMetadataJSON(albumURL);
+        List<AlbumMetadata> metadata = new ArrayList<>();
+        metadata.add(new AlbumMetadata(data));
+        if (!data.getString("location").equals("")) {
+            // if the location were to equal "", then we are at the profile
+            // page of a user. Ripping all sub-albums here would mean ripping
+            // all albums of a user (Not supported, only rip items in a users
+            // personal bucket).
+            for (JSONObject sub : getSubAlbumJSONs(data)){
+                metadata.add(new AlbumMetadata(sub));
+            }
+        }
+        logger.info("Succesfully retrieved and parsed metadata");
+        return metadata;
+    }
+
+    private JSONObject getAlbumMetadataJSON(String albumURL)
+            throws IOException {
+        String subdomain, user, albumTitle;
+        Matcher m = pbURLPattern.matcher(albumURL);
+        if (!m.matches()){
+            throw new MalformedURLException("invalid URL " + albumURL);
+        }
+        subdomain = m.group(1);
+        user = m.group(2);
+        albumTitle = m.group(3);
+        if (albumTitle.endsWith("/")){
+            albumTitle = albumTitle.substring(0, albumTitle.length() - 1);
+        }
+        String apiURL = String.format("http://%s.photobucket.com/api/user/" +
+                        "%s/album/%s/get?subAlbums=%d&json=1",
+                subdomain, user, albumTitle, ITEMS_PER_PAGE);
+        logger.info("Loading " + apiURL);
+        JSONObject data = Http.url(apiURL).getJSON().getJSONObject("data");
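+        // the first call asks for at most ITEMS_PER_PAGE sub-albums; if the
+        // album holds more, fetch once more with the exact count so none are
+        // missed (the subAlbums parameter is the requested limit)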
+        if (data.has("subAlbums")) {
+            int count = data.getInt("subAlbumCount");
+            if (count > ITEMS_PER_PAGE) {
+                apiURL = String.format("http://%s.photobucket.com/api/user/" +
+                                "%s/album/%s/get?subAlbums=%d&json=1",
+                        subdomain, user, albumTitle, count);
+                data = Http.url(apiURL).getJSON().getJSONObject("data");
+            }
+        }
+        return data;
+    }
+
+    private List<JSONObject> getSubAlbumJSONs(JSONObject data) {
+        List<JSONObject> subalbumJSONs = new ArrayList<>();
+        if (data.has("subAlbums")) {
+            JSONArray subalbums = data.getJSONArray("subAlbums");
+            for (int idx = 0; idx < subalbums.length(); idx++) {
+                JSONObject subalbumJSON = subalbums.getJSONObject(idx);
+                subalbumJSONs.add(subalbumJSON);
+            }
+        }
+        return subalbumJSONs;
+    }
+}

Some files were not shown because too many files changed in this diff